author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /js/src/vm
parent     Initial commit. (diff)
download   firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
           firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip

Adding upstream version 115.7.0esr.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/vm')
-rw-r--r--  js/src/vm/Activation-inl.h  172
-rw-r--r--  js/src/vm/Activation.cpp  84
-rw-r--r--  js/src/vm/Activation.h  565
-rw-r--r--  js/src/vm/ArgumentsObject-inl.h  58
-rw-r--r--  js/src/vm/ArgumentsObject.cpp  1182
-rw-r--r--  js/src/vm/ArgumentsObject.h  566
-rw-r--r--  js/src/vm/ArrayBufferObject-inl.h  57
-rw-r--r--  js/src/vm/ArrayBufferObject.cpp  2204
-rw-r--r--  js/src/vm/ArrayBufferObject.h  660
-rw-r--r--  js/src/vm/ArrayBufferObjectMaybeShared.cpp  76
-rw-r--r--  js/src/vm/ArrayBufferViewObject.cpp  319
-rw-r--r--  js/src/vm/ArrayBufferViewObject.h  166
-rw-r--r--  js/src/vm/ArrayObject-inl.h  87
-rw-r--r--  js/src/vm/ArrayObject.h  62
-rw-r--r--  js/src/vm/AsyncFunction.cpp  349
-rw-r--r--  js/src/vm/AsyncFunction.h  324
-rw-r--r--  js/src/vm/AsyncFunctionResolveKind.h  18
-rw-r--r--  js/src/vm/AsyncIteration.cpp  1484
-rw-r--r--  js/src/vm/AsyncIteration.h  571
-rw-r--r--  js/src/vm/AtomsTable.h  123
-rw-r--r--  js/src/vm/BigIntType.cpp  3847
-rw-r--r--  js/src/vm/BigIntType.h  481
-rw-r--r--  js/src/vm/BindingKind.h  111
-rw-r--r--  js/src/vm/BooleanObject-inl.h  28
-rw-r--r--  js/src/vm/BooleanObject.h  44
-rw-r--r--  js/src/vm/BoundFunctionObject.cpp  534
-rw-r--r--  js/src/vm/BoundFunctionObject.h  174
-rw-r--r--  js/src/vm/BuildId.cpp  27
-rw-r--r--  js/src/vm/BuiltinObjectKind.cpp  205
-rw-r--r--  js/src/vm/BuiltinObjectKind.h  88
-rw-r--r--  js/src/vm/BytecodeFormatFlags.h  61
-rw-r--r--  js/src/vm/BytecodeIterator-inl.h  40
-rw-r--r--  js/src/vm/BytecodeIterator.h  85
-rw-r--r--  js/src/vm/BytecodeLocation-inl.h  115
-rw-r--r--  js/src/vm/BytecodeLocation.cpp  28
-rw-r--r--  js/src/vm/BytecodeLocation.h  354
-rw-r--r--  js/src/vm/BytecodeUtil-inl.h  242
-rw-r--r--  js/src/vm/BytecodeUtil.cpp  3110
-rw-r--r--  js/src/vm/BytecodeUtil.h  665
-rw-r--r--  js/src/vm/Caches.h  568
-rw-r--r--  js/src/vm/CallAndConstruct.cpp  168
-rw-r--r--  js/src/vm/CallNonGenericMethod.cpp  35
-rw-r--r--  js/src/vm/CharacterEncoding.cpp  888
-rw-r--r--  js/src/vm/CheckIsObjectKind.h  24
-rw-r--r--  js/src/vm/CodeCoverage.cpp  673
-rw-r--r--  js/src/vm/CodeCoverage.h  172
-rw-r--r--  js/src/vm/CommonPropertyNames.h  619
-rw-r--r--  js/src/vm/Compartment-inl.h  442
-rw-r--r--  js/src/vm/Compartment.cpp  616
-rw-r--r--  js/src/vm/Compartment.h  537
-rw-r--r--  js/src/vm/CompilationAndEvaluation.cpp  613
-rw-r--r--  js/src/vm/CompletionKind.h  16
-rw-r--r--  js/src/vm/Compression.cpp  262
-rw-r--r--  js/src/vm/Compression.h  115
-rw-r--r--  js/src/vm/DateObject.h  101
-rw-r--r--  js/src/vm/DateTime.cpp  824
-rw-r--r--  js/src/vm/DateTime.h  388
-rw-r--r--  js/src/vm/EnvironmentObject-inl.h  87
-rw-r--r--  js/src/vm/EnvironmentObject.cpp  4399
-rw-r--r--  js/src/vm/EnvironmentObject.h  1512
-rw-r--r--  js/src/vm/EqualityOperations.cpp  360
-rw-r--r--  js/src/vm/EqualityOperations.h  72
-rw-r--r--  js/src/vm/ErrorMessages.cpp  29
-rw-r--r--  js/src/vm/ErrorObject-inl.h  39
-rw-r--r--  js/src/vm/ErrorObject.cpp  814
-rw-r--r--  js/src/vm/ErrorObject.h  167
-rw-r--r--  js/src/vm/ErrorReporting.cpp  585
-rw-r--r--  js/src/vm/ErrorReporting.h  190
-rw-r--r--  js/src/vm/Exception.cpp  60
-rw-r--r--  js/src/vm/ForOfIterator.cpp  211
-rw-r--r--  js/src/vm/FrameIter-inl.h  54
-rw-r--r--  js/src/vm/FrameIter.cpp  1060
-rw-r--r--  js/src/vm/FrameIter.h  586
-rw-r--r--  js/src/vm/FunctionFlags.cpp  13
-rw-r--r--  js/src/vm/FunctionFlags.h  320
-rw-r--r--  js/src/vm/FunctionPrefixKind.h  18
-rw-r--r--  js/src/vm/GeckoProfiler-inl.h  141
-rw-r--r--  js/src/vm/GeckoProfiler.cpp  561
-rw-r--r--  js/src/vm/GeckoProfiler.h  255
-rw-r--r--  js/src/vm/GeneratorAndAsyncKind.h  17
-rw-r--r--  js/src/vm/GeneratorObject.cpp  508
-rw-r--r--  js/src/vm/GeneratorObject.h  255
-rw-r--r--  js/src/vm/GeneratorResumeKind.h  18
-rw-r--r--  js/src/vm/GetterSetter.cpp  27
-rw-r--r--  js/src/vm/GetterSetter.h  116
-rw-r--r--  js/src/vm/GlobalObject-inl.h  26
-rw-r--r--  js/src/vm/GlobalObject.cpp  1052
-rw-r--r--  js/src/vm/GlobalObject.h  1166
-rw-r--r--  js/src/vm/HelperThreadState.h  823
-rw-r--r--  js/src/vm/HelperThreadTask.h  82
-rw-r--r--  js/src/vm/HelperThreads.cpp  2745
-rw-r--r--  js/src/vm/HelperThreads.h  292
-rw-r--r--  js/src/vm/Id.cpp  50
-rw-r--r--  js/src/vm/Initialization.cpp  357
-rw-r--r--  js/src/vm/InlineCharBuffer-inl.h  158
-rw-r--r--  js/src/vm/InternalThreadPool.cpp  289
-rw-r--r--  js/src/vm/InternalThreadPool.h  74
-rw-r--r--  js/src/vm/Interpreter-inl.h  639
-rw-r--r--  js/src/vm/Interpreter.cpp  5605
-rw-r--r--  js/src/vm/Interpreter.h  705
-rw-r--r--  js/src/vm/IsGivenTypeObject-inl.h  33
-rw-r--r--  js/src/vm/Iteration.cpp  2168
-rw-r--r--  js/src/vm/Iteration.h  794
-rw-r--r--  js/src/vm/JSAtom-inl.h  157
-rw-r--r--  js/src/vm/JSAtom.cpp  1148
-rw-r--r--  js/src/vm/JSAtom.h  113
-rw-r--r--  js/src/vm/JSAtomState.h  63
-rw-r--r--  js/src/vm/JSContext-inl.h  407
-rw-r--r--  js/src/vm/JSContext.cpp  1386
-rw-r--r--  js/src/vm/JSContext.h  1139
-rw-r--r--  js/src/vm/JSFunction-inl.h  141
-rw-r--r--  js/src/vm/JSFunction.cpp  1979
-rw-r--r--  js/src/vm/JSFunction.h  875
-rw-r--r--  js/src/vm/JSONParser.cpp  1107
-rw-r--r--  js/src/vm/JSONParser.h  517
-rw-r--r--  js/src/vm/JSONPrinter.cpp  273
-rw-r--r--  js/src/vm/JSONPrinter.h  93
-rw-r--r--  js/src/vm/JSObject-inl.h  597
-rw-r--r--  js/src/vm/JSObject.cpp  3649
-rw-r--r--  js/src/vm/JSObject.h  1099
-rw-r--r--  js/src/vm/JSScript-inl.h  245
-rw-r--r--  js/src/vm/JSScript.cpp  3779
-rw-r--r--  js/src/vm/JSScript.h  2265
-rw-r--r--  js/src/vm/JitActivation.cpp  261
-rw-r--r--  js/src/vm/JitActivation.h  268
-rw-r--r--  js/src/vm/List-inl.h  129
-rw-r--r--  js/src/vm/List.cpp  11
-rw-r--r--  js/src/vm/List.h  91
-rw-r--r--  js/src/vm/MallocProvider.h  255
-rw-r--r--  js/src/vm/MatchPairs.h  141
-rw-r--r--  js/src/vm/MemoryMetrics.cpp  889
-rw-r--r--  js/src/vm/ModuleBuilder.h  118
-rw-r--r--  js/src/vm/Modules.cpp  1830
-rw-r--r--  js/src/vm/Modules.h  45
-rw-r--r--  js/src/vm/Monitor.h  72
-rw-r--r--  js/src/vm/MutexIDs.h  81
-rw-r--r--  js/src/vm/NativeObject-inl.h  908
-rw-r--r--  js/src/vm/NativeObject.cpp  2854
-rw-r--r--  js/src/vm/NativeObject.h  1892
-rw-r--r--  js/src/vm/NumberObject-inl.h  28
-rw-r--r--  js/src/vm/NumberObject.h  44
-rw-r--r--  js/src/vm/ObjectFlags-inl.h  61
-rw-r--r--  js/src/vm/ObjectFlags.h  77
-rw-r--r--  js/src/vm/ObjectOperations-inl.h  388
-rw-r--r--  js/src/vm/ObjectOperations.h  301
-rw-r--r--  js/src/vm/OffThreadPromiseRuntimeState.cpp  299
-rw-r--r--  js/src/vm/OffThreadPromiseRuntimeState.h  208
-rw-r--r--  js/src/vm/OffThreadScriptCompilation.cpp  153
-rw-r--r--  js/src/vm/Opcodes.h  3632
-rw-r--r--  js/src/vm/PIC.cpp  372
-rw-r--r--  js/src/vm/PIC.h  246
-rw-r--r--  js/src/vm/PlainObject-inl.h  94
-rw-r--r--  js/src/vm/PlainObject.cpp  334
-rw-r--r--  js/src/vm/PlainObject.h  111
-rw-r--r--  js/src/vm/Printer.cpp  559
-rw-r--r--  js/src/vm/Probes-inl.h  95
-rw-r--r--  js/src/vm/Probes.cpp  64
-rw-r--r--  js/src/vm/Probes.h  144
-rw-r--r--  js/src/vm/ProfilingStack.cpp  53
-rw-r--r--  js/src/vm/PromiseLookup.cpp  273
-rw-r--r--  js/src/vm/PromiseLookup.h  163
-rw-r--r--  js/src/vm/PromiseObject.h  250
-rw-r--r--  js/src/vm/PropMap-inl.h  251
-rw-r--r--  js/src/vm/PropMap.cpp  1233
-rw-r--r--  js/src/vm/PropMap.h  1167
-rw-r--r--  js/src/vm/PropertyAndElement.cpp  995
-rw-r--r--  js/src/vm/PropertyDescriptor.cpp  91
-rw-r--r--  js/src/vm/PropertyInfo.h  221
-rw-r--r--  js/src/vm/PropertyKey.h  60
-rw-r--r--  js/src/vm/PropertyResult.h  103
-rw-r--r--  js/src/vm/ProxyObject.cpp  206
-rw-r--r--  js/src/vm/ProxyObject.h  165
-rw-r--r--  js/src/vm/Realm-inl.h  110
-rw-r--r--  js/src/vm/Realm.cpp  774
-rw-r--r--  js/src/vm/Realm.h  886
-rw-r--r--  js/src/vm/RecordTupleShared.cpp  133
-rw-r--r--  js/src/vm/RecordTupleShared.h  32
-rw-r--r--  js/src/vm/RecordType.cpp  538
-rw-r--r--  js/src/vm/RecordType.h  78
-rw-r--r--  js/src/vm/RegExpObject.cpp  1232
-rw-r--r--  js/src/vm/RegExpObject.h  223
-rw-r--r--  js/src/vm/RegExpShared.h  449
-rw-r--r--  js/src/vm/RegExpStatics.cpp  61
-rw-r--r--  js/src/vm/RegExpStatics.h  307
-rw-r--r--  js/src/vm/Runtime.cpp  847
-rw-r--r--  js/src/vm/Runtime.h  1144
-rw-r--r--  js/src/vm/SavedFrame.h  297
-rw-r--r--  js/src/vm/SavedStacks-inl.h  29
-rw-r--r--  js/src/vm/SavedStacks.cpp  2097
-rw-r--r--  js/src/vm/SavedStacks.h  342
-rw-r--r--  js/src/vm/Scope.cpp  1728
-rw-r--r--  js/src/vm/Scope.h  1891
-rw-r--r--  js/src/vm/ScopeKind.h  53
-rw-r--r--  js/src/vm/SelfHosting.cpp  2784
-rw-r--r--  js/src/vm/SelfHosting.h  287
-rw-r--r--  js/src/vm/Shape-inl.h  105
-rw-r--r--  js/src/vm/Shape.cpp  1484
-rw-r--r--  js/src/vm/Shape.h  925
-rw-r--r--  js/src/vm/ShapeZone.cpp  125
-rw-r--r--  js/src/vm/ShapeZone.h  244
-rw-r--r--  js/src/vm/SharedArrayObject.cpp  588
-rw-r--r--  js/src/vm/SharedArrayObject.h  327
-rw-r--r--  js/src/vm/SharedImmutableStringsCache-inl.h  75
-rw-r--r--  js/src/vm/SharedImmutableStringsCache.cpp  147
-rw-r--r--  js/src/vm/SharedImmutableStringsCache.h  425
-rw-r--r--  js/src/vm/SharedMem.h  208
-rw-r--r--  js/src/vm/SharedScriptDataTableHolder.cpp  19
-rw-r--r--  js/src/vm/SharedScriptDataTableHolder.h  88
-rw-r--r--  js/src/vm/SharedStencil.h  849
-rw-r--r--  js/src/vm/SourceHook.cpp  26
-rw-r--r--  js/src/vm/Stack-inl.h  859
-rw-r--r--  js/src/vm/Stack.cpp  766
-rw-r--r--  js/src/vm/Stack.h  999
-rw-r--r--  js/src/vm/StaticStrings.cpp  89
-rw-r--r--  js/src/vm/StaticStrings.h  276
-rw-r--r--  js/src/vm/StencilCache.cpp  67
-rw-r--r--  js/src/vm/StencilCache.h  181
-rw-r--r--  js/src/vm/StencilEnums.h  346
-rw-r--r--  js/src/vm/StencilObject.cpp  147
-rw-r--r--  js/src/vm/StencilObject.h  71
-rw-r--r--  js/src/vm/StringObject-inl.h  51
-rw-r--r--  js/src/vm/StringObject.h  72
-rw-r--r--  js/src/vm/StringType-inl.h  526
-rw-r--r--  js/src/vm/StringType.cpp  2276
-rw-r--r--  js/src/vm/StringType.h  2052
-rw-r--r--  js/src/vm/StructuredClone.cpp  4123
-rw-r--r--  js/src/vm/SymbolType.cpp  146
-rw-r--r--  js/src/vm/SymbolType.h  153
-rw-r--r--  js/src/vm/TaggedProto.cpp  34
-rw-r--r--  js/src/vm/TaggedProto.h  173
-rw-r--r--  js/src/vm/ThrowMsgKind.cpp  36
-rw-r--r--  js/src/vm/ThrowMsgKind.h  37
-rw-r--r--  js/src/vm/Time.cpp  383
-rw-r--r--  js/src/vm/Time.h  176
-rw-r--r--  js/src/vm/ToSource.cpp  249
-rw-r--r--  js/src/vm/ToSource.h  26
-rw-r--r--  js/src/vm/TupleType.cpp  639
-rw-r--r--  js/src/vm/TupleType.h  87
-rw-r--r--  js/src/vm/TypedArrayObject-inl.h  769
-rw-r--r--  js/src/vm/TypedArrayObject.cpp  2998
-rw-r--r--  js/src/vm/TypedArrayObject.h  301
-rw-r--r--  js/src/vm/UbiNode.cpp  527
-rw-r--r--  js/src/vm/UbiNodeCensus.cpp  1323
-rw-r--r--  js/src/vm/UbiNodeShortestPaths.cpp  105
-rw-r--r--  js/src/vm/Uint8Clamped.h  121
-rw-r--r--  js/src/vm/UsageStatistics.cpp  20
-rw-r--r--  js/src/vm/Value.cpp  41
-rw-r--r--  js/src/vm/Warnings.cpp  105
-rw-r--r--  js/src/vm/Warnings.h  27
-rw-r--r--  js/src/vm/Watchtower.cpp  296
-rw-r--r--  js/src/vm/Watchtower.h  120
-rw-r--r--  js/src/vm/WellKnownAtom.cpp  45
-rw-r--r--  js/src/vm/WellKnownAtom.h  67
-rw-r--r--  js/src/vm/WindowProxy.cpp  70
-rw-r--r--  js/src/vm/WrapperObject.h  40
-rw-r--r--  js/src/vm/Xdr.cpp  167
-rw-r--r--  js/src/vm/Xdr.h  457
-rw-r--r--  js/src/vm/jsopcode.py  382
-rwxr-xr-x  js/src/vm/make_opcode_doc.py  195
259 files changed, 144450 insertions, 0 deletions
diff --git a/js/src/vm/Activation-inl.h b/js/src/vm/Activation-inl.h
new file mode 100644
index 0000000000..1ee1439beb
--- /dev/null
+++ b/js/src/vm/Activation-inl.h
@@ -0,0 +1,172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Activation_inl_h
+#define vm_Activation_inl_h
+
+#include "vm/Activation.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT{,_IF}, MOZ_CRASH
+#include "mozilla/Likely.h" // MOZ_UNLIKELY
+#include "mozilla/Maybe.h" // mozilla::Maybe
+
+#include "jit/CalleeToken.h" // js::jit::CalleeToken
+#include "js/Debug.h" // JS::dbg::AutoEntryMonitor
+#include "vm/FrameIter.h" // js::FrameIter
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h" // JSContext
+#include "vm/Stack.h" // js::AbstractFramePtr
+
+namespace js {
+
+inline ActivationEntryMonitor::ActivationEntryMonitor(JSContext* cx)
+ : cx_(cx), entryMonitor_(cx->entryMonitor) {
+ cx->entryMonitor = nullptr;
+}
+
+inline ActivationEntryMonitor::ActivationEntryMonitor(
+ JSContext* cx, InterpreterFrame* entryFrame)
+ : ActivationEntryMonitor(cx) {
+ if (MOZ_UNLIKELY(entryMonitor_)) {
+ init(cx, entryFrame);
+ }
+}
+
+inline ActivationEntryMonitor::ActivationEntryMonitor(
+ JSContext* cx, jit::CalleeToken entryToken)
+ : ActivationEntryMonitor(cx) {
+ if (MOZ_UNLIKELY(entryMonitor_)) {
+ init(cx, entryToken);
+ }
+}
+
+inline ActivationEntryMonitor::~ActivationEntryMonitor() {
+ if (entryMonitor_) {
+ entryMonitor_->Exit(cx_);
+ }
+
+ cx_->entryMonitor = entryMonitor_;
+}
+
+inline Activation::Activation(JSContext* cx, Kind kind)
+ : cx_(cx),
+ compartment_(cx->compartment()),
+ prev_(cx->activation_),
+ prevProfiling_(prev_ ? prev_->mostRecentProfiling() : nullptr),
+ hideScriptedCallerCount_(0),
+ frameCache_(cx),
+ asyncStack_(cx, cx->asyncStackForNewActivations()),
+ asyncCause_(cx->asyncCauseForNewActivations),
+ asyncCallIsExplicit_(cx->asyncCallIsExplicit),
+ kind_(kind) {
+ cx->asyncStackForNewActivations() = nullptr;
+ cx->asyncCauseForNewActivations = nullptr;
+ cx->asyncCallIsExplicit = false;
+ cx->activation_ = this;
+}
+
+inline Activation::~Activation() {
+ MOZ_ASSERT_IF(isProfiling(), this != cx_->profilingActivation_);
+ MOZ_ASSERT(cx_->activation_ == this);
+ MOZ_ASSERT(hideScriptedCallerCount_ == 0);
+ cx_->activation_ = prev_;
+ cx_->asyncCauseForNewActivations = asyncCause_;
+ cx_->asyncStackForNewActivations() = asyncStack_;
+ cx_->asyncCallIsExplicit = asyncCallIsExplicit_;
+}
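+
+// Note that construction and destruction follow a strict LIFO discipline:
+// cx->activation_ always points at the innermost live Activation, and the
+// async-stack state the constructor consumes from the JSContext is restored
+// verbatim by the destructor, so a pending async stack is attributed to
+// exactly one activation.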
+
+inline bool Activation::isProfiling() const {
+ if (isInterpreter()) {
+ return asInterpreter()->isProfiling();
+ }
+
+ MOZ_ASSERT(isJit());
+ return asJit()->isProfiling();
+}
+
+inline Activation* Activation::mostRecentProfiling() {
+ if (isProfiling()) {
+ return this;
+ }
+ return prevProfiling_;
+}
+
+inline LiveSavedFrameCache* Activation::getLiveSavedFrameCache(JSContext* cx) {
+ if (!frameCache_.get().initialized() && !frameCache_.get().init(cx)) {
+ return nullptr;
+ }
+ return frameCache_.address();
+}
+
+/* static */ inline mozilla::Maybe<LiveSavedFrameCache::FramePtr>
+LiveSavedFrameCache::FramePtr::create(const FrameIter& iter) {
+ if (iter.done()) {
+ return mozilla::Nothing();
+ }
+
+ if (iter.isPhysicalJitFrame()) {
+ return mozilla::Some(FramePtr(iter.physicalJitFrame()));
+ }
+
+ if (!iter.hasUsableAbstractFramePtr()) {
+ return mozilla::Nothing();
+ }
+
+ auto afp = iter.abstractFramePtr();
+
+ if (afp.isInterpreterFrame()) {
+ return mozilla::Some(FramePtr(afp.asInterpreterFrame()));
+ }
+ if (afp.isWasmDebugFrame()) {
+ return mozilla::Some(FramePtr(afp.asWasmDebugFrame()));
+ }
+ if (afp.isRematerializedFrame()) {
+ return mozilla::Some(FramePtr(afp.asRematerializedFrame()));
+ }
+
+ MOZ_CRASH("unexpected frame type");
+}
+
+struct LiveSavedFrameCache::FramePtr::HasCachedMatcher {
+ template <typename Frame>
+ bool operator()(Frame* f) const {
+ return f->hasCachedSavedFrame();
+ }
+};
+
+inline bool LiveSavedFrameCache::FramePtr::hasCachedSavedFrame() const {
+ return ptr.match(HasCachedMatcher());
+}
+
+struct LiveSavedFrameCache::FramePtr::SetHasCachedMatcher {
+ template <typename Frame>
+ void operator()(Frame* f) {
+ f->setHasCachedSavedFrame();
+ }
+};
+
+inline void LiveSavedFrameCache::FramePtr::setHasCachedSavedFrame() {
+ ptr.match(SetHasCachedMatcher());
+}
+
+struct LiveSavedFrameCache::FramePtr::ClearHasCachedMatcher {
+ template <typename Frame>
+ void operator()(Frame* f) {
+ f->clearHasCachedSavedFrame();
+ }
+};
+
+inline void LiveSavedFrameCache::FramePtr::clearHasCachedSavedFrame() {
+ ptr.match(ClearHasCachedMatcher());
+}
+
+inline bool Activation::hasWasmExitFP() const {
+ return isJit() && asJit()->hasWasmExitFP();
+}
+
+} // namespace js
+
+#endif // vm_Activation_inl_h
diff --git a/js/src/vm/Activation.cpp b/js/src/vm/Activation.cpp
new file mode 100644
index 0000000000..19b383c9c5
--- /dev/null
+++ b/js/src/vm/Activation.cpp
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Activation-inl.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include "gc/GC.h" // js::gc::AutoSuppressGC
+#include "jit/CalleeToken.h" // js::jit::CalleeToken{IsFunction,To{Function,Script}}
+#include "js/RootingAPI.h" // JS::Rooted
+#include "js/Value.h" // JS::Value
+#include "vm/JSContext.h" // JSContext, js::TlsContext
+#include "vm/Stack.h" // js::InterpreterFrame
+
+#include "vm/Compartment-inl.h" // JS::Compartment::wrap
+
+using namespace js;
+
+using JS::ObjectOrNullValue;
+using JS::Rooted;
+using JS::UndefinedValue;
+using JS::Value;
+
+Value ActivationEntryMonitor::asyncStack(JSContext* cx) {
+ Rooted<Value> stack(cx, ObjectOrNullValue(cx->asyncStackForNewActivations()));
+ if (!cx->compartment()->wrap(cx, &stack)) {
+ cx->clearPendingException();
+ return UndefinedValue();
+ }
+ return stack;
+}
+
+void ActivationEntryMonitor::init(JSContext* cx, InterpreterFrame* entryFrame) {
+ // The InterpreterFrame is not yet part of an Activation, so it won't
+ // be traced if we trigger GC here. Suppress GC to avoid this.
+ gc::AutoSuppressGC suppressGC(cx);
+ Rooted<Value> stack(cx, asyncStack(cx));
+ const char* asyncCause = cx->asyncCauseForNewActivations;
+ if (entryFrame->isFunctionFrame()) {
+ entryMonitor_->Entry(cx, &entryFrame->callee(), stack, asyncCause);
+ } else {
+ entryMonitor_->Entry(cx, entryFrame->script(), stack, asyncCause);
+ }
+}
+
+void ActivationEntryMonitor::init(JSContext* cx, jit::CalleeToken entryToken) {
+ // The CalleeToken is not traced at this point and we also don't want
+ // a GC to discard the code we're about to enter, so we suppress GC.
+ gc::AutoSuppressGC suppressGC(cx);
+ RootedValue stack(cx, asyncStack(cx));
+ const char* asyncCause = cx->asyncCauseForNewActivations;
+ if (jit::CalleeTokenIsFunction(entryToken)) {
+ entryMonitor_->Entry(cx_, jit::CalleeTokenToFunction(entryToken), stack,
+ asyncCause);
+ } else {
+ entryMonitor_->Entry(cx_, jit::CalleeTokenToScript(entryToken), stack,
+ asyncCause);
+ }
+}
+
+void Activation::registerProfiling() {
+ MOZ_ASSERT(isProfiling());
+ cx_->profilingActivation_ = this;
+}
+
+void Activation::unregisterProfiling() {
+ MOZ_ASSERT(isProfiling());
+ MOZ_ASSERT(cx_->profilingActivation_ == this);
+ cx_->profilingActivation_ = prevProfiling_;
+}
+
+ActivationIterator::ActivationIterator(JSContext* cx)
+ : activation_(cx->activation_) {
+ MOZ_ASSERT(cx == TlsContext.get());
+}
+
+ActivationIterator& ActivationIterator::operator++() {
+ MOZ_ASSERT(activation_);
+ activation_ = activation_->prev();
+ return *this;
+}
diff --git a/js/src/vm/Activation.h b/js/src/vm/Activation.h
new file mode 100644
index 0000000000..4153e27478
--- /dev/null
+++ b/js/src/vm/Activation.h
@@ -0,0 +1,565 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Activation_h
+#define vm_Activation_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Attributes.h" // MOZ_RAII
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "jit/CalleeToken.h" // js::jit::CalleeToken
+#include "js/RootingAPI.h" // JS::Handle, JS::Rooted
+#include "js/TypeDecls.h" // jsbytecode
+#include "js/Value.h" // JS::Value
+#include "vm/SavedFrame.h" // js::SavedFrame
+#include "vm/Stack.h" // js::InterpreterRegs
+
+struct JS_PUBLIC_API JSContext;
+
+class JSFunction;
+class JSObject;
+class JSScript;
+
+namespace JS {
+
+class CallArgs;
+class JS_PUBLIC_API Compartment;
+
+namespace dbg {
+class JS_PUBLIC_API AutoEntryMonitor;
+} // namespace dbg
+
+} // namespace JS
+
+namespace js {
+
+class InterpreterActivation;
+
+namespace jit {
+class JitActivation;
+} // namespace jit
+
+// This class is separate from Activation, because it calls Compartment::wrap()
+// which can GC and walk the stack. It's not safe to do that within the
+// JitActivation constructor.
+class MOZ_RAII ActivationEntryMonitor {
+ JSContext* cx_;
+
+ // The entry point monitor that was set on cx_->runtime() when this
+ // ActivationEntryMonitor was created.
+ JS::dbg::AutoEntryMonitor* entryMonitor_;
+
+ explicit inline ActivationEntryMonitor(JSContext* cx);
+
+ ActivationEntryMonitor(const ActivationEntryMonitor& other) = delete;
+ void operator=(const ActivationEntryMonitor& other) = delete;
+
+ void init(JSContext* cx, jit::CalleeToken entryToken);
+ void init(JSContext* cx, InterpreterFrame* entryFrame);
+
+ JS::Value asyncStack(JSContext* cx);
+
+ public:
+ inline ActivationEntryMonitor(JSContext* cx, InterpreterFrame* entryFrame);
+ inline ActivationEntryMonitor(JSContext* cx, jit::CalleeToken entryToken);
+ inline ~ActivationEntryMonitor();
+};
+
+// [SMDOC] LiveSavedFrameCache: SavedFrame caching to minimize stack walking
+//
+// Since each SavedFrame object includes a 'parent' pointer to the SavedFrame
+// for its caller, if we could easily find the right SavedFrame for a given
+// stack frame, we wouldn't need to walk the rest of the stack. Traversing deep
+// stacks can be expensive, and when we're profiling or instrumenting code, we
+// may want to capture JavaScript stacks frequently, so such cases would benefit
+// if we could avoid walking the entire stack.
+//
+// We could have a cache mapping frame addresses to their SavedFrame objects,
+// but invalidating its entries would be a challenge. Popping a stack frame is
+// extremely performance-sensitive, and SpiderMonkey stack frames can be OSR'd,
+// thrown, rematerialized, and perhaps meet other fates; we would rather our
+// cache not depend on handling so many tricky cases.
+//
+// It turns out that we can keep the cache accurate by reserving a single bit in
+// the stack frame, which must be clear on any newly pushed frame. When we
+// insert an entry into the cache mapping a given frame address to its
+// SavedFrame, we set the bit in the frame. Then, we take care to probe the
+// cache only for frames whose bit is set; the bit tells us that the frame has
+// never left the stack, so its cache entry must be accurate, at least about
+// which function the frame is executing (the line may have changed; more about
+// that below). The code refers to this bit as the 'hasCachedSavedFrame' flag.
+//
+// We could manage such a cache replacing least-recently used entries, but we
+// can do better than that: the cache can be a stack, of which we need examine
+// only entries from the top.
+//
+// First, observe that stacks are walked from the youngest frame to the oldest,
+// but SavedFrame chains are built from oldest to youngest, to ensure common
+// tails are shared. This means that capturing a stack is necessarily a
+// two-phase process: walk the stack, and then build the SavedFrames.
+//
+// Naturally, the first time we capture the stack, the cache is empty, and we
+// must traverse the entire stack. As we build each SavedFrame, we push an entry
+// associating the frame's address to its SavedFrame on the cache, and set the
+// frame's bit. At the end, every frame has its bit set and an entry in the
+// cache.
+//
+// Then the program runs some more. Some, none, or all of the frames are popped.
+// Any new frames are pushed with their bit clear. Any frame with its bit set
+// has never left the stack. The cache is left untouched.
+//
+// For the next capture, we walk the stack up to the first frame with its bit
+// set, if there is one. Call it F; it must have a cache entry. We pop entries
+// from the cache - all invalid, because they are above F's entry, and hence
+// younger - until we find the entry matching F's address. Since F's bit is set,
+// we know it never left the stack, and hence that no younger frame could have
+// had a colliding address. And since the frame's bit was set when we pushed the
+// cache entry, we know the entry is still valid.
+//
+// F's cache entry's SavedFrame covers the rest of the stack, so we don't need
+// to walk the stack any further. Now we begin building SavedFrame objects for
+// the new frames, pushing cache entries, and setting bits on the frames. By the
+// end, the cache again covers the full stack, and every frame's bit is set.
+//
+// If we walk the stack to the end, and find no frame with its bit set, then the
+// entire cache is invalid. At this point, it must be emptied, so that the new
+// entries we are about to push are the only frames in the cache.
+//
+// For example, suppose we have the following stack (let 'A > B' mean "A called
+// B", so the frames are listed oldest first):
+//
+// P > Q > R > S Initial stack, bits not set.
+// P* > Q* > R* > S* Capture a SavedFrame stack, set bits.
+// The cache now holds: P > Q > R > S.
+// P* > Q* > R* Return from S.
+// P* > Q* Return from R.
+// P* > Q* > T > U Call T and U. New frames have clear bits.
+//
+// If we capture the stack now, the cache still holds:
+//
+// P > Q > R > S
+//
+// As we traverse the stack, we'll cross U and T, and then find Q with its bit
+// set. We pop entries from the cache until we find the entry for Q; this
+// removes entries R and S, which were indeed invalid. In Q's cache entry, we
+// find the SavedFrame representing the stack P > Q. Now we build SavedFrames
+// for the new portion of the stack, pushing an entry for T and setting the bit
+// on the frame, and then doing the same for U. In the end, the call stack again
+// has bits set on all its frames:
+//
+// P* > Q* > T* > U* All frames are now in the cache.
+//
+// And the cache again holds entries for the entire stack:
+//
+// P > Q > T > U
+//
+// Details:
+//
+// - When we find a cache entry whose frame address matches our frame F, we know
+// that F has never left the stack, but it may certainly be the case that
+// execution took place in that frame, and that the current source position
+// within F's function has changed. This means that the entry's SavedFrame,
+// which records the source line and column as well as the function, is not
+// correct. To detect this case, when we push a cache entry, we record the
+// frame's pc. When consulting the cache, if a frame's address matches but its
+// pc does not, then we pop the cache entry, clear the frame's bit, and
+// continue walking the stack. The next stack frame will definitely hit: since
+// its callee frame never left the stack, the calling frame never got the
+// chance to execute.
+//
+// - Generators, at least conceptually, have long-lived stack frames that
+// disappear from the stack when the generator yields, and reappear on the
+// stack when the generator's 'next' method is called. When a generator's
+// frame is placed again atop the stack, its bit must be cleared - for the
+// purposes of the cache, treating the frame as a new frame - to respect the
+// invariants we used to justify the algorithm above. Async function
+// activations usually appear atop empty stacks, since they are invoked as a
+// promise callback, but the same rule applies.
+//
+// - SpiderMonkey has many types of stack frames, and not all have a place to
+// store a bit indicating a cached SavedFrame. But as long as we don't create
+// cache entries for frames we can't mark, simply omitting them from the cache
+// is harmless. Uncacheable frame types include inlined Ion frames and
+// non-Debug wasm frames. The LiveSavedFrameCache::FramePtr type represents
+// only pointers to frames that can be cached, so if you have a FramePtr, you
+// don't need to further check the frame for cacheability. FramePtr provides
+// access to the hasCachedSavedFrame bit.
+//
+// - We actually break up the cache into one cache per Activation. Popping an
+// activation invalidates all its cache entries, simply by freeing the cache
+// altogether.
+//
+// - The entire chain of SavedFrames for a given stack capture is created in the
+// compartment of the code that requested the capture, *not* in that of the
+// frames it represents, so in general, different compartments may have
+// different SavedFrame objects representing the same actual stack frame. The
+// LiveSavedFrameCache simply records whichever SavedFrames were used in the
+// most recent captures. When we find a cache hit, we check the entry's
+// SavedFrame's compartment against the current compartment; if they do not
+// match, we clear the entire cache.
+//
+// This means that it is not always true that, if a frame's
+// hasCachedSavedFrame bit is set, it must have an entry in the cache. The
+// actual invariant is: either the cache is completely empty, or the frames'
+// bits are trustworthy. This invariant holds even though capture can be
+// interrupted at many places by OOM failures. Clearing the cache is a single,
+// uninterruptible step. When we try to look up a frame whose bit is set and
+// find an empty cache, we clear the frame's bit. And we only add the first
+// frame to an empty cache once we've walked the stack all the way, so we know
+// that all frames' bits are cleared by that point.
+//
+// - When the Debugger API evaluates an expression in some frame (the 'target
+// frame'), it's SpiderMonkey's convention that the target frame be treated as
+// the parent of the eval frame. In reality, of course, the eval frame is
+// pushed on the top of the stack like any other frame, but stack captures
+// simply jump straight over the intervening frames, so that the '.parent'
+// property of a SavedFrame for the eval is the SavedFrame for the target.
+// This is arranged by giving the eval frame an 'evalInFramePrev` link
+// pointing to the target, which an ordinary FrameIter will notice and
+// respect.
+//
+// If the LiveSavedFrameCache were presented with stack traversals that
+// skipped frames in this way, it would cause havoc. First, with no debugger
+// eval frames present, capture the stack, populating the cache. Then push a
+// debugger eval frame and capture again; the skipped frames appear to be
+// absent from the stack. Now pop the debugger eval frame, and capture a third
+// time: the no-longer-skipped frames seem to reappear on the stack, with
+// their cached bits still set.
+//
+// The LiveSavedFrameCache assumes that the stack it sees is used in a
+// stack-like fashion: if a frame has its bit set, it has never left the
+// stack. To support this assumption, when the cache is in use, we do not skip
+// the frames between a debugger eval frame and its target; we always traverse
+// the entire stack, invalidating and populating the cache in the usual way.
+// Instead, when we construct a SavedFrame for a debugger eval frame, we
+// select the appropriate parent at that point: rather than the next-older
+// frame, we find the SavedFrame for the eval's target frame. The skip appears
+// in the SavedFrame chains, even as the traversal covers all the frames.
+//
+// - Rematerialized frames (see ../jit/RematerializedFrame.h) are always created
+// with their hasCachedSavedFrame bits clear: although there may be extant
+// SavedFrames built from the original IonMonkey frame, the Rematerialized
+// frames will not have cache entries for them until they are traversed in a
+// capture themselves.
+//
+// This means that, oddly, it is not always true that, once we reach a frame
+// with its hasCachedSavedFrame bit set, all its parents will have the bit set
+// as well. However, clear bits under younger set bits will only occur on
+// Rematerialized frames.
+class LiveSavedFrameCache {
+ public:
+ // The address of a live frame for which we can cache SavedFrames: it has a
+ // 'hasCachedSavedFrame' bit we can examine and set, and can be converted to
+ // a Key to index the cache.
+ class FramePtr {
+ // We use jit::CommonFrameLayout for both Baseline frames and Ion
+ // physical frames.
+ using Ptr = mozilla::Variant<InterpreterFrame*, jit::CommonFrameLayout*,
+ jit::RematerializedFrame*, wasm::DebugFrame*>;
+
+ Ptr ptr;
+
+ template <typename Frame>
+ explicit FramePtr(Frame ptr) : ptr(ptr) {}
+
+ struct HasCachedMatcher;
+ struct SetHasCachedMatcher;
+ struct ClearHasCachedMatcher;
+
+ public:
+ // If iter's frame is of a type that can be cached, construct a FramePtr
+ // for its frame. Otherwise, return Nothing.
+ static inline mozilla::Maybe<FramePtr> create(const FrameIter& iter);
+
+ inline bool hasCachedSavedFrame() const;
+ inline void setHasCachedSavedFrame();
+ inline void clearHasCachedSavedFrame();
+
+ // Return true if this FramePtr refers to an interpreter frame.
+ inline bool isInterpreterFrame() const {
+ return ptr.is<InterpreterFrame*>();
+ }
+
+ // If this FramePtr is an interpreter frame, return a reference to it.
+ inline InterpreterFrame& asInterpreterFrame() const {
+ return *ptr.as<InterpreterFrame*>();
+ }
+
+ // Return true if this FramePtr refers to a rematerialized frame.
+ inline bool isRematerializedFrame() const {
+ return ptr.is<jit::RematerializedFrame*>();
+ }
+
+ bool operator==(const FramePtr& rhs) const { return rhs.ptr == this->ptr; }
+ bool operator!=(const FramePtr& rhs) const { return !(rhs == *this); }
+ };
+
+ private:
+ // A key in the cache: the address of a frame, live or dead, for which we
+ // can cache SavedFrames. Since the pointer may not be live, the only
+ // operation this type permits is comparison.
+ class Key {
+ FramePtr framePtr;
+
+ public:
+ MOZ_IMPLICIT Key(const FramePtr& framePtr) : framePtr(framePtr) {}
+
+ bool operator==(const Key& rhs) const {
+ return rhs.framePtr == this->framePtr;
+ }
+ bool operator!=(const Key& rhs) const { return !(rhs == *this); }
+ };
+
+ struct Entry {
+ const Key key;
+ const jsbytecode* pc;
+ HeapPtr<SavedFrame*> savedFrame;
+
+ Entry(const Key& key, const jsbytecode* pc, SavedFrame* savedFrame)
+ : key(key), pc(pc), savedFrame(savedFrame) {}
+ };
+
+ using EntryVector = Vector<Entry, 0, SystemAllocPolicy>;
+ EntryVector* frames;
+
+ LiveSavedFrameCache(const LiveSavedFrameCache&) = delete;
+ LiveSavedFrameCache& operator=(const LiveSavedFrameCache&) = delete;
+
+ public:
+ explicit LiveSavedFrameCache() : frames(nullptr) {}
+
+ LiveSavedFrameCache(LiveSavedFrameCache&& rhs) : frames(rhs.frames) {
+ MOZ_ASSERT(this != &rhs, "self-move disallowed");
+ rhs.frames = nullptr;
+ }
+
+ ~LiveSavedFrameCache() {
+ if (frames) {
+ js_delete(frames);
+ frames = nullptr;
+ }
+ }
+
+ bool initialized() const { return !!frames; }
+ bool init(JSContext* cx) {
+ frames = js_new<EntryVector>();
+ if (!frames) {
+ JS_ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+ }
+
+ void trace(JSTracer* trc);
+
+ // Set |frame| to the cached SavedFrame corresponding to |framePtr| at |pc|.
+ // |framePtr|'s hasCachedSavedFrame bit must be set. Remove all cache
+ // entries for frames younger than that one.
+ //
+ // This may set |frame| to nullptr if |pc| is different from the pc supplied
+ // when the cache entry was inserted. In this case, the cached SavedFrame
+ // (probably) has the wrong source position. Entries for younger frames are
+ // still removed. The next frame, if any, will be a cache hit.
+ //
+ // This may also set |frame| to nullptr if the cache was populated with
+ // SavedFrame objects for a different compartment than cx's current
+ // compartment. In this case, the entire cache is flushed.
+ void find(JSContext* cx, FramePtr& framePtr, const jsbytecode* pc,
+ MutableHandle<SavedFrame*> frame) const;
+
+ // Search the cache for a frame matching |framePtr|, without removing any
+ // entries. Return the matching saved frame, or nullptr if none is found.
+ // This is used for resolving |evalInFramePrev| links.
+ void findWithoutInvalidation(const FramePtr& framePtr,
+ MutableHandle<SavedFrame*> frame) const;
+
+ // Push a cache entry mapping |framePtr| and |pc| to |savedFrame| on the top
+ // of the cache's stack. You must insert entries for frames from oldest to
+ // youngest. They must all be younger than the frame that the |find| method
+ // found a hit for; or you must have cleared the entire cache with the
+ // |clear| method.
+ bool insert(JSContext* cx, FramePtr&& framePtr, const jsbytecode* pc,
+ Handle<SavedFrame*> savedFrame);
+
+ // Remove all entries from the cache.
+ void clear() {
+ if (frames) frames->clear();
+ }
+};
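+
+// A sketch of how a capture is expected to drive this cache (the real logic
+// lives in SavedStacks.cpp; this is illustrative, not normative):
+//
+//   Rooted<SavedFrame*> parent(cx, nullptr);
+//   for (FrameIter iter(cx); !iter.done(); ++iter) {
+//     auto framePtr = LiveSavedFrameCache::FramePtr::create(iter);
+//     if (framePtr && framePtr->hasCachedSavedFrame()) {
+//       cache->find(cx, *framePtr, iter.pc(), &parent);
+//       if (parent) {
+//         break;  // Hit: |parent| covers this frame and everything older.
+//       }
+//       // Otherwise the pc mismatched; keep walking, the next frame hits.
+//     }
+//     // Remember this frame; after the walk, build SavedFrames oldest-first,
+//     // calling insert() and setHasCachedSavedFrame() for each one.
+//   }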
+
+static_assert(
+ sizeof(LiveSavedFrameCache) == sizeof(uintptr_t),
+ "Every js::Activation has a LiveSavedFrameCache, so we need to be pretty "
+ "careful "
+ "about avoiding bloat. If you're adding members to LiveSavedFrameCache, "
+ "maybe you "
+ "should consider figuring out a way to make js::Activation have a "
+ "LiveSavedFrameCache* instead of a Rooted<LiveSavedFrameCache>.");
+
+class Activation {
+ protected:
+ JSContext* cx_;
+ JS::Compartment* compartment_;
+ Activation* prev_;
+ Activation* prevProfiling_;
+
+ // Counter incremented by JS::HideScriptedCaller and decremented by
+ // JS::UnhideScriptedCaller. If > 0 for the top activation,
+ // DescribeScriptedCaller will return null instead of querying that
+ // activation, which should prompt the caller to consult embedding-specific
+ // data structures instead.
+ size_t hideScriptedCallerCount_;
+
+ // The cache of SavedFrame objects we have already captured when walking
+ // this activation's stack.
+ JS::Rooted<LiveSavedFrameCache> frameCache_;
+
+ // Youngest saved frame of an async stack that will be iterated during stack
+ // capture in place of the actual stack of previous activations. Note that
+ // the stack of this activation is captured entirely before this is used.
+ //
+ // Usually this is nullptr, meaning that normal stack capture will occur.
+ // When this is set, the stack of any previous activation is ignored.
+ JS::Rooted<SavedFrame*> asyncStack_;
+
+ // Value of asyncCause to be attached to asyncStack_.
+ const char* asyncCause_;
+
+ // True if the async call was explicitly requested, e.g. via
+ // callFunctionWithAsyncStack.
+ bool asyncCallIsExplicit_;
+
+ enum Kind { Interpreter, Jit };
+ Kind kind_;
+
+ inline Activation(JSContext* cx, Kind kind);
+ inline ~Activation();
+
+ public:
+ JSContext* cx() const { return cx_; }
+ JS::Compartment* compartment() const { return compartment_; }
+ Activation* prev() const { return prev_; }
+ Activation* prevProfiling() const { return prevProfiling_; }
+ inline Activation* mostRecentProfiling();
+
+ bool isInterpreter() const { return kind_ == Interpreter; }
+ bool isJit() const { return kind_ == Jit; }
+ inline bool hasWasmExitFP() const;
+
+ inline bool isProfiling() const;
+ void registerProfiling();
+ void unregisterProfiling();
+
+ InterpreterActivation* asInterpreter() const {
+ MOZ_ASSERT(isInterpreter());
+ return (InterpreterActivation*)this;
+ }
+ jit::JitActivation* asJit() const {
+ MOZ_ASSERT(isJit());
+ return (jit::JitActivation*)this;
+ }
+
+ void hideScriptedCaller() { hideScriptedCallerCount_++; }
+ void unhideScriptedCaller() {
+ MOZ_ASSERT(hideScriptedCallerCount_ > 0);
+ hideScriptedCallerCount_--;
+ }
+ bool scriptedCallerIsHidden() const { return hideScriptedCallerCount_ > 0; }
+
+ SavedFrame* asyncStack() { return asyncStack_; }
+
+ const char* asyncCause() const { return asyncCause_; }
+
+ bool asyncCallIsExplicit() const { return asyncCallIsExplicit_; }
+
+ inline LiveSavedFrameCache* getLiveSavedFrameCache(JSContext* cx);
+ void clearLiveSavedFrameCache() { frameCache_.get().clear(); }
+
+ private:
+ Activation(const Activation& other) = delete;
+ void operator=(const Activation& other) = delete;
+};
+
+// This variable holds a special opcode value which is greater than all normal
+// opcodes, and is chosen such that the bitwise or of this value with any
+// opcode is this value.
+constexpr jsbytecode EnableInterruptsPseudoOpcode = -1;
+
+static_assert(EnableInterruptsPseudoOpcode >= JSOP_LIMIT,
+ "EnableInterruptsPseudoOpcode must be greater than any opcode");
+static_assert(
+ EnableInterruptsPseudoOpcode == jsbytecode(-1),
+ "EnableInterruptsPseudoOpcode must be the maximum jsbytecode value");
+
+class InterpreterFrameIterator;
+class RunState;
+
+class InterpreterActivation : public Activation {
+ friend class js::InterpreterFrameIterator;
+
+ InterpreterRegs regs_;
+ InterpreterFrame* entryFrame_;
+ size_t opMask_; // For debugger interrupts, see js::Interpret.
+
+#ifdef DEBUG
+ size_t oldFrameCount_;
+#endif
+
+ public:
+ inline InterpreterActivation(RunState& state, JSContext* cx,
+ InterpreterFrame* entryFrame);
+ inline ~InterpreterActivation();
+
+ inline bool pushInlineFrame(const JS::CallArgs& args,
+ JS::Handle<JSScript*> script,
+ MaybeConstruct constructing);
+ inline void popInlineFrame(InterpreterFrame* frame);
+
+ inline bool resumeGeneratorFrame(JS::Handle<JSFunction*> callee,
+ JS::Handle<JSObject*> envChain);
+
+ InterpreterFrame* current() const { return regs_.fp(); }
+ InterpreterRegs& regs() { return regs_; }
+ InterpreterFrame* entryFrame() const { return entryFrame_; }
+ size_t opMask() const { return opMask_; }
+
+ bool isProfiling() const { return false; }
+
+ // If this js::Interpret frame is running |script|, enable interrupts.
+ void enableInterruptsIfRunning(JSScript* script) {
+ if (regs_.fp()->script() == script) {
+ enableInterruptsUnconditionally();
+ }
+ }
+ void enableInterruptsUnconditionally() {
+ opMask_ = EnableInterruptsPseudoOpcode;
+ }
+ void clearInterruptsMask() { opMask_ = 0; }
+};
+
+// Iterates over a thread's activation list.
+class ActivationIterator {
+ protected:
+ Activation* activation_;
+
+ public:
+ explicit ActivationIterator(JSContext* cx);
+
+ ActivationIterator& operator++();
+
+ Activation* operator->() const { return activation_; }
+ Activation* activation() const { return activation_; }
+ bool done() const { return activation_ == nullptr; }
+};
+
+} // namespace js
+
+#endif // vm_Activation_h
diff --git a/js/src/vm/ArgumentsObject-inl.h b/js/src/vm/ArgumentsObject-inl.h
new file mode 100644
index 0000000000..4ef7493f79
--- /dev/null
+++ b/js/src/vm/ArgumentsObject-inl.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ArgumentsObject_inl_h
+#define vm_ArgumentsObject_inl_h
+
+#include "vm/ArgumentsObject.h"
+
+#include "vm/EnvironmentObject.h"
+
+#include "vm/EnvironmentObject-inl.h"
+
+namespace js {
+
+inline const Value& ArgumentsObject::element(uint32_t i) const {
+ MOZ_ASSERT(isElement(i));
+ const Value& v = data()->args[i];
+ if (IsMagicScopeSlotValue(v)) {
+ CallObject& callobj =
+ getFixedSlot(MAYBE_CALL_SLOT).toObject().as<CallObject>();
+ return callobj.aliasedFormalFromArguments(v);
+ }
+ return v;
+}
+
+inline void ArgumentsObject::setElement(uint32_t i, const Value& v) {
+ MOZ_ASSERT(isElement(i));
+ GCPtr<Value>& lhs = data()->args[i];
+ if (IsMagicScopeSlotValue(lhs)) {
+ CallObject& callobj =
+ getFixedSlot(MAYBE_CALL_SLOT).toObject().as<CallObject>();
+ callobj.setAliasedFormalFromArguments(lhs, v);
+ } else {
+ lhs = v;
+ }
+}
+
+inline bool ArgumentsObject::maybeGetElements(uint32_t start, uint32_t count,
+ Value* vp) {
+ MOZ_ASSERT(start + count >= start);
+
+ uint32_t length = initialLength();
+ if (start > length || start + count > length || hasOverriddenElement()) {
+ return false;
+ }
+
+ for (uint32_t i = start, end = start + count; i < end; ++i, ++vp) {
+ *vp = element(i);
+ }
+ return true;
+}
+
+} /* namespace js */
+
+#endif /* vm_ArgumentsObject_inl_h */
diff --git a/js/src/vm/ArgumentsObject.cpp b/js/src/vm/ArgumentsObject.cpp
new file mode 100644
index 0000000000..3a058e0df8
--- /dev/null
+++ b/js/src/vm/ArgumentsObject.cpp
@@ -0,0 +1,1182 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/ArgumentsObject-inl.h"
+
+#include "mozilla/Maybe.h"
+#include "mozilla/PodOperations.h"
+
+#include <algorithm>
+
+#include "gc/GCContext.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "util/BitArray.h"
+#include "vm/GlobalObject.h"
+#include "vm/Stack.h"
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#include "gc/Nursery-inl.h"
+#include "vm/FrameIter-inl.h" // js::FrameIter::unaliasedForEachActual
+#include "vm/NativeObject-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+
+/* static */
+size_t RareArgumentsData::bytesRequired(size_t numActuals) {
+ size_t extraBytes = NumWordsForBitArrayOfLength(numActuals) * sizeof(size_t);
+ return offsetof(RareArgumentsData, deletedBits_) + extraBytes;
+}
+
+/* static */
+RareArgumentsData* RareArgumentsData::create(JSContext* cx,
+ ArgumentsObject* obj) {
+ size_t bytes = RareArgumentsData::bytesRequired(obj->initialLength());
+
+ uint8_t* data = AllocateObjectBuffer<uint8_t>(cx, obj, bytes);
+ if (!data) {
+ return nullptr;
+ }
+
+ mozilla::PodZero(data, bytes);
+
+ AddCellMemory(obj, bytes, MemoryUse::RareArgumentsData);
+
+ return new (data) RareArgumentsData();
+}
+
+bool ArgumentsObject::createRareData(JSContext* cx) {
+ MOZ_ASSERT(!data()->rareData);
+
+ RareArgumentsData* rareData = RareArgumentsData::create(cx, this);
+ if (!rareData) {
+ return false;
+ }
+
+ data()->rareData = rareData;
+ markElementOverridden();
+ return true;
+}
+
+bool ArgumentsObject::markElementDeleted(JSContext* cx, uint32_t i) {
+ RareArgumentsData* data = getOrCreateRareData(cx);
+ if (!data) {
+ return false;
+ }
+
+ data->markElementDeleted(initialLength(), i);
+ return true;
+}
+
+/* static */
+void ArgumentsObject::MaybeForwardToCallObject(AbstractFramePtr frame,
+ ArgumentsObject* obj,
+ ArgumentsData* data) {
+ JSScript* script = frame.script();
+ if (frame.callee()->needsCallObject() && script->argsObjAliasesFormals()) {
+ obj->initFixedSlot(MAYBE_CALL_SLOT, ObjectValue(frame.callObj()));
+ for (PositionalFormalParameterIter fi(script); fi; fi++) {
+ if (fi.closedOver()) {
+ data->args[fi.argumentSlot()] = MagicEnvSlotValue(fi.location().slot());
+ obj->markArgumentForwarded();
+ }
+ }
+ }
+}
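+
+// For example, in sloppy-mode code like
+//
+//   function f(x) { g = () => x; return arguments; }
+//
+// |x| is closed over, so the arguments object aliases it: data->args[0] holds
+// a magic env-slot value, and reads of arguments[0] forward to the CallObject
+// (see ArgumentsObject::element in ArgumentsObject-inl.h).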
+
+/* static */
+void ArgumentsObject::MaybeForwardToCallObject(JSFunction* callee,
+ JSObject* callObj,
+ ArgumentsObject* obj,
+ ArgumentsData* data) {
+ JSScript* script = callee->nonLazyScript();
+ if (callee->needsCallObject() && script->argsObjAliasesFormals()) {
+ MOZ_ASSERT(callObj && callObj->is<CallObject>());
+ obj->initFixedSlot(MAYBE_CALL_SLOT, ObjectValue(*callObj));
+ for (PositionalFormalParameterIter fi(script); fi; fi++) {
+ if (fi.closedOver()) {
+ data->args[fi.argumentSlot()] = MagicEnvSlotValue(fi.location().slot());
+ obj->markArgumentForwarded();
+ }
+ }
+ }
+}
+
+struct CopyFrameArgs {
+ AbstractFramePtr frame_;
+
+ explicit CopyFrameArgs(AbstractFramePtr frame) : frame_(frame) {}
+
+ void copyActualArgs(GCPtr<Value>* dst, unsigned numActuals) const {
+ MOZ_ASSERT_IF(frame_.isInterpreterFrame(),
+ !frame_.asInterpreterFrame()->runningInJit());
+
+ // Copy arguments.
+ Value* src = frame_.argv();
+ Value* end = src + numActuals;
+ while (src != end) {
+ (dst++)->init(*src++);
+ }
+ }
+
+ /*
+ * If a call object exists and the arguments object aliases formals, the
+ * call object is the canonical location for formals.
+ */
+ void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) {
+ ArgumentsObject::MaybeForwardToCallObject(frame_, obj, data);
+ }
+};
+
+struct CopyJitFrameArgs {
+ jit::JitFrameLayout* frame_;
+ HandleObject callObj_;
+
+ CopyJitFrameArgs(jit::JitFrameLayout* frame, HandleObject callObj)
+ : frame_(frame), callObj_(callObj) {}
+
+ void copyActualArgs(GCPtr<Value>* dst, unsigned numActuals) const {
+ MOZ_ASSERT(frame_->numActualArgs() == numActuals);
+
+ Value* src = frame_->actualArgs();
+ Value* end = src + numActuals;
+ while (src != end) {
+ (dst++)->init(*src++);
+ }
+ }
+
+ /*
+ * If a call object exists and the arguments object aliases formals, the
+ * call object is the canonical location for formals.
+ */
+ void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) {
+ JSFunction* callee = jit::CalleeTokenToFunction(frame_->calleeToken());
+ ArgumentsObject::MaybeForwardToCallObject(callee, callObj_, obj, data);
+ }
+};
+
+struct CopyScriptFrameIterArgs {
+ ScriptFrameIter& iter_;
+ RootedValueVector actualArgs_;
+
+ explicit CopyScriptFrameIterArgs(JSContext* cx, ScriptFrameIter& iter)
+ : iter_(iter), actualArgs_(cx) {}
+
+ // Used to copy arguments to actualArgs_ to simplify copyArgs and
+ // ArgumentsObject allocation.
+ [[nodiscard]] bool init(JSContext* cx) {
+ unsigned numActuals = iter_.numActualArgs();
+ if (!actualArgs_.reserve(numActuals)) {
+ return false;
+ }
+
+ // Append actual arguments.
+ iter_.unaliasedForEachActual(
+ cx, [this](const Value& v) { actualArgs_.infallibleAppend(v); });
+ MOZ_RELEASE_ASSERT(actualArgs_.length() == numActuals);
+ return true;
+ }
+
+ void copyActualArgs(GCPtr<Value>* dst, unsigned numActuals) const {
+ MOZ_ASSERT(actualArgs_.length() == numActuals);
+
+ for (Value v : actualArgs_) {
+ (dst++)->init(v);
+ }
+ }
+
+ /*
+ * Ion frames copy every argument onto the stack; other locations are
+ * invalid.
+ */
+ void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) {
+ if (!iter_.isIon()) {
+ ArgumentsObject::MaybeForwardToCallObject(iter_.abstractFramePtr(), obj,
+ data);
+ }
+ }
+};
+
+struct CopyInlinedArgs {
+ HandleValueArray args_;
+ HandleObject callObj_;
+ HandleFunction callee_;
+
+ CopyInlinedArgs(HandleValueArray args, HandleObject callObj,
+ HandleFunction callee)
+ : args_(args), callObj_(callObj), callee_(callee) {}
+
+ void copyActualArgs(GCPtr<Value>* dst, unsigned numActuals) const {
+ MOZ_ASSERT(numActuals <= args_.length());
+
+ for (uint32_t i = 0; i < numActuals; i++) {
+ (dst++)->init(args_[i]);
+ }
+ }
+
+ /*
+ * If a call object exists and the arguments object aliases formals, the
+ * call object is the canonical location for formals.
+ */
+ void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) {
+ ArgumentsObject::MaybeForwardToCallObject(callee_, callObj_, obj, data);
+ }
+};
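+
+// The four Copy*Args structs above are duck-typed argument sources: each
+// supplies copyActualArgs() and maybeForwardToCallObject(), which the
+// ArgumentsObject::create template below consumes. Allocation and slot
+// initialization stay in one place while the frame-specific argument
+// fetching varies.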
+
+ArgumentsObject* ArgumentsObject::createTemplateObject(JSContext* cx,
+ bool mapped) {
+ const JSClass* clasp = mapped ? &MappedArgumentsObject::class_
+ : &UnmappedArgumentsObject::class_;
+
+ RootedObject proto(cx, &cx->global()->getObjectPrototype());
+
+ constexpr ObjectFlags objectFlags = {ObjectFlag::Indexed};
+ Rooted<SharedShape*> shape(cx, SharedShape::getInitialShape(
+ cx, clasp, cx->realm(), TaggedProto(proto),
+ FINALIZE_KIND, objectFlags));
+ if (!shape) {
+ return nullptr;
+ }
+
+ AutoSetNewObjectMetadata metadata(cx);
+ JSObject* base =
+ NativeObject::create(cx, FINALIZE_KIND, gc::Heap::Tenured, shape);
+ if (!base) {
+ return nullptr;
+ }
+
+ ArgumentsObject* obj = &base->as<js::ArgumentsObject>();
+ obj->initFixedSlot(ArgumentsObject::DATA_SLOT, PrivateValue(nullptr));
+ return obj;
+}
+
+ArgumentsObject* GlobalObject::maybeArgumentsTemplateObject(bool mapped) const {
+ return mapped ? data().mappedArgumentsTemplate
+ : data().unmappedArgumentsTemplate;
+}
+
+/* static */
+ArgumentsObject* GlobalObject::getOrCreateArgumentsTemplateObject(JSContext* cx,
+ bool mapped) {
+ GlobalObjectData& data = cx->global()->data();
+ HeapPtr<ArgumentsObject*>& obj =
+ mapped ? data.mappedArgumentsTemplate : data.unmappedArgumentsTemplate;
+
+ ArgumentsObject* templateObj = obj;
+ if (templateObj) {
+ return templateObj;
+ }
+
+ templateObj = ArgumentsObject::createTemplateObject(cx, mapped);
+ if (!templateObj) {
+ return nullptr;
+ }
+
+ obj.init(templateObj);
+ return templateObj;
+}
+
+template <typename CopyArgs>
+/* static */
+ArgumentsObject* ArgumentsObject::create(JSContext* cx, HandleFunction callee,
+ unsigned numActuals, CopyArgs& copy) {
+ // Self-hosted code should use the more efficient ArgumentsLength and
+ // GetArgument intrinsics instead of `arguments`.
+ MOZ_ASSERT(!callee->isSelfHostedBuiltin());
+
+ bool mapped = callee->baseScript()->hasMappedArgsObj();
+ ArgumentsObject* templateObj =
+ GlobalObject::getOrCreateArgumentsTemplateObject(cx, mapped);
+ if (!templateObj) {
+ return nullptr;
+ }
+
+ Rooted<SharedShape*> shape(cx, templateObj->sharedShape());
+
+ unsigned numFormals = callee->nargs();
+ unsigned numArgs = std::max(numActuals, numFormals);
+ unsigned numBytes = ArgumentsData::bytesRequired(numArgs);
+
+ AutoSetNewObjectMetadata metadata(cx);
+ JSObject* base =
+ NativeObject::create(cx, FINALIZE_KIND, gc::Heap::Default, shape);
+ if (!base) {
+ return nullptr;
+ }
+ ArgumentsObject* obj = &base->as<ArgumentsObject>();
+
+ ArgumentsData* data = reinterpret_cast<ArgumentsData*>(
+ AllocateObjectBuffer<uint8_t>(cx, obj, numBytes));
+ if (!data) {
+ // Make the object safe for GC.
+ obj->initFixedSlot(DATA_SLOT, PrivateValue(nullptr));
+ return nullptr;
+ }
+
+ data->numArgs = numArgs;
+ data->rareData = nullptr;
+
+ InitReservedSlot(obj, DATA_SLOT, data, numBytes, MemoryUse::ArgumentsData);
+ obj->initFixedSlot(CALLEE_SLOT, ObjectValue(*callee));
+ obj->initFixedSlot(INITIAL_LENGTH_SLOT,
+ Int32Value(numActuals << PACKED_BITS_COUNT));
+
+ // Copy [0, numActuals) into data->args.
+ GCPtr<Value>* args = data->args;
+ copy.copyActualArgs(args, numActuals);
+
+ // Fill in missing arguments with |undefined|.
+ for (size_t i = numActuals; i < numArgs; i++) {
+ args[i].init(UndefinedValue());
+ }
+
+ copy.maybeForwardToCallObject(obj, data);
+
+ MOZ_ASSERT(obj->initialLength() == numActuals);
+ MOZ_ASSERT(!obj->hasOverriddenLength());
+ return obj;
+}
+
+ArgumentsObject* ArgumentsObject::createExpected(JSContext* cx,
+ AbstractFramePtr frame) {
+ MOZ_ASSERT(frame.script()->needsArgsObj());
+ RootedFunction callee(cx, frame.callee());
+ CopyFrameArgs copy(frame);
+ ArgumentsObject* argsobj = create(cx, callee, frame.numActualArgs(), copy);
+ if (!argsobj) {
+ return nullptr;
+ }
+
+ frame.initArgsObj(*argsobj);
+ return argsobj;
+}
+
+ArgumentsObject* ArgumentsObject::createUnexpected(JSContext* cx,
+ ScriptFrameIter& iter) {
+ RootedFunction callee(cx, iter.callee(cx));
+ CopyScriptFrameIterArgs copy(cx, iter);
+ if (!copy.init(cx)) {
+ return nullptr;
+ }
+ return create(cx, callee, iter.numActualArgs(), copy);
+}
+
+ArgumentsObject* ArgumentsObject::createUnexpected(JSContext* cx,
+ AbstractFramePtr frame) {
+ RootedFunction callee(cx, frame.callee());
+ CopyFrameArgs copy(frame);
+ return create(cx, callee, frame.numActualArgs(), copy);
+}
+
+ArgumentsObject* ArgumentsObject::createForIon(JSContext* cx,
+ jit::JitFrameLayout* frame,
+ HandleObject scopeChain) {
+ jit::CalleeToken token = frame->calleeToken();
+ MOZ_ASSERT(jit::CalleeTokenIsFunction(token));
+ RootedFunction callee(cx, jit::CalleeTokenToFunction(token));
+ RootedObject callObj(
+ cx, scopeChain->is<CallObject>() ? scopeChain.get() : nullptr);
+ CopyJitFrameArgs copy(frame, callObj);
+ return create(cx, callee, frame->numActualArgs(), copy);
+}
+
+/* static */
+ArgumentsObject* ArgumentsObject::createFromValueArray(
+ JSContext* cx, HandleValueArray argsArray, HandleFunction callee,
+ HandleObject scopeChain, uint32_t numActuals) {
+ MOZ_ASSERT(numActuals <= MaxInlinedArgs);
+ RootedObject callObj(
+ cx, scopeChain->is<CallObject>() ? scopeChain.get() : nullptr);
+ CopyInlinedArgs copy(argsArray, callObj, callee);
+ return create(cx, callee, numActuals, copy);
+}
+
+/* static */
+ArgumentsObject* ArgumentsObject::createForInlinedIon(JSContext* cx,
+ Value* args,
+ HandleFunction callee,
+ HandleObject scopeChain,
+ uint32_t numActuals) {
+ RootedExternalValueArray rootedArgs(cx, numActuals, args);
+ HandleValueArray argsArray =
+ HandleValueArray::fromMarkedLocation(numActuals, args);
+
+ return createFromValueArray(cx, argsArray, callee, scopeChain, numActuals);
+}
+
+template <typename CopyArgs>
+/* static */
+ArgumentsObject* ArgumentsObject::finishPure(
+ JSContext* cx, ArgumentsObject* obj, JSFunction* callee, JSObject* callObj,
+ unsigned numActuals, CopyArgs& copy) {
+ unsigned numFormals = callee->nargs();
+ unsigned numArgs = std::max(numActuals, numFormals);
+ unsigned numBytes = ArgumentsData::bytesRequired(numArgs);
+
+ ArgumentsData* data = reinterpret_cast<ArgumentsData*>(
+ AllocateObjectBuffer<uint8_t>(cx, obj, numBytes));
+ if (!data) {
+ // Make the object safe for GC. Don't report OOM, the slow path will
+ // retry the allocation.
+ cx->recoverFromOutOfMemory();
+ obj->initFixedSlot(DATA_SLOT, PrivateValue(nullptr));
+ return nullptr;
+ }
+
+ data->numArgs = numArgs;
+ data->rareData = nullptr;
+
+ obj->initFixedSlot(INITIAL_LENGTH_SLOT,
+ Int32Value(numActuals << PACKED_BITS_COUNT));
+ obj->initFixedSlot(DATA_SLOT, PrivateValue(data));
+ AddCellMemory(obj, numBytes, MemoryUse::ArgumentsData);
+ obj->initFixedSlot(MAYBE_CALL_SLOT, UndefinedValue());
+ obj->initFixedSlot(CALLEE_SLOT, ObjectValue(*callee));
+
+ GCPtr<Value>* args = data->args;
+ copy.copyActualArgs(args, numActuals);
+
+ // Fill in missing arguments with |undefined|.
+ for (size_t i = numActuals; i < numArgs; i++) {
+ args[i].init(UndefinedValue());
+ }
+
+ if (callObj && callee->needsCallObject()) {
+ copy.maybeForwardToCallObject(obj, data);
+ }
+
+ MOZ_ASSERT(obj->initialLength() == numActuals);
+ MOZ_ASSERT(!obj->hasOverriddenLength());
+ return obj;
+}
+
+/* static */
+ArgumentsObject* ArgumentsObject::finishForIonPure(JSContext* cx,
+ jit::JitFrameLayout* frame,
+ JSObject* scopeChain,
+ ArgumentsObject* obj) {
+  // JIT code calls this directly (no callVM) because it's faster, so we're
+  // not allowed to GC in here.
+ AutoUnsafeCallWithABI unsafe;
+
+ JSFunction* callee = jit::CalleeTokenToFunction(frame->calleeToken());
+ RootedObject callObj(cx, scopeChain->is<CallObject>() ? scopeChain : nullptr);
+ CopyJitFrameArgs copy(frame, callObj);
+
+ unsigned numActuals = frame->numActualArgs();
+
+ return finishPure(cx, obj, callee, callObj, numActuals, copy);
+}
+
+/* static */
+ArgumentsObject* ArgumentsObject::finishInlineForIonPure(
+ JSContext* cx, JSObject* rawCallObj, JSFunction* rawCallee, Value* args,
+ uint32_t numActuals, ArgumentsObject* obj) {
+  // JIT code calls this directly (no callVM) because it's faster, so we're
+  // not allowed to GC in here.
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(numActuals <= MaxInlinedArgs);
+
+ RootedObject callObj(cx, rawCallObj);
+ RootedFunction callee(cx, rawCallee);
+ RootedExternalValueArray rootedArgs(cx, numActuals, args);
+ HandleValueArray argsArray =
+ HandleValueArray::fromMarkedLocation(numActuals, args);
+
+ CopyInlinedArgs copy(argsArray, callObj, callee);
+
+ return finishPure(cx, obj, callee, callObj, numActuals, copy);
+}
+
+/* static */
+bool ArgumentsObject::obj_delProperty(JSContext* cx, HandleObject obj,
+ HandleId id, ObjectOpResult& result) {
+ ArgumentsObject& argsobj = obj->as<ArgumentsObject>();
+ if (id.isInt()) {
+ unsigned arg = unsigned(id.toInt());
+ if (argsobj.isElement(arg)) {
+ if (!argsobj.markElementDeleted(cx, arg)) {
+ return false;
+ }
+ }
+ } else if (id.isAtom(cx->names().length)) {
+ argsobj.markLengthOverridden();
+ } else if (id.isAtom(cx->names().callee)) {
+ argsobj.as<MappedArgumentsObject>().markCalleeOverridden();
+ } else if (id.isWellKnownSymbol(JS::SymbolCode::iterator)) {
+ argsobj.markIteratorOverridden();
+ }
+ return result.succeed();
+}
+
+/* static */
+bool ArgumentsObject::obj_mayResolve(const JSAtomState& names, jsid id,
+ JSObject*) {
+ // Arguments might resolve indexes, Symbol.iterator, or length/callee.
+ if (id.isAtom()) {
+ JSAtom* atom = id.toAtom();
+ return atom->isIndex() || atom == names.length || atom == names.callee;
+ }
+
+ return id.isInt() || id.isWellKnownSymbol(JS::SymbolCode::iterator);
+}
+
+bool js::MappedArgGetter(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleValue vp) {
+ MappedArgumentsObject& argsobj = obj->as<MappedArgumentsObject>();
+ if (id.isInt()) {
+ /*
+ * arg can exceed the number of arguments if a script changed the
+ * prototype to point to another Arguments object with a bigger argc.
+ */
+ unsigned arg = unsigned(id.toInt());
+ if (argsobj.isElement(arg)) {
+ vp.set(argsobj.element(arg));
+ }
+ } else if (id.isAtom(cx->names().length)) {
+ if (!argsobj.hasOverriddenLength()) {
+ vp.setInt32(argsobj.initialLength());
+ }
+ } else {
+ MOZ_ASSERT(id.isAtom(cx->names().callee));
+ if (!argsobj.hasOverriddenCallee()) {
+ vp.setObject(argsobj.callee());
+ }
+ }
+ return true;
+}
+
+bool js::MappedArgSetter(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, ObjectOpResult& result) {
+ Handle<MappedArgumentsObject*> argsobj = obj.as<MappedArgumentsObject>();
+
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ if (!GetOwnPropertyDescriptor(cx, argsobj, id, &desc)) {
+ return false;
+ }
+ MOZ_ASSERT(desc.isSome());
+ MOZ_ASSERT(desc->isDataDescriptor());
+ MOZ_ASSERT(desc->writable());
+ MOZ_ASSERT(!desc->resolving());
+
+ if (id.isInt()) {
+ unsigned arg = unsigned(id.toInt());
+ if (argsobj->isElement(arg)) {
+ argsobj->setElement(arg, v);
+ return result.succeed();
+ }
+ } else {
+ MOZ_ASSERT(id.isAtom(cx->names().length) || id.isAtom(cx->names().callee));
+ }
+
+ /*
+ * For simplicity we use delete/define to replace the property with a
+ * simple data property. Note that we rely on ArgumentsObject::obj_delProperty
+ * to set the corresponding override-bit.
+ * Note also that we must define the property instead of setting it in case
+ * the user has changed the prototype to an object that has a setter for
+ * this id.
+ */
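+  // For example, if a script has replaced the prototype:
+  //
+  //   Object.setPrototypeOf(arguments, { set length(v) {} });
+  //   arguments.length = 0;
+  //
+  // defining (rather than setting) creates an own data property instead of
+  // invoking the inherited setter.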
+ Rooted<PropertyDescriptor> desc_(cx, *desc);
+ desc_.setValue(v);
+ ObjectOpResult ignored;
+ return NativeDeleteProperty(cx, argsobj, id, ignored) &&
+ NativeDefineProperty(cx, argsobj, id, desc_, result);
+}
+
+/* static */
+bool ArgumentsObject::getArgumentsIterator(JSContext* cx,
+ MutableHandleValue val) {
+ Handle<PropertyName*> shName = cx->names().ArrayValues;
+ Rooted<JSAtom*> name(cx, cx->names().values);
+ return GlobalObject::getSelfHostedFunction(cx, cx->global(), shName, name, 0,
+ val);
+}
+
+/* static */
+bool ArgumentsObject::reifyLength(JSContext* cx, Handle<ArgumentsObject*> obj) {
+ if (obj->hasOverriddenLength()) {
+ return true;
+ }
+
+ RootedId id(cx, NameToId(cx->names().length));
+ RootedValue val(cx, Int32Value(obj->initialLength()));
+ if (!NativeDefineDataProperty(cx, obj, id, val, JSPROP_RESOLVING)) {
+ return false;
+ }
+
+ obj->markLengthOverridden();
+ return true;
+}
+
+/* static */
+bool ArgumentsObject::reifyIterator(JSContext* cx,
+ Handle<ArgumentsObject*> obj) {
+ if (obj->hasOverriddenIterator()) {
+ return true;
+ }
+
+ RootedId iteratorId(cx, PropertyKey::Symbol(cx->wellKnownSymbols().iterator));
+ RootedValue val(cx);
+ if (!ArgumentsObject::getArgumentsIterator(cx, &val)) {
+ return false;
+ }
+ if (!NativeDefineDataProperty(cx, obj, iteratorId, val, JSPROP_RESOLVING)) {
+ return false;
+ }
+
+ obj->markIteratorOverridden();
+ return true;
+}
+
+/* static */
+bool MappedArgumentsObject::reifyCallee(JSContext* cx,
+ Handle<MappedArgumentsObject*> obj) {
+ if (obj->hasOverriddenCallee()) {
+ return true;
+ }
+
+ Rooted<PropertyKey> key(cx, NameToId(cx->names().callee));
+ Rooted<Value> val(cx, ObjectValue(obj->callee()));
+ if (!NativeDefineDataProperty(cx, obj, key, val, JSPROP_RESOLVING)) {
+ return false;
+ }
+
+ obj->markCalleeOverridden();
+ return true;
+}
+
+static bool ResolveArgumentsProperty(JSContext* cx,
+ Handle<ArgumentsObject*> obj, HandleId id,
+ PropertyFlags flags, bool* resolvedp) {
+ MOZ_ASSERT(id.isInt() || id.isAtom(cx->names().length) ||
+ id.isAtom(cx->names().callee));
+ MOZ_ASSERT(flags.isCustomDataProperty());
+
+ if (!NativeObject::addCustomDataProperty(cx, obj, id, flags)) {
+ return false;
+ }
+
+ *resolvedp = true;
+ return true;
+}
+
+/* static */
+bool MappedArgumentsObject::obj_resolve(JSContext* cx, HandleObject obj,
+ HandleId id, bool* resolvedp) {
+ Rooted<MappedArgumentsObject*> argsobj(cx, &obj->as<MappedArgumentsObject>());
+
+ if (id.isWellKnownSymbol(JS::SymbolCode::iterator)) {
+ if (argsobj->hasOverriddenIterator()) {
+ return true;
+ }
+
+ if (!reifyIterator(cx, argsobj)) {
+ return false;
+ }
+ *resolvedp = true;
+ return true;
+ }
+
+ PropertyFlags flags = {PropertyFlag::CustomDataProperty,
+ PropertyFlag::Configurable, PropertyFlag::Writable};
+ if (id.isInt()) {
+ uint32_t arg = uint32_t(id.toInt());
+ if (!argsobj->isElement(arg)) {
+ return true;
+ }
+
+ flags.setFlag(PropertyFlag::Enumerable);
+ } else if (id.isAtom(cx->names().length)) {
+ if (argsobj->hasOverriddenLength()) {
+ return true;
+ }
+ } else {
+ if (!id.isAtom(cx->names().callee)) {
+ return true;
+ }
+
+ if (argsobj->hasOverriddenCallee()) {
+ return true;
+ }
+ }
+
+ return ResolveArgumentsProperty(cx, argsobj, id, flags, resolvedp);
+}
+
+/* static */
+bool MappedArgumentsObject::obj_enumerate(JSContext* cx, HandleObject obj) {
+ Rooted<MappedArgumentsObject*> argsobj(cx, &obj->as<MappedArgumentsObject>());
+
+ RootedId id(cx);
+ bool found;
+
+ // Trigger reflection.
+ id = NameToId(cx->names().length);
+ if (!HasOwnProperty(cx, argsobj, id, &found)) {
+ return false;
+ }
+
+ id = NameToId(cx->names().callee);
+ if (!HasOwnProperty(cx, argsobj, id, &found)) {
+ return false;
+ }
+
+ id = PropertyKey::Symbol(cx->wellKnownSymbols().iterator);
+ if (!HasOwnProperty(cx, argsobj, id, &found)) {
+ return false;
+ }
+
+ for (unsigned i = 0; i < argsobj->initialLength(); i++) {
+ id = PropertyKey::Int(i);
+ if (!HasOwnProperty(cx, argsobj, id, &found)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool DefineMappedIndex(JSContext* cx, Handle<MappedArgumentsObject*> obj,
+ HandleId id,
+ MutableHandle<PropertyDescriptor> desc,
+ ObjectOpResult& result) {
+ // The custom data properties (see MappedArgGetter, MappedArgSetter) have to
+ // be (re)defined manually because PropertyDescriptor and NativeDefineProperty
+ // don't support these special properties.
+ //
+ // This exists in order to let JS code change the configurable/enumerable
+ // attributes for these properties.
+ //
+ // Note: because this preserves the default mapped-arguments behavior, we
+ // don't need to mark elements as overridden or deleted.
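+  //
+  // For example, |Object.defineProperty(arguments, 0, { enumerable: false })|
+  // lands here: only the enumerable attribute changes, and arguments[0]
+  // keeps aliasing the corresponding formal parameter.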
+
+ MOZ_ASSERT(id.isInt());
+ MOZ_ASSERT(obj->isElement(id.toInt()));
+ MOZ_ASSERT(!obj->containsDenseElement(id.toInt()));
+
+ MOZ_ASSERT(!desc.isAccessorDescriptor());
+
+ // Mapped properties aren't used when defining a non-writable property.
+ MOZ_ASSERT(!desc.hasWritable() || desc.writable());
+
+ // First, resolve the property to simplify the code below.
+ PropertyResult prop;
+ if (!NativeLookupOwnProperty<CanGC>(cx, obj, id, &prop)) {
+ return false;
+ }
+
+ MOZ_ASSERT(prop.isNativeProperty());
+
+ PropertyInfo propInfo = prop.propertyInfo();
+ MOZ_ASSERT(propInfo.writable());
+ MOZ_ASSERT(propInfo.isCustomDataProperty());
+
+ // Change the property's attributes by implementing the relevant parts of
+ // ValidateAndApplyPropertyDescriptor (ES2021 draft, 10.1.6.3), in particular
+ // steps 4 and 9.
+
+ // Determine whether the property should be configurable and/or enumerable.
+ bool configurable = propInfo.configurable();
+ bool enumerable = propInfo.enumerable();
+ if (configurable) {
+ if (desc.hasConfigurable()) {
+ configurable = desc.configurable();
+ }
+ if (desc.hasEnumerable()) {
+ enumerable = desc.enumerable();
+ }
+ } else {
+ // Property is not configurable so disallow any attribute changes.
+ if ((desc.hasConfigurable() && desc.configurable()) ||
+ (desc.hasEnumerable() && enumerable != desc.enumerable())) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+ }
+
+ PropertyFlags flags = propInfo.flags();
+ flags.setFlag(PropertyFlag::Configurable, configurable);
+ flags.setFlag(PropertyFlag::Enumerable, enumerable);
+ if (!NativeObject::changeCustomDataPropAttributes(cx, obj, id, flags)) {
+ return false;
+ }
+
+ return result.succeed();
+}
+
+// ES 2017 draft 9.4.4.2
+/* static */
+bool MappedArgumentsObject::obj_defineProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result) {
+ // Step 1.
+ Rooted<MappedArgumentsObject*> argsobj(cx, &obj->as<MappedArgumentsObject>());
+
+ // Steps 2-3.
+ bool isMapped = false;
+ if (id.isInt()) {
+ unsigned arg = unsigned(id.toInt());
+ isMapped = argsobj->isElement(arg);
+ }
+
+ // Step 4.
+ Rooted<PropertyDescriptor> newArgDesc(cx, desc);
+
+ // Step 5.
+ bool defineMapped = false;
+ if (!desc.isAccessorDescriptor() && isMapped) {
+ // Step 5.a.
+ if (desc.hasWritable() && !desc.writable()) {
+ if (!desc.hasValue()) {
+ RootedValue v(cx, argsobj->element(id.toInt()));
+ newArgDesc.setValue(v);
+ }
+ } else {
+ // In this case the live mapping is supposed to keep working.
+ defineMapped = true;
+ }
+ }
+
+  // Step 6. NativeDefineProperty will look up [[Value]] for us.
+ if (defineMapped) {
+ if (!DefineMappedIndex(cx, argsobj, id, &newArgDesc, result)) {
+ return false;
+ }
+ } else {
+ if (!NativeDefineProperty(cx, obj.as<NativeObject>(), id, newArgDesc,
+ result)) {
+ return false;
+ }
+ }
+ // Step 7.
+ if (!result.ok()) {
+ return true;
+ }
+
+ // Step 8.
+ if (isMapped) {
+ unsigned arg = unsigned(id.toInt());
+ if (desc.isAccessorDescriptor()) {
+ if (!argsobj->markElementDeleted(cx, arg)) {
+ return false;
+ }
+ } else {
+ if (desc.hasValue()) {
+ argsobj->setElement(arg, desc.value());
+ }
+ if (desc.hasWritable() && !desc.writable()) {
+ if (!argsobj->markElementDeleted(cx, arg)) {
+ return false;
+ }
+ }
+ }
+ }
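+  // For example, |Object.defineProperty(arguments, 0, { writable: false })|
+  // takes the markElementDeleted path above: once the property is
+  // non-writable, the live mapping to the formal parameter must be severed.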
+
+ // Step 9.
+ return result.succeed();
+}
+
+bool js::UnmappedArgGetter(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleValue vp) {
+ UnmappedArgumentsObject& argsobj = obj->as<UnmappedArgumentsObject>();
+
+ if (id.isInt()) {
+ /*
+ * arg can exceed the number of arguments if a script changed the
+ * prototype to point to another Arguments object with a bigger argc.
+ */
+ unsigned arg = unsigned(id.toInt());
+ if (argsobj.isElement(arg)) {
+ vp.set(argsobj.element(arg));
+ }
+ } else {
+ MOZ_ASSERT(id.isAtom(cx->names().length));
+ if (!argsobj.hasOverriddenLength()) {
+ vp.setInt32(argsobj.initialLength());
+ }
+ }
+ return true;
+}
+
+bool js::UnmappedArgSetter(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, ObjectOpResult& result) {
+ Handle<UnmappedArgumentsObject*> argsobj = obj.as<UnmappedArgumentsObject>();
+
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ if (!GetOwnPropertyDescriptor(cx, argsobj, id, &desc)) {
+ return false;
+ }
+ MOZ_ASSERT(desc.isSome());
+ MOZ_ASSERT(desc->isDataDescriptor());
+ MOZ_ASSERT(desc->writable());
+ MOZ_ASSERT(!desc->resolving());
+
+ if (id.isInt()) {
+ unsigned arg = unsigned(id.toInt());
+ if (arg < argsobj->initialLength()) {
+ argsobj->setElement(arg, v);
+ return result.succeed();
+ }
+ } else {
+ MOZ_ASSERT(id.isAtom(cx->names().length));
+ }
+
+ /*
+ * For simplicity we use delete/define to replace the property with a
+ * simple data property. Note that we rely on ArgumentsObject::obj_delProperty
+ * to set the corresponding override-bit.
+ */
+ Rooted<PropertyDescriptor> desc_(cx, *desc);
+ desc_.setValue(v);
+ ObjectOpResult ignored;
+ return NativeDeleteProperty(cx, argsobj, id, ignored) &&
+ NativeDefineProperty(cx, argsobj, id, desc_, result);
+}
+
+/* static */
+bool UnmappedArgumentsObject::obj_resolve(JSContext* cx, HandleObject obj,
+ HandleId id, bool* resolvedp) {
+ Rooted<UnmappedArgumentsObject*> argsobj(cx,
+ &obj->as<UnmappedArgumentsObject>());
+
+ if (id.isWellKnownSymbol(JS::SymbolCode::iterator)) {
+ if (argsobj->hasOverriddenIterator()) {
+ return true;
+ }
+
+ if (!reifyIterator(cx, argsobj)) {
+ return false;
+ }
+ *resolvedp = true;
+ return true;
+ }
+
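+  // In strict-mode (unmapped) arguments objects, arguments.callee is a poison
+  // pill: e.g. |function f() { "use strict"; return arguments.callee; }|
+  // throws a TypeError via the ThrowTypeError accessor installed below.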
+ if (id.isAtom(cx->names().callee)) {
+ RootedObject throwTypeError(
+ cx, GlobalObject::getOrCreateThrowTypeError(cx, cx->global()));
+ if (!throwTypeError) {
+ return false;
+ }
+
+ unsigned attrs = JSPROP_RESOLVING | JSPROP_PERMANENT;
+ if (!NativeDefineAccessorProperty(cx, argsobj, id, throwTypeError,
+ throwTypeError, attrs)) {
+ return false;
+ }
+
+ *resolvedp = true;
+ return true;
+ }
+
+ PropertyFlags flags = {PropertyFlag::CustomDataProperty,
+ PropertyFlag::Configurable, PropertyFlag::Writable};
+ if (id.isInt()) {
+ uint32_t arg = uint32_t(id.toInt());
+ if (!argsobj->isElement(arg)) {
+ return true;
+ }
+
+ flags.setFlag(PropertyFlag::Enumerable);
+ } else if (id.isAtom(cx->names().length)) {
+ if (argsobj->hasOverriddenLength()) {
+ return true;
+ }
+ } else {
+ return true;
+ }
+
+ return ResolveArgumentsProperty(cx, argsobj, id, flags, resolvedp);
+}
+
+/* static */
+bool UnmappedArgumentsObject::obj_enumerate(JSContext* cx, HandleObject obj) {
+ Rooted<UnmappedArgumentsObject*> argsobj(cx,
+ &obj->as<UnmappedArgumentsObject>());
+
+ RootedId id(cx);
+ bool found;
+
+ // Trigger reflection.
+ id = NameToId(cx->names().length);
+ if (!HasOwnProperty(cx, argsobj, id, &found)) {
+ return false;
+ }
+
+ id = NameToId(cx->names().callee);
+ if (!HasOwnProperty(cx, argsobj, id, &found)) {
+ return false;
+ }
+
+ id = PropertyKey::Symbol(cx->wellKnownSymbols().iterator);
+ if (!HasOwnProperty(cx, argsobj, id, &found)) {
+ return false;
+ }
+
+ for (unsigned i = 0; i < argsobj->initialLength(); i++) {
+ id = PropertyKey::Int(i);
+ if (!HasOwnProperty(cx, argsobj, id, &found)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void ArgumentsObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ MOZ_ASSERT(!IsInsideNursery(obj));
+ ArgumentsObject& argsobj = obj->as<ArgumentsObject>();
+ if (argsobj.data()) {
+ gcx->free_(&argsobj, argsobj.maybeRareData(),
+ RareArgumentsData::bytesRequired(argsobj.initialLength()),
+ MemoryUse::RareArgumentsData);
+ gcx->free_(&argsobj, argsobj.data(),
+ ArgumentsData::bytesRequired(argsobj.data()->numArgs),
+ MemoryUse::ArgumentsData);
+ }
+}
+
+void ArgumentsObject::trace(JSTracer* trc, JSObject* obj) {
+ ArgumentsObject& argsobj = obj->as<ArgumentsObject>();
+ if (ArgumentsData* data =
+ argsobj.data()) { // Template objects have no ArgumentsData.
+ TraceRange(trc, data->numArgs, data->begin(), js_arguments_str);
+ }
+}
+
+/* static */
+size_t ArgumentsObject::objectMoved(JSObject* dst, JSObject* src) {
+ ArgumentsObject* ndst = &dst->as<ArgumentsObject>();
+ const ArgumentsObject* nsrc = &src->as<ArgumentsObject>();
+ MOZ_ASSERT(ndst->data() == nsrc->data());
+
+ if (!IsInsideNursery(src)) {
+ return 0;
+ }
+
+ Nursery& nursery = dst->runtimeFromMainThread()->gc.nursery();
+
+ size_t nbytesTotal = 0;
+ uint32_t nDataBytes = ArgumentsData::bytesRequired(nsrc->data()->numArgs);
+ if (!nursery.isInside(nsrc->data())) {
+ nursery.removeMallocedBufferDuringMinorGC(nsrc->data());
+ } else {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ uint8_t* data = nsrc->zone()->pod_malloc<uint8_t>(nDataBytes);
+ if (!data) {
+ oomUnsafe.crash(
+ "Failed to allocate ArgumentsObject data while tenuring.");
+ }
+ ndst->initFixedSlot(DATA_SLOT, PrivateValue(data));
+
+ mozilla::PodCopy(data, reinterpret_cast<uint8_t*>(nsrc->data()),
+ nDataBytes);
+ nbytesTotal += nDataBytes;
+ }
+
+ AddCellMemory(ndst, nDataBytes, MemoryUse::ArgumentsData);
+
+ if (RareArgumentsData* srcRareData = nsrc->maybeRareData()) {
+ uint32_t nbytes = RareArgumentsData::bytesRequired(nsrc->initialLength());
+ if (!nursery.isInside(srcRareData)) {
+ nursery.removeMallocedBufferDuringMinorGC(srcRareData);
+ } else {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ uint8_t* dstRareData = nsrc->zone()->pod_malloc<uint8_t>(nbytes);
+ if (!dstRareData) {
+ oomUnsafe.crash(
+ "Failed to allocate RareArgumentsData data while tenuring.");
+ }
+ ndst->data()->rareData = (RareArgumentsData*)dstRareData;
+
+ mozilla::PodCopy(dstRareData, reinterpret_cast<uint8_t*>(srcRareData),
+ nbytes);
+ nbytesTotal += nbytes;
+ }
+
+ AddCellMemory(ndst, nbytes, MemoryUse::RareArgumentsData);
+ }
+
+ return nbytesTotal;
+}
+
+/*
+ * The classes below collaborate to lazily reflect and synchronize the actual
+ * argument values, argument count, and callee function object stored in a
+ * stack frame with their corresponding property values in the frame's
+ * arguments object.
+ */
+const JSClassOps MappedArgumentsObject::classOps_ = {
+ nullptr, // addProperty
+ ArgumentsObject::obj_delProperty, // delProperty
+ MappedArgumentsObject::obj_enumerate, // enumerate
+ nullptr, // newEnumerate
+ MappedArgumentsObject::obj_resolve, // resolve
+ ArgumentsObject::obj_mayResolve, // mayResolve
+ ArgumentsObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ ArgumentsObject::trace, // trace
+};
+
+const js::ClassExtension MappedArgumentsObject::classExt_ = {
+ ArgumentsObject::objectMoved, // objectMovedOp
+};
+
+const ObjectOps MappedArgumentsObject::objectOps_ = {
+ nullptr, // lookupProperty
+ MappedArgumentsObject::obj_defineProperty, // defineProperty
+ nullptr, // hasProperty
+ nullptr, // getProperty
+ nullptr, // setProperty
+ nullptr, // getOwnPropertyDescriptor
+ nullptr, // deleteProperty
+ nullptr, // getElements
+ nullptr, // funToString
+};
+
+const JSClass MappedArgumentsObject::class_ = {
+ "Arguments",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(MappedArgumentsObject::RESERVED_SLOTS) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Object) |
+ JSCLASS_SKIP_NURSERY_FINALIZE | JSCLASS_BACKGROUND_FINALIZE,
+ &MappedArgumentsObject::classOps_,
+ nullptr,
+ &MappedArgumentsObject::classExt_,
+ &MappedArgumentsObject::objectOps_};
+
+/*
+ * Unmapped arguments objects are significantly less magical than mapped ones,
+ * so they are represented by a different class while sharing some
+ * functionality.
+ */
+const JSClassOps UnmappedArgumentsObject::classOps_ = {
+ nullptr, // addProperty
+ ArgumentsObject::obj_delProperty, // delProperty
+ UnmappedArgumentsObject::obj_enumerate, // enumerate
+ nullptr, // newEnumerate
+ UnmappedArgumentsObject::obj_resolve, // resolve
+ ArgumentsObject::obj_mayResolve, // mayResolve
+ ArgumentsObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ ArgumentsObject::trace, // trace
+};
+
+const js::ClassExtension UnmappedArgumentsObject::classExt_ = {
+ ArgumentsObject::objectMoved, // objectMovedOp
+};
+
+const JSClass UnmappedArgumentsObject::class_ = {
+ "Arguments",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(UnmappedArgumentsObject::RESERVED_SLOTS) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Object) |
+ JSCLASS_SKIP_NURSERY_FINALIZE | JSCLASS_BACKGROUND_FINALIZE,
+ &UnmappedArgumentsObject::classOps_, nullptr,
+ &UnmappedArgumentsObject::classExt_};
diff --git a/js/src/vm/ArgumentsObject.h b/js/src/vm/ArgumentsObject.h
new file mode 100644
index 0000000000..93ad790f88
--- /dev/null
+++ b/js/src/vm/ArgumentsObject.h
@@ -0,0 +1,566 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ArgumentsObject_h
+#define vm_ArgumentsObject_h
+
+#include "mozilla/MemoryReporting.h"
+
+#include "gc/Barrier.h"
+#include "util/BitArray.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class AbstractFramePtr;
+class ArgumentsObject;
+class ScriptFrameIter;
+
+namespace jit {
+class JitFrameLayout;
+} // namespace jit
+
+// RareArgumentsData stores the deleted-elements bits for an arguments object.
+// Because |delete arguments[i]| is uncommon, we allocate this data the first
+// time an element is deleted.
+class RareArgumentsData {
+  // Bit array indicating, for every argument in [0, initialLength), whether
+  // the element has been deleted. See the ArgumentsObject::isElementDeleted
+  // comment.
+ size_t deletedBits_[1];
+
+ RareArgumentsData() = default;
+ RareArgumentsData(const RareArgumentsData&) = delete;
+ void operator=(const RareArgumentsData&) = delete;
+
+ public:
+ static RareArgumentsData* create(JSContext* cx, ArgumentsObject* obj);
+ static size_t bytesRequired(size_t numActuals);
+
+ bool isElementDeleted(size_t len, size_t i) const {
+ MOZ_ASSERT(i < len);
+ return IsBitArrayElementSet(deletedBits_, len, i);
+ }
+ void markElementDeleted(size_t len, size_t i) {
+ MOZ_ASSERT(i < len);
+ SetBitArrayElement(deletedBits_, len, i);
+ }
+};
+
+// ArgumentsData stores the initial indexed arguments provided to a function
+// call. It backs each arguments[i] up until the corresponding property is
+// modified, at which point the element is flagged to record the modification.
+struct ArgumentsData {
+ /*
+ * numArgs = std::max(numFormalArgs, numActualArgs)
+ * The array 'args' has numArgs elements.
+ */
+ uint32_t numArgs;
+
+ RareArgumentsData* rareData;
+
+ /*
+ * This array holds either the current argument value or the magic
+ * forwarding value. The latter means that the function has both a
+ * CallObject and an ArgumentsObject AND the particular formal variable is
+ * aliased by the CallObject. In such cases, the CallObject holds the
+ * canonical value so any element access to the arguments object should load
+ * the value out of the CallObject (which is pointed to by MAYBE_CALL_SLOT).
+ */
+ GCPtr<Value> args[1];
+
+ /* For jit use: */
+ static ptrdiff_t offsetOfArgs() { return offsetof(ArgumentsData, args); }
+
+ /* Iterate args. */
+ GCPtr<Value>* begin() { return args; }
+ const GCPtr<Value>* begin() const { return args; }
+ GCPtr<Value>* end() { return args + numArgs; }
+ const GCPtr<Value>* end() const { return args + numArgs; }
+
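+  // For example, on a typical 64-bit layout (4-byte numArgs plus padding,
+  // then an 8-byte rareData pointer), offsetof(ArgumentsData, args) == 16,
+  // so bytesRequired(3) == 16 + 3 * 8 == 40.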
+ static size_t bytesRequired(size_t numArgs) {
+ return offsetof(ArgumentsData, args) + numArgs * sizeof(Value);
+ }
+};
+
+// Maximum supported value of arguments.length. This bounds the
+// maximum number of arguments that can be supplied to a spread call
+// or Function.prototype.apply. This value also bounds the number of
+// elements parsed in an array initializer. NB: keep this in sync
+// with the copy in builtin/SelfHostingDefines.h.
+static const unsigned ARGS_LENGTH_MAX = 500 * 1000;
+
+// Maximum number of arguments supported in jitcode. This bounds the
+// maximum number of arguments that can be supplied to a spread call
+// or Function.prototype.apply without entering the VM. We limit the
+// number of parameters we can handle to a number that does not risk
+// us allocating too much stack, notably on Windows where there is a
+// 4K guard page that has to be touched to extend the stack. The value
+// "3000" is the size of the guard page minus an arbitrary, but large,
+// safety margin. See bug 1351278.
+static const uint32_t JIT_ARGS_LENGTH_MAX = 3000 / sizeof(JS::Value);
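+// With 8-byte JS::Values, this works out to a cap of 375 arguments.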
+
+static_assert(JIT_ARGS_LENGTH_MAX <= ARGS_LENGTH_MAX,
+ "maximum jit arguments should be <= maximum arguments");
+
+/*
+ * [SMDOC] ArgumentsObject
+ *
+ * ArgumentsObject instances represent |arguments| objects created to store
+ * function arguments when a function is called. It's expensive to create such
+ * objects if they're never used, so they're only created when they are
+ * potentially used.
+ *
+ * Arguments objects are complicated because, for non-strict mode code, they
+ * must alias any named arguments which were provided to the function. Gnarly
+ * example:
+ *
+ * function f(a, b, c, d)
+ * {
+ * arguments[0] = "seta";
+ * assertEq(a, "seta");
+ * b = "setb";
+ * assertEq(arguments[1], "setb");
+ * c = "setc";
+ * assertEq(arguments[2], undefined);
+ * arguments[3] = "setd";
+ * assertEq(d, undefined);
+ * }
+ * f("arga", "argb");
+ *
+ * ES5's strict mode behaves more sanely, and named arguments don't alias
+ * elements of an arguments object.
+ *
+ * ArgumentsObject instances use the following reserved slots:
+ *
+ * INITIAL_LENGTH_SLOT
+ * Stores the initial value of arguments.length, plus a bit indicating
+ * whether arguments.length and/or arguments[@@iterator] have been
+ * modified. Use initialLength(), hasOverriddenLength(), and
+ * hasOverriddenIterator() to access these values. If arguments.length has
+ * been modified, then the current value of arguments.length is stored in
+ * another slot associated with a new property.
+ * DATA_SLOT
+ * Stores an ArgumentsData*, described above.
+ * MAYBE_CALL_SLOT
+ * Stores the CallObject, if the callee has aliased bindings. See
+ * the ArgumentsData::args comment.
+ * CALLEE_SLOT
+ * Stores the initial arguments.callee. This value can be overridden on
+ * mapped arguments objects, see hasOverriddenCallee.
+ */
+class ArgumentsObject : public NativeObject {
+ public:
+ static const uint32_t INITIAL_LENGTH_SLOT = 0;
+ static const uint32_t DATA_SLOT = 1;
+ static const uint32_t MAYBE_CALL_SLOT = 2;
+ static const uint32_t CALLEE_SLOT = 3;
+
+ static const uint32_t LENGTH_OVERRIDDEN_BIT = 0x1;
+ static const uint32_t ITERATOR_OVERRIDDEN_BIT = 0x2;
+ static const uint32_t ELEMENT_OVERRIDDEN_BIT = 0x4;
+ static const uint32_t CALLEE_OVERRIDDEN_BIT = 0x8;
+ static const uint32_t FORWARDED_ARGUMENTS_BIT = 0x10;
+ static const uint32_t PACKED_BITS_COUNT = 5;
+ static const uint32_t PACKED_BITS_MASK = (1 << PACKED_BITS_COUNT) - 1;
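+  // For example, a fresh arguments object with three actual arguments stores
+  // Int32Value(3 << PACKED_BITS_COUNT), i.e. Int32Value(96), in
+  // INITIAL_LENGTH_SLOT; assigning to arguments.length then ORs in
+  // LENGTH_OVERRIDDEN_BIT, giving 97.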
+
+ static_assert(ARGS_LENGTH_MAX <= (UINT32_MAX >> PACKED_BITS_COUNT),
+ "Max arguments length must fit in available bits");
+
+// Our ability to inline functions that use |arguments| is limited by
+// the number of registers available to represent Value operands to
+// CreateInlinedArgumentsObject.
+#if defined(JS_CODEGEN_X86)
+ static const uint32_t MaxInlinedArgs = 1;
+#else
+ static const uint32_t MaxInlinedArgs = 3;
+#endif
+
+ protected:
+ template <typename CopyArgs>
+ static ArgumentsObject* create(JSContext* cx, HandleFunction callee,
+ unsigned numActuals, CopyArgs& copy);
+
+ ArgumentsData* data() const {
+ return reinterpret_cast<ArgumentsData*>(
+ getFixedSlot(DATA_SLOT).toPrivate());
+ }
+
+ RareArgumentsData* maybeRareData() const { return data()->rareData; }
+
+ [[nodiscard]] bool createRareData(JSContext* cx);
+
+ RareArgumentsData* getOrCreateRareData(JSContext* cx) {
+ if (!data()->rareData && !createRareData(cx)) {
+ return nullptr;
+ }
+ return data()->rareData;
+ }
+
+ static bool obj_delProperty(JSContext* cx, HandleObject obj, HandleId id,
+ ObjectOpResult& result);
+
+ static bool obj_mayResolve(const JSAtomState& names, jsid id, JSObject*);
+
+ public:
+ static const uint32_t RESERVED_SLOTS = 4;
+ static const gc::AllocKind FINALIZE_KIND = gc::AllocKind::OBJECT4_BACKGROUND;
+
+ /* Create an arguments object for a frame that is expecting them. */
+ static ArgumentsObject* createExpected(JSContext* cx, AbstractFramePtr frame);
+
+ /*
+ * Purposefully disconnect the returned arguments object from the frame
+ * by always creating a new copy that does not alias formal parameters.
+ * This allows function-local analysis to determine that formals are
+ * not aliased and generally simplifies arguments objects.
+ */
+ static ArgumentsObject* createUnexpected(JSContext* cx,
+ ScriptFrameIter& iter);
+ static ArgumentsObject* createUnexpected(JSContext* cx,
+ AbstractFramePtr frame);
+
+ static ArgumentsObject* createForIon(JSContext* cx,
+ jit::JitFrameLayout* frame,
+ HandleObject scopeChain);
+ static ArgumentsObject* createForInlinedIon(JSContext* cx, Value* args,
+ HandleFunction callee,
+ HandleObject scopeChain,
+ uint32_t numActuals);
+ static ArgumentsObject* createFromValueArray(JSContext* cx,
+ HandleValueArray argsArray,
+ HandleFunction callee,
+ HandleObject scopeChain,
+ uint32_t numActuals);
+
+ private:
+ template <typename CopyArgs>
+ static ArgumentsObject* finishPure(JSContext* cx, ArgumentsObject* obj,
+ JSFunction* callee, JSObject* callObj,
+ unsigned numActuals, CopyArgs& copy);
+
+ public:
+ /*
+ * Allocate ArgumentsData and fill reserved slots after allocating an
+ * ArgumentsObject in Ion code.
+ */
+ static ArgumentsObject* finishForIonPure(JSContext* cx,
+ jit::JitFrameLayout* frame,
+ JSObject* scopeChain,
+ ArgumentsObject* obj);
+
+ /*
+ * Allocate ArgumentsData for inlined arguments and fill reserved slots after
+ * allocating an ArgumentsObject in Ion code.
+ */
+ static ArgumentsObject* finishInlineForIonPure(
+ JSContext* cx, JSObject* rawCallObj, JSFunction* rawCallee, Value* args,
+ uint32_t numActuals, ArgumentsObject* obj);
+
+ static ArgumentsObject* createTemplateObject(JSContext* cx, bool mapped);
+
+ /*
+ * Return the initial length of the arguments. This may differ from the
+ * current value of arguments.length!
+ */
+ uint32_t initialLength() const {
+ uint32_t argc = uint32_t(getFixedSlot(INITIAL_LENGTH_SLOT).toInt32()) >>
+ PACKED_BITS_COUNT;
+ MOZ_ASSERT(argc <= ARGS_LENGTH_MAX);
+ return argc;
+ }
+
+ // True iff arguments.length has been assigned or deleted.
+ bool hasOverriddenLength() const {
+ const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
+ return v.toInt32() & LENGTH_OVERRIDDEN_BIT;
+ }
+
+ void markLengthOverridden() {
+ uint32_t v =
+ getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | LENGTH_OVERRIDDEN_BIT;
+ setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v));
+ }
+
+ // Create the default "length" property and set LENGTH_OVERRIDDEN_BIT.
+ static bool reifyLength(JSContext* cx, Handle<ArgumentsObject*> obj);
+
+ // True iff arguments[@@iterator] has been assigned or deleted.
+ bool hasOverriddenIterator() const {
+ const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
+ return v.toInt32() & ITERATOR_OVERRIDDEN_BIT;
+ }
+
+ void markIteratorOverridden() {
+ uint32_t v =
+ getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | ITERATOR_OVERRIDDEN_BIT;
+ setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v));
+ }
+
+ // Create the default @@iterator property and set ITERATOR_OVERRIDDEN_BIT.
+ static bool reifyIterator(JSContext* cx, Handle<ArgumentsObject*> obj);
+
+ /*
+ * Return the arguments iterator function.
+ */
+ static bool getArgumentsIterator(JSContext* cx, MutableHandleValue val);
+
+ // True iff any element has been assigned or deleted.
+ bool hasOverriddenElement() const {
+ const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
+ return v.toInt32() & ELEMENT_OVERRIDDEN_BIT;
+ }
+
+ void markElementOverridden() {
+ uint32_t v =
+ getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | ELEMENT_OVERRIDDEN_BIT;
+ setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v));
+ }
+
+ private:
+ /*
+ * Because the arguments object is a real object, its elements may be
+ * deleted. This is implemented by setting a 'deleted' flag for the arg
+ * which is read by argument object resolve and getter/setter hooks.
+ *
+ * NB: an element, once deleted, stays deleted. Thus:
+ *
+ * function f(x) { delete arguments[0]; arguments[0] = 42; return x }
+ * assertEq(f(1), 1);
+ *
+ * This works because, once a property is deleted from an arguments object,
+ * it gets regular properties with regular getters/setters that don't alias
+ * ArgumentsData::args.
+ */
+ bool isElementDeleted(uint32_t i) const {
+ MOZ_ASSERT(i < data()->numArgs);
+ if (i >= initialLength()) {
+ return false;
+ }
+ bool result = maybeRareData() &&
+ maybeRareData()->isElementDeleted(initialLength(), i);
+ MOZ_ASSERT_IF(result, hasOverriddenElement());
+ return result;
+ }
+
+ protected:
+ bool markElementDeleted(JSContext* cx, uint32_t i);
+
+ public:
+ /*
+ * Return true iff the index is a valid element index for this arguments
+ * object.
+ *
+ * Returning true here doesn't imply that the element value can be read
+ * through |ArgumentsObject::element()|. For example unmapped arguments
+ * objects can have an element index property redefined without having marked
+ * the element as deleted. Instead use |maybeGetElement()| or manually check
+ * for |hasOverriddenElement()|.
+ */
+ bool isElement(uint32_t i) const {
+ return i < initialLength() && !isElementDeleted(i);
+ }
+
+ /*
+ * An ArgumentsObject serves two roles:
+   * - a real object, accessed through regular object operations, e.g.,
+   *   GetElement corresponding to 'arguments[i]';
+   * - a VM-internal data structure, storing the value of arguments (formal
+   *   and actual) that are accessed directly by the VM when reading the
+   *   value of a formal parameter.
+   * There are two ways to access the ArgumentsData::args corresponding to
+   * these two use cases:
+   * - object access should use element(i) which will take care of
+ * forwarding when the value is the magic forwarding value;
+ * - VM argument access should use arg(i) which will assert that the
+ * value is not the magic forwarding value (since, if such forwarding was
+ * needed, the frontend should have emitted JSOp::GetAliasedVar).
+ */
+ const Value& element(uint32_t i) const;
+
+ inline void setElement(uint32_t i, const Value& v);
+
+ const Value& arg(unsigned i) const {
+ MOZ_ASSERT(i < data()->numArgs);
+ const Value& v = data()->args[i];
+ MOZ_ASSERT(!v.isMagic());
+ return v;
+ }
+
+ void setArg(unsigned i, const Value& v) {
+ MOZ_ASSERT(i < data()->numArgs);
+ GCPtr<Value>& lhs = data()->args[i];
+ MOZ_ASSERT(!lhs.isMagic());
+ lhs = v;
+ }
+
+ /*
+ * Test if an argument is forwarded, i.e. its actual value is stored in the
+ * CallObject and can't be directly read from |ArgumentsData::args|.
+ */
+ bool argIsForwarded(unsigned i) const {
+ MOZ_ASSERT(i < data()->numArgs);
+ const Value& v = data()->args[i];
+ MOZ_ASSERT_IF(IsMagicScopeSlotValue(v), anyArgIsForwarded());
+ return IsMagicScopeSlotValue(v);
+ }
+
+ bool anyArgIsForwarded() const {
+ const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
+ return v.toInt32() & FORWARDED_ARGUMENTS_BIT;
+ }
+
+ void markArgumentForwarded() {
+ uint32_t v =
+ getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | FORWARDED_ARGUMENTS_BIT;
+ setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v));
+ }
+
+ /*
+ * Attempt to speedily and efficiently access the i-th element of this
+ * arguments object. Return true if the element was speedily returned.
+ * Return false if the element must be looked up more slowly using
+   * getProperty or some similar method. maybeGetElements copies the
+   * elements [start, start + count) into the locations starting at 'vp'.
+ *
+ * NB: Returning false does not indicate error!
+ */
+ bool maybeGetElement(uint32_t i, MutableHandleValue vp) {
+ if (i >= initialLength() || hasOverriddenElement()) {
+ return false;
+ }
+ vp.set(element(i));
+ return true;
+ }
+
+ inline bool maybeGetElements(uint32_t start, uint32_t count, js::Value* vp);
+
+ /*
+ * Measures things hanging off this ArgumentsObject that are counted by the
+ * |miscSize| argument in JSObject::sizeOfExcludingThis().
+ */
+ size_t sizeOfMisc(mozilla::MallocSizeOf mallocSizeOf) const {
+ if (!data()) { // Template arguments objects have no data.
+ return 0;
+ }
+ return mallocSizeOf(data()) + mallocSizeOf(maybeRareData());
+ }
+ size_t sizeOfData() const {
+ return ArgumentsData::bytesRequired(data()->numArgs) +
+ (maybeRareData() ? RareArgumentsData::bytesRequired(initialLength())
+ : 0);
+ }
+
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+ static size_t objectMoved(JSObject* dst, JSObject* src);
+
+ /* For jit use: */
+ static size_t getDataSlotOffset() { return getFixedSlotOffset(DATA_SLOT); }
+ static size_t getInitialLengthSlotOffset() {
+ return getFixedSlotOffset(INITIAL_LENGTH_SLOT);
+ }
+
+ static Value MagicEnvSlotValue(uint32_t slot) {
+ // When forwarding slots to a backing CallObject, the slot numbers are
+ // stored as uint32 magic values. This raises an ambiguity if we have
+ // also copied JS_OPTIMIZED_OUT magic from a JIT frame or
+ // JS_UNINITIALIZED_LEXICAL magic on the CallObject. To distinguish
+ // normal magic values (those with a JSWhyMagic) and uint32 magic
+ // values, we add the maximum JSWhyMagic value to the slot
+ // number. This is safe as ARGS_LENGTH_MAX is well below UINT32_MAX.
+ static_assert(UINT32_MAX - JS_WHY_MAGIC_COUNT > ARGS_LENGTH_MAX);
+ return JS::MagicValueUint32(slot + JS_WHY_MAGIC_COUNT);
+ }
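+  // For example, a formal forwarded to CallObject slot 2 is stored as
+  // MagicValueUint32(2 + JS_WHY_MAGIC_COUNT); SlotFromMagicScopeSlotValue
+  // recovers the slot number by subtracting the same bias.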
+ static uint32_t SlotFromMagicScopeSlotValue(const Value& v) {
+ static_assert(UINT32_MAX - JS_WHY_MAGIC_COUNT > ARGS_LENGTH_MAX);
+ return v.magicUint32() - JS_WHY_MAGIC_COUNT;
+ }
+ static bool IsMagicScopeSlotValue(const Value& v) {
+ return v.isMagic() && v.magicUint32() > JS_WHY_MAGIC_COUNT;
+ }
+
+ static void MaybeForwardToCallObject(AbstractFramePtr frame,
+ ArgumentsObject* obj,
+ ArgumentsData* data);
+ static void MaybeForwardToCallObject(JSFunction* callee, JSObject* callObj,
+ ArgumentsObject* obj,
+ ArgumentsData* data);
+};
+
+class MappedArgumentsObject : public ArgumentsObject {
+ static const JSClassOps classOps_;
+ static const ClassExtension classExt_;
+ static const ObjectOps objectOps_;
+
+ public:
+ static const JSClass class_;
+
+ JSFunction& callee() const {
+ return getFixedSlot(CALLEE_SLOT).toObject().as<JSFunction>();
+ }
+
+ bool hasOverriddenCallee() const {
+ const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT);
+ return v.toInt32() & CALLEE_OVERRIDDEN_BIT;
+ }
+
+ void markCalleeOverridden() {
+ uint32_t v =
+ getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | CALLEE_OVERRIDDEN_BIT;
+ setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v));
+ }
+
+ static size_t getCalleeSlotOffset() {
+ return getFixedSlotOffset(CALLEE_SLOT);
+ }
+
+ // Create the default "callee" property and set CALLEE_OVERRIDDEN_BIT.
+ static bool reifyCallee(JSContext* cx, Handle<MappedArgumentsObject*> obj);
+
+ private:
+ static bool obj_enumerate(JSContext* cx, HandleObject obj);
+ static bool obj_resolve(JSContext* cx, HandleObject obj, HandleId id,
+ bool* resolvedp);
+ static bool obj_defineProperty(JSContext* cx, HandleObject obj, HandleId id,
+ Handle<JS::PropertyDescriptor> desc,
+ ObjectOpResult& result);
+};
+
+class UnmappedArgumentsObject : public ArgumentsObject {
+ static const JSClassOps classOps_;
+ static const ClassExtension classExt_;
+
+ public:
+ static const JSClass class_;
+
+ private:
+ static bool obj_enumerate(JSContext* cx, HandleObject obj);
+ static bool obj_resolve(JSContext* cx, HandleObject obj, HandleId id,
+ bool* resolvedp);
+};
+
+extern bool MappedArgGetter(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleValue vp);
+
+extern bool MappedArgSetter(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, ObjectOpResult& result);
+
+extern bool UnmappedArgGetter(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleValue vp);
+
+extern bool UnmappedArgSetter(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, ObjectOpResult& result);
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<js::ArgumentsObject>() const {
+ return is<js::MappedArgumentsObject>() || is<js::UnmappedArgumentsObject>();
+}
+
+#endif /* vm_ArgumentsObject_h */
diff --git a/js/src/vm/ArrayBufferObject-inl.h b/js/src/vm/ArrayBufferObject-inl.h
new file mode 100644
index 0000000000..1ca36c243d
--- /dev/null
+++ b/js/src/vm/ArrayBufferObject-inl.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ArrayBufferObject_inl_h
+#define vm_ArrayBufferObject_inl_h
+
+// Utilities and common inline code for ArrayBufferObject and
+// SharedArrayBufferObject.
+
+#include "vm/ArrayBufferObject.h"
+
+#include "vm/SharedArrayObject.h"
+#include "vm/SharedMem.h"
+
+namespace js {
+
+inline SharedMem<uint8_t*> ArrayBufferObjectMaybeShared::dataPointerEither() {
+ if (this->is<ArrayBufferObject>()) {
+ return this->as<ArrayBufferObject>().dataPointerShared();
+ }
+ return this->as<SharedArrayBufferObject>().dataPointerShared();
+}
+
+inline bool ArrayBufferObjectMaybeShared::isDetached() const {
+ if (this->is<ArrayBufferObject>()) {
+ return this->as<ArrayBufferObject>().isDetached();
+ }
+ return false;
+}
+
+inline size_t ArrayBufferObjectMaybeShared::byteLength() const {
+ if (this->is<ArrayBufferObject>()) {
+ return this->as<ArrayBufferObject>().byteLength();
+ }
+ return this->as<SharedArrayBufferObject>().byteLength();
+}
+
+inline bool ArrayBufferObjectMaybeShared::isPreparedForAsmJS() const {
+ if (this->is<ArrayBufferObject>()) {
+ return this->as<ArrayBufferObject>().isPreparedForAsmJS();
+ }
+ return false;
+}
+
+inline bool ArrayBufferObjectMaybeShared::isWasm() const {
+ if (this->is<ArrayBufferObject>()) {
+ return this->as<ArrayBufferObject>().isWasm();
+ }
+ return this->as<SharedArrayBufferObject>().isWasm();
+}
+
+} // namespace js
+
+#endif // vm_ArrayBufferObject_inl_h
diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp
new file mode 100644
index 0000000000..ec7801a31d
--- /dev/null
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -0,0 +1,2204 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/ArrayBufferObject.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Likely.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TaggedAnonymousMemory.h"
+
+#include <algorithm> // std::max, std::min
+#include <memory> // std::uninitialized_copy_n
+#include <string.h>
+#if !defined(XP_WIN) && !defined(__wasi__)
+# include <sys/mman.h>
+#endif
+#include <tuple> // std::tuple
+#include <type_traits>
+#ifdef MOZ_VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+#include "jsnum.h"
+#include "jstypes.h"
+
+#include "gc/Barrier.h"
+#include "gc/Memory.h"
+#include "js/ArrayBuffer.h"
+#include "js/Conversions.h"
+#include "js/experimental/TypedData.h" // JS_IsArrayBufferViewObject
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/MemoryMetrics.h"
+#include "js/PropertySpec.h"
+#include "js/SharedArrayBuffer.h"
+#include "js/Wrapper.h"
+#include "util/WindowsWrapper.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/SharedArrayObject.h"
+#include "vm/Warnings.h" // js::WarnNumberASCII
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmLog.h"
+#include "wasm/WasmMemory.h"
+#include "wasm/WasmModuleTypes.h"
+#include "wasm/WasmProcess.h"
+
+#include "gc/GCContext-inl.h"
+#include "gc/Marking-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/Realm-inl.h" // js::AutoRealm
+
+using JS::ToInt32;
+
+using js::wasm::IndexType;
+using js::wasm::Pages;
+using mozilla::Atomic;
+using mozilla::CheckedInt;
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+using namespace js;
+
+// Wasm allows large amounts of memory to be reserved at a time. On 64-bit
+// platforms (with "huge memories") we reserve around 4GB of virtual address
+// space for every wasm memory; on 32-bit platforms we usually do not, but users
+// often initialize memories in the hundreds of megabytes.
+//
+// If too many wasm memories remain live, we run up against system resource
+// exhaustion (address space or number of memory map descriptors) - see bug
+// 1068684, bug 1073934, bug 1517412, bug 1502733 for details. The limiting case
+// seems to be Android on ARM64, where the per-process address space is limited
+// to 4TB (39 bits) by the organization of the page tables. An earlier problem
+// was Windows Vista Home 64-bit, where the per-process address space is limited
+// to 8TB (40 bits). And 32-bit platforms only have 4GB of address space anyway.
+//
+// Thus we track the amount of memory reserved for wasm, and set a limit per
+// process. We trigger GC work when we approach the limit and we throw an OOM
+// error if the per-process limit is exceeded. The limit (WasmReservedBytesMax)
+// is specific to architecture, OS, and OS configuration.
+//
+// Since the WasmReservedBytesMax limit is not generally accounted for by
+// any existing GC-trigger heuristics, we need an extra heuristic for triggering
+// GCs when the caller is allocating memories rapidly without other garbage
+// (e.g. bug 1773225). Thus, once the reserved memory crosses the threshold
+// WasmReservedBytesStartTriggering, we start triggering GCs every
+// WasmReservedBytesPerTrigger bytes. Once we reach
+// WasmReservedBytesStartSyncFullGC bytes reserved, we perform expensive
+// non-incremental full GCs as a last-ditch effort to avoid unnecessary failure.
+// Once we reach WasmReservedBytesMax, we perform further full GCs before giving
+// up.
+//
+// (History: The original implementation only tracked the number of "huge
+// memories" allocated by WASM, but this was found to be insufficient because
+// 32-bit platforms have similar resource exhaustion issues. We now track
+// reserved bytes directly.)
+//
+// (We also used to reserve significantly more than 4GB for huge memories, but
+// this was reduced in bug 1442544.)
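+//
+// (For example, with huge memories on 64-bit desktops, using the constants
+// defined below: GCs start being triggered once roughly 100 * HugeMappedSize
+// bytes are reserved, recur every additional 100 * HugeMappedSize bytes, and
+// become synchronous full GCs within the last 100 * HugeMappedSize bytes
+// before WasmReservedBytesMax.)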
+
+// ASAN and TSAN use a ton of vmem for bookkeeping, leaving a lot less for the
+// program, so use a lower limit.
+#if defined(MOZ_TSAN) || defined(MOZ_ASAN)
+static const uint64_t WasmMemAsanOverhead = 2;
+#else
+static const uint64_t WasmMemAsanOverhead = 1;
+#endif
+
+// WasmReservedBytesStartTriggering + WasmReservedBytesPerTrigger must be well
+// below WasmReservedBytesStartSyncFullGC in order to provide enough time for
+// incremental GC to do its job.
+
+#if defined(JS_CODEGEN_ARM64) && defined(ANDROID)
+
+static const uint64_t WasmReservedBytesMax =
+ 75 * wasm::HugeMappedSize / WasmMemAsanOverhead;
+static const uint64_t WasmReservedBytesStartTriggering =
+ 15 * wasm::HugeMappedSize;
+static const uint64_t WasmReservedBytesStartSyncFullGC =
+ WasmReservedBytesMax - 15 * wasm::HugeMappedSize;
+static const uint64_t WasmReservedBytesPerTrigger = 15 * wasm::HugeMappedSize;
+
+#elif defined(WASM_SUPPORTS_HUGE_MEMORY)
+
+static const uint64_t WasmReservedBytesMax =
+ 1000 * wasm::HugeMappedSize / WasmMemAsanOverhead;
+static const uint64_t WasmReservedBytesStartTriggering =
+ 100 * wasm::HugeMappedSize;
+static const uint64_t WasmReservedBytesStartSyncFullGC =
+ WasmReservedBytesMax - 100 * wasm::HugeMappedSize;
+static const uint64_t WasmReservedBytesPerTrigger = 100 * wasm::HugeMappedSize;
+
+#else // 32-bit (and weird 64-bit platforms without huge memory)
+
+static const uint64_t GiB = 1024 * 1024 * 1024;
+
+static const uint64_t WasmReservedBytesMax =
+ (4 * GiB) / 2 / WasmMemAsanOverhead;
+static const uint64_t WasmReservedBytesStartTriggering = (4 * GiB) / 8;
+static const uint64_t WasmReservedBytesStartSyncFullGC =
+ WasmReservedBytesMax - (4 * GiB) / 8;
+static const uint64_t WasmReservedBytesPerTrigger = (4 * GiB) / 8;
+
+#endif
+
+// The total number of bytes reserved for wasm memories.
+static Atomic<uint64_t, mozilla::ReleaseAcquire> wasmReservedBytes(0);
+// The number of bytes of wasm memory reserved since the last GC trigger.
+static Atomic<uint64_t, mozilla::ReleaseAcquire> wasmReservedBytesSinceLast(0);
+
+uint64_t js::WasmReservedBytes() { return wasmReservedBytes; }
+
+[[nodiscard]] static bool CheckArrayBufferTooLarge(JSContext* cx,
+ uint64_t nbytes) {
+ // Refuse to allocate too large buffers.
+ if (MOZ_UNLIKELY(nbytes > ArrayBufferObject::MaxByteLength)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return false;
+ }
+
+ return true;
+}
+
+void* js::MapBufferMemory(wasm::IndexType t, size_t mappedSize,
+ size_t initialCommittedSize) {
+ MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(initialCommittedSize % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(initialCommittedSize <= mappedSize);
+
+ auto failed = mozilla::MakeScopeExit(
+ [&] { wasmReservedBytes -= uint64_t(mappedSize); });
+ wasmReservedBytes += uint64_t(mappedSize);
+
+ // Test >= to guard against the case where multiple extant runtimes
+ // race to allocate.
+ if (wasmReservedBytes >= WasmReservedBytesMax) {
+ if (OnLargeAllocationFailure) {
+ OnLargeAllocationFailure();
+ }
+ if (wasmReservedBytes >= WasmReservedBytesMax) {
+ return nullptr;
+ }
+ }
+
+#ifdef XP_WIN
+ void* data = VirtualAlloc(nullptr, mappedSize, MEM_RESERVE, PAGE_NOACCESS);
+ if (!data) {
+ return nullptr;
+ }
+
+ if (!VirtualAlloc(data, initialCommittedSize, MEM_COMMIT, PAGE_READWRITE)) {
+ VirtualFree(data, 0, MEM_RELEASE);
+ return nullptr;
+ }
+#elif defined(__wasi__)
+ void* data = nullptr;
+ if (int err = posix_memalign(&data, gc::SystemPageSize(), mappedSize)) {
+ MOZ_ASSERT(err == ENOMEM);
+ return nullptr;
+ }
+ MOZ_ASSERT(data);
+ memset(data, 0, mappedSize);
+#else // !XP_WIN && !__wasi__
+ void* data =
+ MozTaggedAnonymousMmap(nullptr, mappedSize, PROT_NONE,
+ MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved");
+ if (data == MAP_FAILED) {
+ return nullptr;
+ }
+
+ // Note we will waste a page on zero-sized memories here
+ if (mprotect(data, initialCommittedSize, PROT_READ | PROT_WRITE)) {
+ munmap(data, mappedSize);
+ return nullptr;
+ }
+#endif // !XP_WIN && !__wasi__
+
+#if defined(MOZ_VALGRIND) && \
+ defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
+ VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(
+ (unsigned char*)data + initialCommittedSize,
+ mappedSize - initialCommittedSize);
+#endif
+
+ failed.release();
+ return data;
+}
+
+bool js::CommitBufferMemory(void* dataEnd, size_t delta) {
+ MOZ_ASSERT(delta);
+ MOZ_ASSERT(delta % gc::SystemPageSize() == 0);
+
+#ifdef XP_WIN
+ if (!VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE)) {
+ return false;
+ }
+#elif defined(__wasi__)
+ // posix_memalign'd memory is already committed
+ return true;
+#else
+ if (mprotect(dataEnd, delta, PROT_READ | PROT_WRITE)) {
+ return false;
+ }
+#endif // XP_WIN
+
+#if defined(MOZ_VALGRIND) && \
+ defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
+ VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta);
+#endif
+
+ return true;
+}
+
+bool js::ExtendBufferMapping(void* dataPointer, size_t mappedSize,
+ size_t newMappedSize) {
+ MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(newMappedSize % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(newMappedSize >= mappedSize);
+
+#ifdef XP_WIN
+ void* mappedEnd = (char*)dataPointer + mappedSize;
+ uint32_t delta = newMappedSize - mappedSize;
+ if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS)) {
+ return false;
+ }
+ return true;
+#elif defined(__wasi__)
+ return false;
+#elif defined(XP_LINUX)
+ // Note this will not move memory (no MREMAP_MAYMOVE specified)
+ if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) {
+ return false;
+ }
+ return true;
+#else
+  // There is no mechanism for remapping on macOS and other Unices. Luckily we
+  // shouldn't need it here, as most of these platforms are 64-bit.
+ return false;
+#endif
+}
+
+void js::UnmapBufferMemory(wasm::IndexType t, void* base, size_t mappedSize) {
+ MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
+
+#ifdef XP_WIN
+ VirtualFree(base, 0, MEM_RELEASE);
+#elif defined(__wasi__)
+ free(base);
+#else
+ munmap(base, mappedSize);
+#endif // XP_WIN
+
+#if defined(MOZ_VALGRIND) && \
+ defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
+ VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)base,
+ mappedSize);
+#endif
+
+ // Untrack reserved memory *after* releasing memory -- otherwise, a race
+ // condition could enable the creation of unlimited buffers.
+ wasmReservedBytes -= uint64_t(mappedSize);
+}
+
+/*
+ * ArrayBufferObject
+ *
+ * This class holds the underlying raw buffer that the TypedArrayObject classes
+ * access. It can be created explicitly and passed to a TypedArrayObject, or
+ * can be created implicitly by constructing a TypedArrayObject with a size.
+ */
+
+/*
+ * ArrayBufferObject (base)
+ */
+
+static const JSClassOps ArrayBufferObjectClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ ArrayBufferObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+static const JSFunctionSpec arraybuffer_functions[] = {
+ JS_FN("isView", ArrayBufferObject::fun_isView, 1, 0), JS_FS_END};
+
+static const JSPropertySpec arraybuffer_properties[] = {
+ JS_SELF_HOSTED_SYM_GET(species, "$ArrayBufferSpecies", 0), JS_PS_END};
+
+static const JSFunctionSpec arraybuffer_proto_functions[] = {
+ JS_SELF_HOSTED_FN("slice", "ArrayBufferSlice", 2, 0), JS_FS_END};
+
+static const JSPropertySpec arraybuffer_proto_properties[] = {
+ JS_PSG("byteLength", ArrayBufferObject::byteLengthGetter, 0),
+ JS_STRING_SYM_PS(toStringTag, "ArrayBuffer", JSPROP_READONLY), JS_PS_END};
+
+static const ClassSpec ArrayBufferObjectClassSpec = {
+ GenericCreateConstructor<ArrayBufferObject::class_constructor, 1,
+ gc::AllocKind::FUNCTION>,
+ GenericCreatePrototype<ArrayBufferObject>,
+ arraybuffer_functions,
+ arraybuffer_properties,
+ arraybuffer_proto_functions,
+ arraybuffer_proto_properties};
+
+static const ClassExtension ArrayBufferObjectClassExtension = {
+ ArrayBufferObject::objectMoved, // objectMovedOp
+};
+
+const JSClass ArrayBufferObject::class_ = {
+ "ArrayBuffer",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer) |
+ JSCLASS_BACKGROUND_FINALIZE,
+ &ArrayBufferObjectClassOps, &ArrayBufferObjectClassSpec,
+ &ArrayBufferObjectClassExtension};
+
+const JSClass ArrayBufferObject::protoClass_ = {
+ "ArrayBuffer.prototype", JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer),
+ JS_NULL_CLASS_OPS, &ArrayBufferObjectClassSpec};
+
+static bool IsArrayBuffer(HandleValue v) {
+ return v.isObject() && v.toObject().is<ArrayBufferObject>();
+}
+
+MOZ_ALWAYS_INLINE bool ArrayBufferObject::byteLengthGetterImpl(
+ JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(IsArrayBuffer(args.thisv()));
+ auto* buffer = &args.thisv().toObject().as<ArrayBufferObject>();
+ args.rval().setNumber(buffer->byteLength());
+ return true;
+}
+
+bool ArrayBufferObject::byteLengthGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsArrayBuffer, byteLengthGetterImpl>(cx, args);
+}
+
+/*
+ * ArrayBuffer.isView(obj); ES6 (Dec 2013 draft) 24.1.3.1
+ */
+bool ArrayBufferObject::fun_isView(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setBoolean(args.get(0).isObject() &&
+ JS_IsArrayBufferViewObject(&args.get(0).toObject()));
+ return true;
+}
+
+// ES2017 draft 24.1.2.1
+bool ArrayBufferObject::class_constructor(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 1.
+ if (!ThrowIfNotConstructing(cx, args, "ArrayBuffer")) {
+ return false;
+ }
+
+ // Step 2.
+ uint64_t byteLength;
+ if (!ToIndex(cx, args.get(0), &byteLength)) {
+ return false;
+ }
+
+ // Step 3 (Inlined 24.1.1.1 AllocateArrayBuffer).
+ // 24.1.1.1, step 1 (Inlined 9.1.14 OrdinaryCreateFromConstructor).
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_ArrayBuffer,
+ &proto)) {
+ return false;
+ }
+
+ // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2).
+ if (!CheckArrayBufferTooLarge(cx, byteLength)) {
+ return false;
+ }
+
+ // 24.1.1.1, steps 1 and 4-6.
+ JSObject* bufobj = createZeroed(cx, byteLength, proto);
+ if (!bufobj) {
+ return false;
+ }
+ args.rval().setObject(*bufobj);
+ return true;
+}
+
+using ArrayBufferContents = UniquePtr<uint8_t[], JS::FreePolicy>;
+
+static ArrayBufferContents AllocateUninitializedArrayBufferContents(
+ JSContext* cx, size_t nbytes) {
+ // First attempt a normal allocation.
+ uint8_t* p =
+ cx->maybe_pod_arena_malloc<uint8_t>(js::ArrayBufferContentsArena, nbytes);
+ if (MOZ_UNLIKELY(!p)) {
+ // Otherwise attempt a large allocation, calling the
+ // large-allocation-failure callback if necessary.
+ p = static_cast<uint8_t*>(cx->runtime()->onOutOfMemoryCanGC(
+ js::AllocFunction::Malloc, js::ArrayBufferContentsArena, nbytes));
+ if (!p) {
+ ReportOutOfMemory(cx);
+ }
+ }
+
+ return ArrayBufferContents(p);
+}
+
+static ArrayBufferContents AllocateArrayBufferContents(JSContext* cx,
+ size_t nbytes) {
+ // First attempt a normal allocation.
+ uint8_t* p =
+ cx->maybe_pod_arena_calloc<uint8_t>(js::ArrayBufferContentsArena, nbytes);
+ if (MOZ_UNLIKELY(!p)) {
+ // Otherwise attempt a large allocation, calling the
+ // large-allocation-failure callback if necessary.
+ p = static_cast<uint8_t*>(cx->runtime()->onOutOfMemoryCanGC(
+ js::AllocFunction::Calloc, js::ArrayBufferContentsArena, nbytes));
+ if (!p) {
+ ReportOutOfMemory(cx);
+ }
+ }
+
+ return ArrayBufferContents(p);
+}
+
+static ArrayBufferContents NewCopiedBufferContents(
+ JSContext* cx, Handle<ArrayBufferObject*> buffer) {
+ ArrayBufferContents dataCopy =
+ AllocateUninitializedArrayBufferContents(cx, buffer->byteLength());
+ if (dataCopy) {
+ if (auto count = buffer->byteLength()) {
+ memcpy(dataCopy.get(), buffer->dataPointer(), count);
+ }
+ }
+ return dataCopy;
+}
+
+/* static */
+void ArrayBufferObject::detach(JSContext* cx,
+ Handle<ArrayBufferObject*> buffer) {
+ cx->check(buffer);
+ MOZ_ASSERT(!buffer->isPreparedForAsmJS());
+
+ // Update all views of the buffer to account for the buffer having been
+ // detached, and clear the buffer's data and list of views.
+ //
+ // Typed object buffers are not exposed and cannot be detached.
+
+ auto& innerViews = ObjectRealm::get(buffer).innerViews.get();
+ if (InnerViewTable::ViewVector* views =
+ innerViews.maybeViewsUnbarriered(buffer)) {
+ for (size_t i = 0; i < views->length(); i++) {
+ JSObject* view = (*views)[i];
+ view->as<ArrayBufferViewObject>().notifyBufferDetached();
+ }
+ innerViews.removeViews(buffer);
+ }
+ if (JSObject* view = buffer->firstView()) {
+ view->as<ArrayBufferViewObject>().notifyBufferDetached();
+ buffer->setFirstView(nullptr);
+ }
+
+ if (buffer->dataPointer()) {
+ buffer->releaseData(cx->gcContext());
+ buffer->setDataPointer(BufferContents::createNoData());
+ }
+
+ buffer->setByteLength(0);
+ buffer->setIsDetached();
+}
+
+/* clang-format off */
+/*
+ * [SMDOC] WASM Linear Memory structure
+ *
+ * Wasm Raw Buf Linear Memory Structure
+ *
+ * The linear heap in Wasm is an mmaped array buffer. Several constants manage
+ * its lifetime:
+ *
+ * - byteLength - the wasm-visible current length of the buffer in
+ *   bytes. Accesses in the range [0, byteLength) succeed. May only increase.
+ *
+ * - boundsCheckLimit - the size against which we perform bounds checks. The
+ * value of this depends on the bounds checking strategy chosen for the array
+ * buffer and the specific bounds checking semantics. For asm.js code and
+ *    for wasm code running with explicit bounds checking, it is always the
+ * same as the byteLength. For wasm code using the huge-memory trick, it is
+ * always wasm::GuardSize smaller than mappedSize.
+ *
+ * See also "Linear memory addresses and bounds checking" in
+ * wasm/WasmMemory.cpp.
+ *
+ * See also WasmMemoryObject::boundsCheckLimit().
+ *
+ * - sourceMaxSize - the optional declared limit on how far byteLength can grow
+ * in pages. This is the unmodified maximum size from the source module or
+ * JS-API invocation. This may not be representable in byte lengths, nor
+ *    feasible for a module to actually grow to, due to implementation limits.
+ * It is used for correct linking checks and js-types reflection.
+ *
+ * - clampedMaxSize - the maximum size on how far the byteLength can grow in
+ * pages. This value respects implementation limits and is always
+ * representable as a byte length. Every memory has a clampedMaxSize, even if
+ * no maximum was specified in source. When a memory has no sourceMaxSize,
+ * the clampedMaxSize will be the maximum amount of memory that can be grown
+ * to while still respecting implementation limits.
+ *
+ * - mappedSize - the actual mmapped size. Access in the range [0, mappedSize]
+ * will either succeed, or be handled by the wasm signal handlers. If
+ * sourceMaxSize is present at initialization, then we attempt to map the
+ * whole clampedMaxSize. Otherwise we only map the region needed for the
+ * initial size.
+ *
+ * The diagram below shows the layout of the wasm heap. The wasm-visible portion
+ * of the heap starts at 0. There is one extra page prior to the start of the
+ * wasm heap which contains the WasmArrayRawBuffer struct at its end (i.e. right
+ * before the start of the WASM heap).
+ *
+ * WasmArrayRawBuffer
+ * \ ArrayBufferObject::dataPointer()
+ * \ /
+ * \ |
+ * ______|_|______________________________________________________
+ * |______|_|______________|___________________|___________________|
+ * 0 byteLength clampedMaxSize mappedSize
+ *
+ * \_______________________/
+ *                          COMMITTED
+ * \_____________________________________/
+ * SLOP
+ * \______________________________________________________________/
+ * MAPPED
+ *
+ * Invariants on byteLength, clampedMaxSize, and mappedSize:
+ * - byteLength only increases
+ * - 0 <= byteLength <= clampedMaxSize <= mappedSize
+ * - if sourceMaxSize is not specified, mappedSize may grow.
+ * It is otherwise constant.
+ * - initialLength <= clampedMaxSize <= sourceMaxSize (if present)
+ * - clampedMaxSize <= wasm::MaxMemoryPages()
+ *
+ * Invariants on boundsCheckLimit:
+ * - for wasm code with the huge-memory trick,
+ * clampedMaxSize <= boundsCheckLimit <= mappedSize
+ * - for asm.js code or wasm with explicit bounds checking,
+ * byteLength == boundsCheckLimit <= clampedMaxSize
+ * - on ARM, boundsCheckLimit must be a valid ARM immediate.
+ * - if sourceMaxSize is not specified, boundsCheckLimit may grow as
+ * mappedSize grows. They are otherwise constant.
+ *
+ * NOTE: For asm.js on 32-bit platforms and on all platforms when running with
+ * explicit bounds checking, we guarantee that
+ *
+ * byteLength == clampedMaxSize == boundsCheckLimit == mappedSize
+ *
+ * That is, signal handlers will not be invoked.
+ *
+ * The region between byteLength and mappedSize is the SLOP - an area where we use
+ * signal handlers to catch things that slip by bounds checks. Logically it has
+ * two parts:
+ *
+ * - from byteLength to boundsCheckLimit - this part of the SLOP serves to catch
+ * accesses to memory we have reserved but not yet grown into. This allows us
+ * to grow memory up to max (when present) without having to patch/update the
+ * bounds checks.
+ *
+ * - from boundsCheckLimit to mappedSize - this part of the SLOP allows us to
+ * bounds check against base pointers and fold some constant offsets inside
+ * loads. This enables better Bounds Check Elimination. See "Linear memory
+ * addresses and bounds checking" in wasm/WasmMemory.cpp.
+ *
+ */
+/* clang-format on */
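+
+/*
+ * Worked example of the quantities above (illustrative numbers only): a
+ * 32-bit memory declared as {initial: 2, maximum: 100} with 64KiB wasm
+ * pages has
+ *
+ *   byteLength     = 2 * 65536 = 128KiB (grows via memory.grow)
+ *   sourceMaxPages = Some(100) (straight from the module)
+ *   clampedMaxSize = 100 * 65536 = 6400KiB (assuming this is within
+ *                    implementation limits)
+ *   mappedSize     = ComputeMappedSize(clampedMaxPages), i.e. the clamped
+ *                    maximum plus guard pages (or wasm::HugeMappedSize when
+ *                    the huge-memory trick is in use)
+ */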
+
+[[nodiscard]] bool WasmArrayRawBuffer::growToPagesInPlace(Pages newPages) {
+ size_t newSize = newPages.byteLength();
+ size_t oldSize = byteLength();
+
+ MOZ_ASSERT(newSize >= oldSize);
+ MOZ_ASSERT(newPages <= clampedMaxPages());
+ MOZ_ASSERT(newSize <= mappedSize());
+
+ size_t delta = newSize - oldSize;
+ MOZ_ASSERT(delta % wasm::PageSize == 0);
+
+ uint8_t* dataEnd = dataPointer() + oldSize;
+ MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0);
+
+ if (delta && !CommitBufferMemory(dataEnd, delta)) {
+ return false;
+ }
+
+ length_ = newSize;
+
+ return true;
+}
+
+bool WasmArrayRawBuffer::extendMappedSize(Pages maxPages) {
+ size_t newMappedSize = wasm::ComputeMappedSize(maxPages);
+ MOZ_ASSERT(mappedSize_ <= newMappedSize);
+ if (mappedSize_ == newMappedSize) {
+ return true;
+ }
+
+ if (!ExtendBufferMapping(dataPointer(), mappedSize_, newMappedSize)) {
+ return false;
+ }
+
+ mappedSize_ = newMappedSize;
+ return true;
+}
+
+void WasmArrayRawBuffer::tryGrowMaxPagesInPlace(Pages deltaMaxPages) {
+ Pages newMaxPages = clampedMaxPages_;
+
+ DebugOnly<bool> valid = newMaxPages.checkedIncrement(deltaMaxPages);
+  // The caller must ensure the increment neither overflows nor exceeds the
+  // specified maximum pages.
+ MOZ_ASSERT(valid);
+ MOZ_ASSERT_IF(sourceMaxPages_.isSome(), newMaxPages <= *sourceMaxPages_);
+
+ if (!extendMappedSize(newMaxPages)) {
+ return;
+ }
+ clampedMaxPages_ = newMaxPages;
+}
+
+void WasmArrayRawBuffer::discard(size_t byteOffset, size_t byteLen) {
+ uint8_t* memBase = dataPointer();
+
+ // The caller is responsible for ensuring these conditions are met; see this
+ // function's comment in ArrayBufferObject.h.
+ MOZ_ASSERT(byteOffset % wasm::PageSize == 0);
+ MOZ_ASSERT(byteLen % wasm::PageSize == 0);
+ MOZ_ASSERT(wasm::MemoryBoundsCheck(uint64_t(byteOffset), uint64_t(byteLen),
+ byteLength()));
+
+ // Discarding zero bytes "succeeds" with no effect.
+ if (byteLen == 0) {
+ return;
+ }
+
+ void* addr = memBase + uintptr_t(byteOffset);
+
+ // On POSIX-ish platforms, we discard memory by overwriting previously-mapped
+ // pages with freshly-mapped pages (which are all zeroed). The operating
+ // system recognizes this and decreases the process RSS, and eventually
+ // collects the abandoned physical pages.
+ //
+ // On Windows, committing over previously-committed pages has no effect, and
+ // the memory must be explicitly decommitted first. This is not the same as an
+ // munmap; the address space is still reserved.
+
+#ifdef XP_WIN
+ if (!VirtualFree(addr, byteLen, MEM_DECOMMIT)) {
+ MOZ_CRASH("wasm discard: failed to decommit memory");
+ }
+ if (!VirtualAlloc(addr, byteLen, MEM_COMMIT, PAGE_READWRITE)) {
+ MOZ_CRASH("wasm discard: decommitted memory but failed to recommit");
+  }
+#elif defined(__wasi__)
+ memset(addr, 0, byteLen);
+#else // !XP_WIN
+ void* data = MozTaggedAnonymousMmap(addr, byteLen, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0,
+ "wasm-reserved");
+ if (data == MAP_FAILED) {
+ MOZ_CRASH("failed to discard wasm memory; memory mappings may be broken");
+ }
+#endif
+}
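+
+/*
+ * Sketch of a discard call (illustrative; |raw| is a hypothetical existing
+ * buffer): zero pages [2, 4) and let the OS reclaim the physical pages.
+ *
+ *   WasmArrayRawBuffer* raw = ...;
+ *   raw->discard(2 * wasm::PageSize, 2 * wasm::PageSize);
+ *   // Subsequent reads of those two pages return zero.
+ */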
+
+/* static */
+WasmArrayRawBuffer* WasmArrayRawBuffer::AllocateWasm(
+ IndexType indexType, Pages initialPages, Pages clampedMaxPages,
+ const Maybe<Pages>& sourceMaxPages, const Maybe<size_t>& mapped) {
+ // Prior code has asserted that initial pages is within our implementation
+ // limits (wasm::MaxMemoryPages) and we can assume it is a valid size_t.
+ MOZ_ASSERT(initialPages.hasByteLength());
+ size_t numBytes = initialPages.byteLength();
+
+ // If there is a specified maximum, attempt to map the whole range for
+ // clampedMaxPages. Or else map only what's required for initialPages.
+ Pages initialMappedPages =
+ sourceMaxPages.isSome() ? clampedMaxPages : initialPages;
+
+ // Use an override mapped size, or else compute the mapped size from
+ // initialMappedPages.
+ size_t mappedSize =
+ mapped.isSome() ? *mapped : wasm::ComputeMappedSize(initialMappedPages);
+
+ MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize());
+ MOZ_RELEASE_ASSERT(numBytes <= SIZE_MAX - gc::SystemPageSize());
+ MOZ_RELEASE_ASSERT(initialPages <= clampedMaxPages);
+ MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
+
+ uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
+ uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize();
+
+ void* data = MapBufferMemory(indexType, (size_t)mappedSizeWithHeader,
+ (size_t)numBytesWithHeader);
+ if (!data) {
+ return nullptr;
+ }
+
+ uint8_t* base = reinterpret_cast<uint8_t*>(data) + gc::SystemPageSize();
+ uint8_t* header = base - sizeof(WasmArrayRawBuffer);
+
+ auto rawBuf = new (header) WasmArrayRawBuffer(
+ indexType, base, clampedMaxPages, sourceMaxPages, mappedSize, numBytes);
+ return rawBuf;
+}
+
+/* static */
+void WasmArrayRawBuffer::Release(void* mem) {
+ WasmArrayRawBuffer* header =
+ (WasmArrayRawBuffer*)((uint8_t*)mem - sizeof(WasmArrayRawBuffer));
+
+ MOZ_RELEASE_ASSERT(header->mappedSize() <= SIZE_MAX - gc::SystemPageSize());
+ size_t mappedSizeWithHeader = header->mappedSize() + gc::SystemPageSize();
+
+ static_assert(std::is_trivially_destructible_v<WasmArrayRawBuffer>,
+ "no need to call the destructor");
+
+ UnmapBufferMemory(header->indexType(), header->basePointer(),
+ mappedSizeWithHeader);
+}
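+
+/*
+ * Recap of the pointer math shared by AllocateWasm and Release (a sketch of
+ * the layout, not additional behavior):
+ *
+ *   mapping start                    base = data + gc::SystemPageSize()
+ *   |                                |
+ *   v                                v
+ *   [......... header page .........][......... wasm heap .........]
+ *                                ^
+ *                                header = base - sizeof(WasmArrayRawBuffer)
+ *
+ * Release() recovers the header from the heap pointer the same way, then
+ * unmaps the whole region, header page included.
+ */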
+
+WasmArrayRawBuffer* ArrayBufferObject::BufferContents::wasmBuffer() const {
+ MOZ_RELEASE_ASSERT(kind_ == WASM);
+ return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer));
+}
+
+template <typename ObjT, typename RawbufT>
+static bool CreateSpecificWasmBuffer(
+ JSContext* cx, const wasm::MemoryDesc& memory,
+ MutableHandleArrayBufferObjectMaybeShared maybeSharedObject) {
+ bool useHugeMemory = wasm::IsHugeMemoryEnabled(memory.indexType());
+ Pages initialPages = memory.initialPages();
+ Maybe<Pages> sourceMaxPages = memory.maximumPages();
+ Pages clampedMaxPages = wasm::ClampedMaxPages(
+ memory.indexType(), initialPages, sourceMaxPages, useHugeMemory);
+
+ Maybe<size_t> mappedSize;
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+ // Override the mapped size if we are using huge memory. If we are not, then
+ // it will be calculated by the raw buffer we are using.
+ if (useHugeMemory) {
+ mappedSize = Some(wasm::HugeMappedSize);
+ }
+#endif
+
+ RawbufT* buffer =
+ RawbufT::AllocateWasm(memory.limits.indexType, initialPages,
+ clampedMaxPages, sourceMaxPages, mappedSize);
+ if (!buffer) {
+ if (useHugeMemory) {
+ WarnNumberASCII(cx, JSMSG_WASM_HUGE_MEMORY_FAILED);
+ if (cx->isExceptionPending()) {
+ cx->clearPendingException();
+ }
+
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // If we fail, and have a sourceMaxPages, try to reserve the biggest
+ // chunk in the range [initialPages, clampedMaxPages) using log backoff.
+ if (!sourceMaxPages) {
+ wasm::Log(cx, "new Memory({initial=%" PRIu64 " pages}) failed",
+ initialPages.value());
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ uint64_t cur = clampedMaxPages.value() / 2;
+ for (; Pages(cur) > initialPages; cur /= 2) {
+ buffer = RawbufT::AllocateWasm(memory.limits.indexType, initialPages,
+ Pages(cur), sourceMaxPages, mappedSize);
+ if (buffer) {
+ break;
+ }
+ }
+
+ if (!buffer) {
+ wasm::Log(cx, "new Memory({initial=%" PRIu64 " pages}) failed",
+ initialPages.value());
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Try to grow our chunk as much as possible.
+ for (size_t d = cur / 2; d >= 1; d /= 2) {
+ buffer->tryGrowMaxPagesInPlace(Pages(d));
+ }
+ }
+
+ // ObjT::createFromNewRawBuffer assumes ownership of |buffer| even in case
+ // of failure.
+ RootedArrayBufferObjectMaybeShared object(
+ cx, ObjT::createFromNewRawBuffer(cx, buffer, initialPages.byteLength()));
+ if (!object) {
+ return false;
+ }
+
+ maybeSharedObject.set(object);
+
+  // See the wasmReservedBytes threshold comments above.
+ if (wasmReservedBytes > WasmReservedBytesStartSyncFullGC) {
+ JS::PrepareForFullGC(cx);
+ JS::NonIncrementalGC(cx, JS::GCOptions::Normal,
+ JS::GCReason::TOO_MUCH_WASM_MEMORY);
+ wasmReservedBytesSinceLast = 0;
+ } else if (wasmReservedBytes > WasmReservedBytesStartTriggering) {
+ wasmReservedBytesSinceLast += uint64_t(buffer->mappedSize());
+ if (wasmReservedBytesSinceLast > WasmReservedBytesPerTrigger) {
+ (void)cx->runtime()->gc.triggerGC(JS::GCReason::TOO_MUCH_WASM_MEMORY);
+ wasmReservedBytesSinceLast = 0;
+ }
+ } else {
+ wasmReservedBytesSinceLast = 0;
+ }
+
+  // Log the result, with details of the memory allocation.
+ if (sourceMaxPages) {
+ if (useHugeMemory) {
+ wasm::Log(cx,
+ "new Memory({initial:%" PRIu64 " pages, maximum:%" PRIu64
+ " pages}) succeeded",
+ initialPages.value(), sourceMaxPages->value());
+ } else {
+ wasm::Log(cx,
+ "new Memory({initial:%" PRIu64 " pages, maximum:%" PRIu64
+ " pages}) succeeded "
+ "with internal maximum of %" PRIu64 " pages",
+ initialPages.value(), sourceMaxPages->value(),
+ object->wasmClampedMaxPages().value());
+ }
+ } else {
+ wasm::Log(cx, "new Memory({initial:%" PRIu64 " pages}) succeeded",
+ initialPages.value());
+ }
+
+ return true;
+}
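+
+/*
+ * Example of the log backoff above (illustrative numbers): with
+ * initialPages = 100 and clampedMaxPages = 65536, the allocation attempts
+ * run
+ *
+ *   65536 (fails), 32768, 16384, ... until, say, 8192 succeeds,
+ *
+ * after which tryGrowMaxPagesInPlace(4096), tryGrowMaxPagesInPlace(2048),
+ * ... opportunistically claw back address space, up to the source maximum.
+ */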
+
+bool js::CreateWasmBuffer(JSContext* cx, const wasm::MemoryDesc& memory,
+ MutableHandleArrayBufferObjectMaybeShared buffer) {
+ MOZ_RELEASE_ASSERT(memory.initialPages() <=
+ wasm::MaxMemoryPages(memory.indexType()));
+ MOZ_RELEASE_ASSERT(cx->wasm().haveSignalHandlers);
+
+ if (memory.isShared()) {
+ if (!cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_SHMEM_LINK);
+ return false;
+ }
+ return CreateSpecificWasmBuffer<SharedArrayBufferObject,
+ WasmSharedArrayRawBuffer>(cx, memory,
+ buffer);
+ }
+ return CreateSpecificWasmBuffer<ArrayBufferObject, WasmArrayRawBuffer>(
+ cx, memory, buffer);
+}
+
+bool ArrayBufferObject::prepareForAsmJS() {
+ MOZ_ASSERT(byteLength() % wasm::PageSize == 0,
+ "prior size checking should have guaranteed page-size multiple");
+ MOZ_ASSERT(byteLength() > 0,
+ "prior size checking should have excluded empty buffers");
+
+ switch (bufferKind()) {
+ case MALLOCED:
+ case MAPPED:
+ case EXTERNAL:
+ // It's okay if this uselessly sets the flag a second time.
+ setIsPreparedForAsmJS();
+ return true;
+
+ case INLINE_DATA:
+ static_assert(wasm::PageSize > MaxInlineBytes,
+ "inline data must be too small to be a page size multiple");
+ MOZ_ASSERT_UNREACHABLE(
+ "inline-data buffers should be implicitly excluded by size checks");
+ return false;
+
+ case NO_DATA:
+ MOZ_ASSERT_UNREACHABLE(
+ "size checking should have excluded detached or empty buffers");
+ return false;
+
+ // asm.js code and associated buffers are potentially long-lived. Yet a
+ // buffer of user-owned data *must* be detached by the user before the
+ // user-owned data is disposed. No caller wants to use a user-owned
+ // ArrayBuffer with asm.js, so just don't support this and avoid a mess of
+ // complexity.
+ case USER_OWNED:
+ // wasm buffers can be detached at any time.
+ case WASM:
+ MOZ_ASSERT(!isPreparedForAsmJS());
+ return false;
+
+ case BAD1:
+ MOZ_ASSERT_UNREACHABLE("invalid bufferKind() encountered");
+ return false;
+ }
+
+ MOZ_ASSERT_UNREACHABLE("non-exhaustive kind-handling switch?");
+ return false;
+}
+
+ArrayBufferObject::BufferContents ArrayBufferObject::createMappedContents(
+ int fd, size_t offset, size_t length) {
+ void* data =
+ gc::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
+ return BufferContents::createMapped(data);
+}
+
+uint8_t* ArrayBufferObject::inlineDataPointer() const {
+ return static_cast<uint8_t*>(fixedData(JSCLASS_RESERVED_SLOTS(&class_)));
+}
+
+uint8_t* ArrayBufferObject::dataPointer() const {
+ return static_cast<uint8_t*>(getFixedSlot(DATA_SLOT).toPrivate());
+}
+
+SharedMem<uint8_t*> ArrayBufferObject::dataPointerShared() const {
+ return SharedMem<uint8_t*>::unshared(getFixedSlot(DATA_SLOT).toPrivate());
+}
+
+ArrayBufferObject::FreeInfo* ArrayBufferObject::freeInfo() const {
+ MOZ_ASSERT(isExternal());
+ return reinterpret_cast<FreeInfo*>(inlineDataPointer());
+}
+
+void ArrayBufferObject::releaseData(JS::GCContext* gcx) {
+ switch (bufferKind()) {
+ case INLINE_DATA:
+ // Inline data doesn't require releasing.
+ break;
+ case MALLOCED:
+ gcx->free_(this, dataPointer(), byteLength(),
+ MemoryUse::ArrayBufferContents);
+ break;
+ case NO_DATA:
+ // There's nothing to release if there's no data.
+ MOZ_ASSERT(dataPointer() == nullptr);
+ break;
+ case USER_OWNED:
+ // User-owned data is released by, well, the user.
+ break;
+ case MAPPED:
+ gc::DeallocateMappedContent(dataPointer(), byteLength());
+ gcx->removeCellMemory(this, associatedBytes(),
+ MemoryUse::ArrayBufferContents);
+ break;
+ case WASM:
+ WasmArrayRawBuffer::Release(dataPointer());
+ gcx->removeCellMemory(this, byteLength(), MemoryUse::ArrayBufferContents);
+ break;
+ case EXTERNAL:
+ if (freeInfo()->freeFunc) {
+ // The analyzer can't know for sure whether the embedder-supplied
+ // free function will GC. We give the analyzer a hint here.
+ // (Doing a GC in the free function is considered a programmer
+ // error.)
+ JS::AutoSuppressGCAnalysis nogc;
+ freeInfo()->freeFunc(dataPointer(), freeInfo()->freeUserData);
+ }
+ break;
+ case BAD1:
+ MOZ_CRASH("invalid BufferKind encountered");
+ break;
+ }
+}
+
+void ArrayBufferObject::setDataPointer(BufferContents contents) {
+ setFixedSlot(DATA_SLOT, PrivateValue(contents.data()));
+ setFlags((flags() & ~KIND_MASK) | contents.kind());
+
+ if (isExternal()) {
+ auto info = freeInfo();
+ info->freeFunc = contents.freeFunc();
+ info->freeUserData = contents.freeUserData();
+ }
+}
+
+size_t ArrayBufferObject::byteLength() const {
+ return size_t(getFixedSlot(BYTE_LENGTH_SLOT).toPrivate());
+}
+
+inline size_t ArrayBufferObject::associatedBytes() const {
+ if (bufferKind() == MALLOCED) {
+ return byteLength();
+ }
+ if (bufferKind() == MAPPED) {
+ return RoundUp(byteLength(), js::gc::SystemPageSize());
+ }
+ MOZ_CRASH("Unexpected buffer kind");
+}
+
+void ArrayBufferObject::setByteLength(size_t length) {
+ MOZ_ASSERT(length <= ArrayBufferObject::MaxByteLength);
+ setFixedSlot(BYTE_LENGTH_SLOT, PrivateValue(length));
+}
+
+size_t ArrayBufferObject::wasmMappedSize() const {
+ if (isWasm()) {
+ return contents().wasmBuffer()->mappedSize();
+ }
+ return byteLength();
+}
+
+IndexType ArrayBufferObject::wasmIndexType() const {
+ if (isWasm()) {
+ return contents().wasmBuffer()->indexType();
+ }
+ MOZ_ASSERT(isPreparedForAsmJS());
+ return wasm::IndexType::I32;
+}
+
+Pages ArrayBufferObject::wasmPages() const {
+ if (isWasm()) {
+ return contents().wasmBuffer()->pages();
+ }
+ MOZ_ASSERT(isPreparedForAsmJS());
+ return Pages::fromByteLengthExact(byteLength());
+}
+
+Pages ArrayBufferObject::wasmClampedMaxPages() const {
+ if (isWasm()) {
+ return contents().wasmBuffer()->clampedMaxPages();
+ }
+ MOZ_ASSERT(isPreparedForAsmJS());
+ return Pages::fromByteLengthExact(byteLength());
+}
+
+Maybe<Pages> ArrayBufferObject::wasmSourceMaxPages() const {
+ if (isWasm()) {
+ return contents().wasmBuffer()->sourceMaxPages();
+ }
+ MOZ_ASSERT(isPreparedForAsmJS());
+ return Some<Pages>(Pages::fromByteLengthExact(byteLength()));
+}
+
+size_t js::WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf) {
+ if (buf->is<ArrayBufferObject>()) {
+ return buf->as<ArrayBufferObject>().wasmMappedSize();
+ }
+ return buf->as<SharedArrayBufferObject>().wasmMappedSize();
+}
+
+IndexType js::WasmArrayBufferIndexType(
+ const ArrayBufferObjectMaybeShared* buf) {
+ if (buf->is<ArrayBufferObject>()) {
+ return buf->as<ArrayBufferObject>().wasmIndexType();
+ }
+ return buf->as<SharedArrayBufferObject>().wasmIndexType();
+}
+Pages js::WasmArrayBufferPages(const ArrayBufferObjectMaybeShared* buf) {
+ if (buf->is<ArrayBufferObject>()) {
+ return buf->as<ArrayBufferObject>().wasmPages();
+ }
+ return buf->as<SharedArrayBufferObject>().volatileWasmPages();
+}
+Pages js::WasmArrayBufferClampedMaxPages(
+ const ArrayBufferObjectMaybeShared* buf) {
+ if (buf->is<ArrayBufferObject>()) {
+ return buf->as<ArrayBufferObject>().wasmClampedMaxPages();
+ }
+ return buf->as<SharedArrayBufferObject>().wasmClampedMaxPages();
+}
+Maybe<Pages> js::WasmArrayBufferSourceMaxPages(
+ const ArrayBufferObjectMaybeShared* buf) {
+ if (buf->is<ArrayBufferObject>()) {
+ return buf->as<ArrayBufferObject>().wasmSourceMaxPages();
+ }
+ return Some(buf->as<SharedArrayBufferObject>().wasmSourceMaxPages());
+}
+
+static void CheckStealPreconditions(Handle<ArrayBufferObject*> buffer,
+ JSContext* cx) {
+ cx->check(buffer);
+
+ MOZ_ASSERT(!buffer->isDetached(), "can't steal from a detached buffer");
+ MOZ_ASSERT(!buffer->isPreparedForAsmJS(),
+ "asm.js-prepared buffers don't have detachable/stealable data");
+}
+
+/* static */
+bool ArrayBufferObject::wasmGrowToPagesInPlace(
+ wasm::IndexType t, Pages newPages, HandleArrayBufferObject oldBuf,
+ MutableHandleArrayBufferObject newBuf, JSContext* cx) {
+ CheckStealPreconditions(oldBuf, cx);
+
+ MOZ_ASSERT(oldBuf->isWasm());
+
+  // Check that the new page count is within our allowable range. This will
+ // simultaneously check against the maximum specified in source and our
+ // implementation limits.
+ if (newPages > oldBuf->wasmClampedMaxPages()) {
+ return false;
+ }
+ MOZ_ASSERT(newPages <= wasm::MaxMemoryPages(t) &&
+ newPages.byteLength() <= ArrayBufferObject::MaxByteLength);
+
+ // We have checked against the clamped maximum and so we know we can convert
+ // to byte lengths now.
+ size_t newSize = newPages.byteLength();
+
+ // On failure, do not throw and ensure that the original buffer is
+ // unmodified and valid. After WasmArrayRawBuffer::growToPagesInPlace(), the
+ // wasm-visible length of the buffer has been increased so it must be the
+ // last fallible operation.
+
+ newBuf.set(ArrayBufferObject::createEmpty(cx));
+ if (!newBuf) {
+ cx->clearPendingException();
+ return false;
+ }
+
+ MOZ_ASSERT(newBuf->isNoData());
+
+ if (!oldBuf->contents().wasmBuffer()->growToPagesInPlace(newPages)) {
+ return false;
+ }
+
+ // Extract the grown contents from |oldBuf|.
+ BufferContents oldContents = oldBuf->contents();
+
+ // Overwrite |oldBuf|'s data pointer *without* releasing old data.
+ oldBuf->setDataPointer(BufferContents::createNoData());
+
+ // Detach |oldBuf| now that doing so won't release |oldContents|.
+ RemoveCellMemory(oldBuf, oldBuf->byteLength(),
+ MemoryUse::ArrayBufferContents);
+ ArrayBufferObject::detach(cx, oldBuf);
+
+ // Set |newBuf|'s contents to |oldBuf|'s original contents.
+ newBuf->initialize(newSize, oldContents);
+ AddCellMemory(newBuf, newSize, MemoryUse::ArrayBufferContents);
+
+ return true;
+}
+
+/* static */
+bool ArrayBufferObject::wasmMovingGrowToPages(
+ IndexType t, Pages newPages, HandleArrayBufferObject oldBuf,
+ MutableHandleArrayBufferObject newBuf, JSContext* cx) {
+ // On failure, do not throw and ensure that the original buffer is
+ // unmodified and valid.
+
+  // Check that the new page count is within our allowable range. This will
+ // simultaneously check against the maximum specified in source and our
+ // implementation limits.
+ if (newPages > oldBuf->wasmClampedMaxPages()) {
+ return false;
+ }
+ MOZ_ASSERT(newPages <= wasm::MaxMemoryPages(t) &&
+ newPages.byteLength() < ArrayBufferObject::MaxByteLength);
+
+ // We have checked against the clamped maximum and so we know we can convert
+ // to byte lengths now.
+ size_t newSize = newPages.byteLength();
+
+ if (wasm::ComputeMappedSize(newPages) <= oldBuf->wasmMappedSize() ||
+ oldBuf->contents().wasmBuffer()->extendMappedSize(newPages)) {
+ return wasmGrowToPagesInPlace(t, newPages, oldBuf, newBuf, cx);
+ }
+
+ newBuf.set(ArrayBufferObject::createEmpty(cx));
+ if (!newBuf) {
+ cx->clearPendingException();
+ return false;
+ }
+
+ Pages clampedMaxPages =
+ wasm::ClampedMaxPages(t, newPages, Nothing(), /* hugeMemory */ false);
+ WasmArrayRawBuffer* newRawBuf = WasmArrayRawBuffer::AllocateWasm(
+ oldBuf->wasmIndexType(), newPages, clampedMaxPages, Nothing(), Nothing());
+ if (!newRawBuf) {
+ return false;
+ }
+
+ AddCellMemory(newBuf, newSize, MemoryUse::ArrayBufferContents);
+
+ BufferContents contents =
+ BufferContents::createWasm(newRawBuf->dataPointer());
+ newBuf->initialize(newSize, contents);
+
+ memcpy(newBuf->dataPointer(), oldBuf->dataPointer(), oldBuf->byteLength());
+ ArrayBufferObject::detach(cx, oldBuf);
+ return true;
+}
+
+/* static */
+void ArrayBufferObject::wasmDiscard(HandleArrayBufferObject buf,
+ uint64_t byteOffset, uint64_t byteLen) {
+ MOZ_ASSERT(buf->isWasm());
+ buf->contents().wasmBuffer()->discard(byteOffset, byteLen);
+}
+
+uint32_t ArrayBufferObject::flags() const {
+ return uint32_t(getFixedSlot(FLAGS_SLOT).toInt32());
+}
+
+void ArrayBufferObject::setFlags(uint32_t flags) {
+ setFixedSlot(FLAGS_SLOT, Int32Value(flags));
+}
+
+static inline js::gc::AllocKind GetArrayBufferGCObjectKind(size_t numSlots) {
+ if (numSlots <= 4) {
+ return js::gc::AllocKind::ARRAYBUFFER4;
+ }
+ if (numSlots <= 8) {
+ return js::gc::AllocKind::ARRAYBUFFER8;
+ }
+ if (numSlots <= 12) {
+ return js::gc::AllocKind::ARRAYBUFFER12;
+ }
+ return js::gc::AllocKind::ARRAYBUFFER16;
+}
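+
+/*
+ * Worked example (with RESERVED_SLOTS == 4 and 8-byte Values): a buffer
+ * with no inline data needs 4 slots and gets ARRAYBUFFER4, while 96 bytes
+ * of inline data need 4 + 96/8 == 16 slots and get ARRAYBUFFER16.
+ */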
+
+static ArrayBufferObject* NewArrayBufferObject(JSContext* cx,
+ HandleObject proto_,
+ gc::AllocKind allocKind) {
+ MOZ_ASSERT(allocKind == gc::AllocKind::ARRAYBUFFER4 ||
+ allocKind == gc::AllocKind::ARRAYBUFFER8 ||
+ allocKind == gc::AllocKind::ARRAYBUFFER12 ||
+ allocKind == gc::AllocKind::ARRAYBUFFER16);
+
+ RootedObject proto(cx, proto_);
+ if (!proto) {
+ proto = GlobalObject::getOrCreatePrototype(cx, JSProto_ArrayBuffer);
+ if (!proto) {
+ return nullptr;
+ }
+ }
+
+ const JSClass* clasp = &ArrayBufferObject::class_;
+
+  // Array buffers can store data inline, so we only use fixed slots to cover
+  // the reserved slots, ignoring the AllocKind.
+ MOZ_ASSERT(ClassCanHaveFixedData(clasp));
+ constexpr size_t nfixed = ArrayBufferObject::RESERVED_SLOTS;
+ static_assert(nfixed <= NativeObject::MAX_FIXED_SLOTS);
+
+ Rooted<SharedShape*> shape(
+ cx,
+ SharedShape::getInitialShape(cx, clasp, cx->realm(), AsTaggedProto(proto),
+ nfixed, ObjectFlags()));
+ if (!shape) {
+ return nullptr;
+ }
+
+ // Array buffers can't be nursery allocated but can be background-finalized.
+ MOZ_ASSERT(IsBackgroundFinalized(allocKind));
+ MOZ_ASSERT(!CanNurseryAllocateFinalizedClass(clasp));
+ constexpr gc::Heap heap = gc::Heap::Tenured;
+
+ NativeObject* obj = NativeObject::create(cx, allocKind, heap, shape);
+ if (!obj) {
+ return nullptr;
+ }
+
+ return &obj->as<ArrayBufferObject>();
+}
+
+// Creates a new ArrayBufferObject with %ArrayBuffer.prototype% as proto and no
+// space for inline data.
+static ArrayBufferObject* NewArrayBufferObject(JSContext* cx) {
+ static_assert(ArrayBufferObject::RESERVED_SLOTS == 4);
+ return NewArrayBufferObject(cx, nullptr, gc::AllocKind::ARRAYBUFFER4);
+}
+
+ArrayBufferObject* ArrayBufferObject::createForContents(
+ JSContext* cx, size_t nbytes, BufferContents contents) {
+ MOZ_ASSERT(contents);
+ MOZ_ASSERT(contents.kind() != INLINE_DATA);
+ MOZ_ASSERT(contents.kind() != NO_DATA);
+ MOZ_ASSERT(contents.kind() != WASM);
+
+ // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2).
+ if (!CheckArrayBufferTooLarge(cx, nbytes)) {
+ return nullptr;
+ }
+
+ // Some |contents| kinds need to store extra data in the ArrayBuffer beyond a
+ // data pointer. If needed for the particular kind, add extra fixed slots to
+ // the ArrayBuffer for use as raw storage to store such information.
+ constexpr size_t reservedSlots = ArrayBufferObject::RESERVED_SLOTS;
+
+ size_t nAllocated = 0;
+ size_t nslots = reservedSlots;
+ if (contents.kind() == USER_OWNED) {
+ // No accounting to do in this case.
+ } else if (contents.kind() == EXTERNAL) {
+ // Store the FreeInfo in the inline data slots so that we
+ // don't use up slots for it in non-refcounted array buffers.
+ size_t freeInfoSlots = HowMany(sizeof(FreeInfo), sizeof(Value));
+ MOZ_ASSERT(reservedSlots + freeInfoSlots <= NativeObject::MAX_FIXED_SLOTS,
+ "FreeInfo must fit in inline slots");
+ nslots += freeInfoSlots;
+ } else {
+ // The ABO is taking ownership, so account the bytes against the zone.
+ nAllocated = nbytes;
+ if (contents.kind() == MAPPED) {
+ nAllocated = RoundUp(nbytes, js::gc::SystemPageSize());
+ } else {
+ MOZ_ASSERT(contents.kind() == MALLOCED,
+ "should have handled all possible callers' kinds");
+ }
+ }
+
+ gc::AllocKind allocKind = GetArrayBufferGCObjectKind(nslots);
+
+ AutoSetNewObjectMetadata metadata(cx);
+ Rooted<ArrayBufferObject*> buffer(
+ cx, NewArrayBufferObject(cx, nullptr, allocKind));
+ if (!buffer) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!gc::IsInsideNursery(buffer),
+ "ArrayBufferObject has a finalizer that must be called to not "
+ "leak in some cases, so it can't be nursery-allocated");
+
+ buffer->initialize(nbytes, contents);
+
+ if (contents.kind() == MAPPED || contents.kind() == MALLOCED) {
+ AddCellMemory(buffer, nAllocated, MemoryUse::ArrayBufferContents);
+ }
+
+ return buffer;
+}
+
+template <ArrayBufferObject::FillContents FillType>
+/* static */ std::tuple<ArrayBufferObject*, uint8_t*>
+ArrayBufferObject::createBufferAndData(
+ JSContext* cx, size_t nbytes, AutoSetNewObjectMetadata&,
+ JS::Handle<JSObject*> proto /* = nullptr */) {
+ MOZ_ASSERT(nbytes <= ArrayBufferObject::MaxByteLength,
+ "caller must validate the byte count it passes");
+
+ // Try fitting the data inline with the object by repurposing fixed-slot
+ // storage. Add extra fixed slots if necessary to accomplish this, but don't
+ // exceed the maximum number of fixed slots!
+ size_t nslots = ArrayBufferObject::RESERVED_SLOTS;
+ ArrayBufferContents data;
+ if (nbytes <= MaxInlineBytes) {
+ int newSlots = HowMany(nbytes, sizeof(Value));
+ MOZ_ASSERT(int(nbytes) <= newSlots * int(sizeof(Value)));
+
+ nslots += newSlots;
+ } else {
+ data = FillType == FillContents::Uninitialized
+ ? AllocateUninitializedArrayBufferContents(cx, nbytes)
+ : AllocateArrayBufferContents(cx, nbytes);
+ if (!data) {
+ return {nullptr, nullptr};
+ }
+ }
+
+ gc::AllocKind allocKind = GetArrayBufferGCObjectKind(nslots);
+
+ ArrayBufferObject* buffer = NewArrayBufferObject(cx, proto, allocKind);
+ if (!buffer) {
+ return {nullptr, nullptr};
+ }
+
+ MOZ_ASSERT(!gc::IsInsideNursery(buffer),
+ "ArrayBufferObject has a finalizer that must be called to not "
+ "leak in some cases, so it can't be nursery-allocated");
+
+ uint8_t* toFill;
+ if (data) {
+ toFill = data.release();
+ buffer->initialize(nbytes, BufferContents::createMalloced(toFill));
+ AddCellMemory(buffer, nbytes, MemoryUse::ArrayBufferContents);
+ } else {
+ toFill = static_cast<uint8_t*>(buffer->initializeToInlineData(nbytes));
+ if constexpr (FillType == FillContents::Zero) {
+ memset(toFill, 0, nbytes);
+ }
+ }
+
+ return {buffer, toFill};
+}
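+
+/*
+ * Usage sketch (illustrative, mirroring the internal callers below; assumes
+ * 32 <= MaxInlineBytes): allocate a zero-filled 32-byte buffer. Because 32
+ * bytes fit inline, |toFill| points into the object's fixed slots and no
+ * malloc happens.
+ *
+ *   AutoSetNewObjectMetadata metadata(cx);
+ *   auto [buf, toFill] =
+ *       createBufferAndData<FillContents::Zero>(cx, 32, metadata, nullptr);
+ *   if (buf) {
+ *     MOZ_ASSERT(buf->byteLength() == 32);
+ *   }
+ */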
+
+/* static */ ArrayBufferObject* ArrayBufferObject::copy(
+ JSContext* cx, JS::Handle<ArrayBufferObject*> unwrappedArrayBuffer) {
+ if (unwrappedArrayBuffer->isDetached()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return nullptr;
+ }
+
+ size_t nbytes = unwrappedArrayBuffer->byteLength();
+
+ AutoSetNewObjectMetadata metadata(cx);
+ auto [buffer, toFill] = createBufferAndData<FillContents::Uninitialized>(
+ cx, nbytes, metadata, nullptr);
+ if (!buffer) {
+ return nullptr;
+ }
+
+ std::uninitialized_copy_n(unwrappedArrayBuffer->dataPointer(), nbytes,
+ toFill);
+ return buffer;
+}
+
+ArrayBufferObject* ArrayBufferObject::createZeroed(
+ JSContext* cx, size_t nbytes, HandleObject proto /* = nullptr */) {
+ // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2).
+ if (!CheckArrayBufferTooLarge(cx, nbytes)) {
+ return nullptr;
+ }
+
+ AutoSetNewObjectMetadata metadata(cx);
+ auto [buffer, toFill] =
+ createBufferAndData<FillContents::Zero>(cx, nbytes, metadata, proto);
+ (void)toFill;
+ return buffer;
+}
+
+ArrayBufferObject* ArrayBufferObject::createEmpty(JSContext* cx) {
+ AutoSetNewObjectMetadata metadata(cx);
+ ArrayBufferObject* obj = NewArrayBufferObject(cx);
+ if (!obj) {
+ return nullptr;
+ }
+
+ obj->initialize(0, BufferContents::createNoData());
+ return obj;
+}
+
+ArrayBufferObject* ArrayBufferObject::createFromNewRawBuffer(
+ JSContext* cx, WasmArrayRawBuffer* rawBuffer, size_t initialSize) {
+ AutoSetNewObjectMetadata metadata(cx);
+ ArrayBufferObject* buffer = NewArrayBufferObject(cx);
+ if (!buffer) {
+ WasmArrayRawBuffer::Release(rawBuffer->dataPointer());
+ return nullptr;
+ }
+
+ MOZ_ASSERT(initialSize == rawBuffer->byteLength());
+
+ buffer->setByteLength(initialSize);
+ buffer->setFlags(0);
+ buffer->setFirstView(nullptr);
+
+ auto contents = BufferContents::createWasm(rawBuffer->dataPointer());
+ buffer->setDataPointer(contents);
+
+ AddCellMemory(buffer, initialSize, MemoryUse::ArrayBufferContents);
+
+ return buffer;
+}
+
+/* static */ uint8_t* ArrayBufferObject::stealMallocedContents(
+ JSContext* cx, Handle<ArrayBufferObject*> buffer) {
+ CheckStealPreconditions(buffer, cx);
+
+ switch (buffer->bufferKind()) {
+ case MALLOCED: {
+ uint8_t* stolenData = buffer->dataPointer();
+ MOZ_ASSERT(stolenData);
+
+ RemoveCellMemory(buffer, buffer->byteLength(),
+ MemoryUse::ArrayBufferContents);
+
+ // Overwrite the old data pointer *without* releasing the contents
+ // being stolen.
+ buffer->setDataPointer(BufferContents::createNoData());
+
+ // Detach |buffer| now that doing so won't free |stolenData|.
+ ArrayBufferObject::detach(cx, buffer);
+ return stolenData;
+ }
+
+ case INLINE_DATA:
+ case NO_DATA:
+ case USER_OWNED:
+ case MAPPED:
+ case EXTERNAL: {
+ // We can't use these data types directly. Make a copy to return.
+ ArrayBufferContents copiedData = NewCopiedBufferContents(cx, buffer);
+ if (!copiedData) {
+ return nullptr;
+ }
+
+ // Detach |buffer|. This immediately releases the currently owned
+ // contents, freeing or unmapping data in the MAPPED and EXTERNAL cases.
+ ArrayBufferObject::detach(cx, buffer);
+ return copiedData.release();
+ }
+
+ case WASM:
+ MOZ_ASSERT_UNREACHABLE(
+ "wasm buffers aren't stealable except by a "
+ "memory.grow operation that shouldn't call this "
+ "function");
+ return nullptr;
+
+ case BAD1:
+ MOZ_ASSERT_UNREACHABLE("bad kind when stealing malloc'd data");
+ return nullptr;
+ }
+
+ MOZ_ASSERT_UNREACHABLE("garbage kind computed");
+ return nullptr;
+}
+
+/* static */ ArrayBufferObject::BufferContents
+ArrayBufferObject::extractStructuredCloneContents(
+ JSContext* cx, Handle<ArrayBufferObject*> buffer) {
+ CheckStealPreconditions(buffer, cx);
+
+ BufferContents contents = buffer->contents();
+
+ switch (contents.kind()) {
+ case INLINE_DATA:
+ case NO_DATA:
+ case USER_OWNED: {
+ ArrayBufferContents copiedData = NewCopiedBufferContents(cx, buffer);
+ if (!copiedData) {
+ return BufferContents::createFailed();
+ }
+
+ ArrayBufferObject::detach(cx, buffer);
+ return BufferContents::createMalloced(copiedData.release());
+ }
+
+ case MALLOCED:
+ case MAPPED: {
+ MOZ_ASSERT(contents);
+
+ RemoveCellMemory(buffer, buffer->associatedBytes(),
+ MemoryUse::ArrayBufferContents);
+
+ // Overwrite the old data pointer *without* releasing old data.
+ buffer->setDataPointer(BufferContents::createNoData());
+
+ // Detach |buffer| now that doing so won't release |oldContents|.
+ ArrayBufferObject::detach(cx, buffer);
+ return contents;
+ }
+
+ case WASM:
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_TRANSFER);
+ return BufferContents::createFailed();
+
+ case EXTERNAL:
+ MOZ_ASSERT_UNREACHABLE(
+ "external ArrayBuffer shouldn't have passed the "
+ "structured-clone preflighting");
+ break;
+
+ case BAD1:
+ MOZ_ASSERT_UNREACHABLE("bad kind when stealing malloc'd data");
+ break;
+ }
+
+ MOZ_ASSERT_UNREACHABLE("garbage kind computed");
+ return BufferContents::createFailed();
+}
+
+/* static */
+void ArrayBufferObject::addSizeOfExcludingThis(
+ JSObject* obj, mozilla::MallocSizeOf mallocSizeOf, JS::ClassInfo* info,
+ JS::RuntimeSizes* runtimeSizes) {
+ auto& buffer = obj->as<ArrayBufferObject>();
+ switch (buffer.bufferKind()) {
+ case INLINE_DATA:
+ // Inline data's size should be reported by this object's size-class
+ // reporting.
+ break;
+ case MALLOCED:
+ if (buffer.isPreparedForAsmJS()) {
+ info->objectsMallocHeapElementsAsmJS +=
+ mallocSizeOf(buffer.dataPointer());
+ } else {
+ info->objectsMallocHeapElementsNormal +=
+ mallocSizeOf(buffer.dataPointer());
+ }
+ break;
+ case NO_DATA:
+ // No data is no memory.
+ MOZ_ASSERT(buffer.dataPointer() == nullptr);
+ break;
+ case USER_OWNED:
+ // User-owned data should be accounted for by the user.
+ break;
+ case EXTERNAL:
+ // External data will be accounted for by the owner of the buffer,
+ // not this view.
+ break;
+ case MAPPED:
+ info->objectsNonHeapElementsNormal += buffer.byteLength();
+ break;
+ case WASM:
+ if (!buffer.isDetached()) {
+ info->objectsNonHeapElementsWasm += buffer.byteLength();
+ if (runtimeSizes) {
+ MOZ_ASSERT(buffer.wasmMappedSize() >= buffer.byteLength());
+ runtimeSizes->wasmGuardPages +=
+ buffer.wasmMappedSize() - buffer.byteLength();
+ }
+ }
+ break;
+ case BAD1:
+ MOZ_CRASH("bad bufferKind()");
+ }
+}
+
+/* static */
+void ArrayBufferObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ obj->as<ArrayBufferObject>().releaseData(gcx);
+}
+
+/* static */
+void ArrayBufferObject::copyData(Handle<ArrayBufferObject*> toBuffer,
+ size_t toIndex,
+ Handle<ArrayBufferObject*> fromBuffer,
+ size_t fromIndex, size_t count) {
+ MOZ_ASSERT(toBuffer->byteLength() >= count);
+ MOZ_ASSERT(toBuffer->byteLength() >= toIndex + count);
+ MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex);
+ MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex + count);
+
+ memcpy(toBuffer->dataPointer() + toIndex,
+ fromBuffer->dataPointer() + fromIndex, count);
+}
+
+/* static */
+size_t ArrayBufferObject::objectMoved(JSObject* obj, JSObject* old) {
+ ArrayBufferObject& dst = obj->as<ArrayBufferObject>();
+ const ArrayBufferObject& src = old->as<ArrayBufferObject>();
+
+ // Fix up possible inline data pointer.
+ if (src.hasInlineData()) {
+ dst.setFixedSlot(DATA_SLOT, PrivateValue(dst.inlineDataPointer()));
+ }
+
+ return 0;
+}
+
+JSObject* ArrayBufferObject::firstView() {
+ return getFixedSlot(FIRST_VIEW_SLOT).isObject()
+ ? &getFixedSlot(FIRST_VIEW_SLOT).toObject()
+ : nullptr;
+}
+
+void ArrayBufferObject::setFirstView(ArrayBufferViewObject* view) {
+ setFixedSlot(FIRST_VIEW_SLOT, ObjectOrNullValue(view));
+}
+
+bool ArrayBufferObject::addView(JSContext* cx, ArrayBufferViewObject* view) {
+ if (!firstView()) {
+ setFirstView(view);
+ return true;
+ }
+
+ return ObjectRealm::get(this).innerViews.get().addView(cx, this, view);
+}
+
+/*
+ * InnerViewTable
+ */
+
+constexpr size_t VIEW_LIST_MAX_LENGTH = 500;
+
+bool InnerViewTable::addView(JSContext* cx, ArrayBufferObject* buffer,
+ JSObject* view) {
+ // ArrayBufferObject entries are only added when there are multiple views.
+ MOZ_ASSERT(buffer->firstView());
+
+ Map::AddPtr p = map.lookupForAdd(buffer);
+
+ MOZ_ASSERT(!gc::IsInsideNursery(buffer));
+ bool addToNursery = nurseryKeysValid && gc::IsInsideNursery(view);
+
+ if (p) {
+ ViewVector& views = p->value();
+ MOZ_ASSERT(!views.empty());
+
+ if (addToNursery) {
+ // Only add the entry to |nurseryKeys| if it isn't already there.
+ if (views.length() >= VIEW_LIST_MAX_LENGTH) {
+ // To avoid quadratic blowup, skip the loop below if we end up
+ // adding enormous numbers of views for the same object.
+ nurseryKeysValid = false;
+ } else {
+ for (size_t i = 0; i < views.length(); i++) {
+ if (gc::IsInsideNursery(views[i])) {
+ addToNursery = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!views.append(view)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ } else {
+ if (!map.add(p, buffer, ViewVector(cx->zone()))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ // ViewVector has one inline element, so the first insertion is
+ // guaranteed to succeed.
+ MOZ_ALWAYS_TRUE(p->value().append(view));
+ }
+
+ if (addToNursery && !nurseryKeys.append(buffer)) {
+ nurseryKeysValid = false;
+ }
+
+ return true;
+}
+
+InnerViewTable::ViewVector* InnerViewTable::maybeViewsUnbarriered(
+ ArrayBufferObject* buffer) {
+ Map::Ptr p = map.lookup(buffer);
+ if (p) {
+ return &p->value();
+ }
+ return nullptr;
+}
+
+void InnerViewTable::removeViews(ArrayBufferObject* buffer) {
+ Map::Ptr p = map.lookup(buffer);
+ MOZ_ASSERT(p);
+
+ map.remove(p);
+}
+
+bool InnerViewTable::traceWeak(JSTracer* trc) { return map.traceWeak(trc); }
+
+void InnerViewTable::sweepAfterMinorGC(JSTracer* trc) {
+ MOZ_ASSERT(needsSweepAfterMinorGC());
+
+ if (nurseryKeysValid) {
+ for (size_t i = 0; i < nurseryKeys.length(); i++) {
+ JSObject* buffer = MaybeForwarded(nurseryKeys[i]);
+ Map::Ptr p = map.lookup(buffer);
+ if (p &&
+ !Map::EntryGCPolicy::traceWeak(trc, &p->mutableKey(), &p->value())) {
+ map.remove(p);
+ }
+ }
+ } else {
+ // Do the required sweeping by looking at every map entry.
+ map.traceWeak(trc);
+ }
+
+ nurseryKeys.clear();
+ nurseryKeysValid = true;
+}
+
+size_t InnerViewTable::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ size_t vectorSize = 0;
+ for (Map::Enum e(map); !e.empty(); e.popFront()) {
+ vectorSize += e.front().value().sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ return vectorSize + map.shallowSizeOfExcludingThis(mallocSizeOf) +
+ nurseryKeys.sizeOfExcludingThis(mallocSizeOf);
+}
+
+template <>
+bool JSObject::is<js::ArrayBufferObjectMaybeShared>() const {
+ return is<ArrayBufferObject>() || is<SharedArrayBufferObject>();
+}
+
+JS_PUBLIC_API size_t JS::GetArrayBufferByteLength(JSObject* obj) {
+ ArrayBufferObject* aobj = obj->maybeUnwrapAs<ArrayBufferObject>();
+ return aobj ? aobj->byteLength() : 0;
+}
+
+JS_PUBLIC_API uint8_t* JS::GetArrayBufferData(JSObject* obj,
+ bool* isSharedMemory,
+ const JS::AutoRequireNoGC&) {
+ ArrayBufferObject* aobj = obj->maybeUnwrapIf<ArrayBufferObject>();
+ if (!aobj) {
+ return nullptr;
+ }
+ *isSharedMemory = false;
+ return aobj->dataPointer();
+}
+
+static ArrayBufferObject* UnwrapOrReportArrayBuffer(
+ JSContext* cx, JS::Handle<JSObject*> maybeArrayBuffer) {
+ JSObject* obj = CheckedUnwrapStatic(maybeArrayBuffer);
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+
+ if (!obj->is<ArrayBufferObject>()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_ARRAYBUFFER_REQUIRED);
+ return nullptr;
+ }
+
+ return &obj->as<ArrayBufferObject>();
+}
+
+JS_PUBLIC_API bool JS::DetachArrayBuffer(JSContext* cx, HandleObject obj) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ Rooted<ArrayBufferObject*> unwrappedBuffer(
+ cx, UnwrapOrReportArrayBuffer(cx, obj));
+ if (!unwrappedBuffer) {
+ return false;
+ }
+
+ if (unwrappedBuffer->isWasm() || unwrappedBuffer->isPreparedForAsmJS()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_TRANSFER);
+ return false;
+ }
+
+ AutoRealm ar(cx, unwrappedBuffer);
+ ArrayBufferObject::detach(cx, unwrappedBuffer);
+ return true;
+}
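+
+/*
+ * Embedder usage sketch (illustrative): detach a buffer we created.
+ *
+ *   JS::RootedObject buf(cx, JS::NewArrayBuffer(cx, 1024));
+ *   if (buf && JS::DetachArrayBuffer(cx, buf)) {
+ *     MOZ_ASSERT(JS::IsDetachedArrayBufferObject(buf));
+ *     MOZ_ASSERT(JS::GetArrayBufferByteLength(buf) == 0);
+ *   }
+ */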
+
+JS_PUBLIC_API bool JS::HasDefinedArrayBufferDetachKey(JSContext* cx,
+ HandleObject obj,
+ bool* isDefined) {
+ Rooted<ArrayBufferObject*> unwrappedBuffer(
+ cx, UnwrapOrReportArrayBuffer(cx, obj));
+ if (!unwrappedBuffer) {
+ return false;
+ }
+
+ if (unwrappedBuffer->isWasm() || unwrappedBuffer->isPreparedForAsmJS()) {
+ *isDefined = true;
+ }
+
+ return true;
+}
+
+JS_PUBLIC_API bool JS::IsDetachedArrayBufferObject(JSObject* obj) {
+ ArrayBufferObject* aobj = obj->maybeUnwrapIf<ArrayBufferObject>();
+ if (!aobj) {
+ return false;
+ }
+
+ return aobj->isDetached();
+}
+
+JS_PUBLIC_API JSObject* JS::NewArrayBuffer(JSContext* cx, size_t nbytes) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ return ArrayBufferObject::createZeroed(cx, nbytes);
+}
+
+JS_PUBLIC_API JSObject* JS::NewArrayBufferWithContents(JSContext* cx,
+ size_t nbytes,
+ void* data) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_ASSERT_IF(!data, nbytes == 0);
+
+ if (!data) {
+ // Don't pass nulled contents to |createForContents|.
+ return ArrayBufferObject::createZeroed(cx, 0);
+ }
+
+ using BufferContents = ArrayBufferObject::BufferContents;
+
+ BufferContents contents = BufferContents::createMalloced(data);
+ return ArrayBufferObject::createForContents(cx, nbytes, contents);
+}
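+
+/*
+ * Usage sketch (illustrative): hand malloc'd bytes to the engine. The data
+ * must come from the JS allocator (js_malloc and friends) because the
+ * buffer's finalizer will free it; we assume ownership transfers only on
+ * success.
+ *
+ *   void* bytes = js_malloc(1024);
+ *   JSObject* buf =
+ *       bytes ? JS::NewArrayBufferWithContents(cx, 1024, bytes) : nullptr;
+ *   if (bytes && !buf) {
+ *     js_free(bytes);  // creation failed; we still own the bytes
+ *   }
+ */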
+
+JS_PUBLIC_API JSObject* JS::CopyArrayBuffer(JSContext* cx,
+ Handle<JSObject*> arrayBuffer) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ MOZ_ASSERT(arrayBuffer != nullptr);
+
+ Rooted<ArrayBufferObject*> unwrappedSource(
+ cx, UnwrapOrReportArrayBuffer(cx, arrayBuffer));
+ if (!unwrappedSource) {
+ return nullptr;
+ }
+
+ return ArrayBufferObject::copy(cx, unwrappedSource);
+}
+
+JS_PUBLIC_API JSObject* JS::NewExternalArrayBuffer(
+ JSContext* cx, size_t nbytes, void* data,
+ JS::BufferContentsFreeFunc freeFunc, void* freeUserData) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ MOZ_ASSERT(data);
+
+ using BufferContents = ArrayBufferObject::BufferContents;
+
+ BufferContents contents =
+ BufferContents::createExternal(data, freeFunc, freeUserData);
+ return ArrayBufferObject::createForContents(cx, nbytes, contents);
+}
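+
+/*
+ * Usage sketch (illustrative): expose embedder-owned memory whose lifetime
+ * is managed by a callback. |freeFunc| runs when the buffer is finalized or
+ * detached, and must not GC.
+ *
+ *   static void FreeBytes(void* contents, void* userData) { free(contents); }
+ *
+ *   void* bytes = calloc(1, 4096);
+ *   JSObject* buf = bytes ? JS::NewExternalArrayBuffer(cx, 4096, bytes,
+ *                                                      FreeBytes, nullptr)
+ *                         : nullptr;
+ */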
+
+JS_PUBLIC_API JSObject* JS::NewArrayBufferWithUserOwnedContents(JSContext* cx,
+ size_t nbytes,
+ void* data) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ MOZ_ASSERT(data);
+
+ using BufferContents = ArrayBufferObject::BufferContents;
+
+ BufferContents contents = BufferContents::createUserOwned(data);
+ return ArrayBufferObject::createForContents(cx, nbytes, contents);
+}
+
+JS_PUBLIC_API bool JS::IsArrayBufferObject(JSObject* obj) {
+ return obj->canUnwrapAs<ArrayBufferObject>();
+}
+
+JS_PUBLIC_API bool JS::ArrayBufferHasData(JSObject* obj) {
+ return !obj->unwrapAs<ArrayBufferObject>().isDetached();
+}
+
+JS_PUBLIC_API JSObject* JS::UnwrapArrayBuffer(JSObject* obj) {
+ return obj->maybeUnwrapIf<ArrayBufferObject>();
+}
+
+JS_PUBLIC_API JSObject* JS::UnwrapSharedArrayBuffer(JSObject* obj) {
+ return obj->maybeUnwrapIf<SharedArrayBufferObject>();
+}
+
+JS_PUBLIC_API void* JS::StealArrayBufferContents(JSContext* cx,
+ HandleObject obj) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ Rooted<ArrayBufferObject*> unwrappedBuffer(
+ cx, UnwrapOrReportArrayBuffer(cx, obj));
+ if (!unwrappedBuffer) {
+ return nullptr;
+ }
+
+ if (unwrappedBuffer->isDetached()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return nullptr;
+ }
+
+ if (unwrappedBuffer->isWasm() || unwrappedBuffer->isPreparedForAsmJS()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_TRANSFER);
+ return nullptr;
+ }
+
+ AutoRealm ar(cx, unwrappedBuffer);
+ return ArrayBufferObject::stealMallocedContents(cx, unwrappedBuffer);
+}
+
+JS_PUBLIC_API JSObject* JS::NewMappedArrayBufferWithContents(JSContext* cx,
+ size_t nbytes,
+ void* data) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ MOZ_ASSERT(data);
+
+ using BufferContents = ArrayBufferObject::BufferContents;
+
+ BufferContents contents = BufferContents::createMapped(data);
+ return ArrayBufferObject::createForContents(cx, nbytes, contents);
+}
+
+JS_PUBLIC_API void* JS::CreateMappedArrayBufferContents(int fd, size_t offset,
+ size_t length) {
+ return ArrayBufferObject::createMappedContents(fd, offset, length).data();
+}
+
+JS_PUBLIC_API void JS::ReleaseMappedArrayBufferContents(void* contents,
+ size_t length) {
+ gc::DeallocateMappedContent(contents, length);
+}
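+
+/*
+ * Mapped-contents lifecycle sketch (illustrative; |fd| is a hypothetical
+ * open file descriptor, and we assume the caller keeps ownership of the
+ * mapping until buffer creation succeeds):
+ *
+ *   void* contents = JS::CreateMappedArrayBufferContents(fd, 0, 4096);
+ *   JSObject* buf =
+ *       contents ? JS::NewMappedArrayBufferWithContents(cx, 4096, contents)
+ *                : nullptr;
+ *   if (contents && !buf) {
+ *     JS::ReleaseMappedArrayBufferContents(contents, 4096);
+ *   }
+ */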
+
+JS_PUBLIC_API bool JS::IsMappedArrayBufferObject(JSObject* obj) {
+ ArrayBufferObject* aobj = obj->maybeUnwrapIf<ArrayBufferObject>();
+ if (!aobj) {
+ return false;
+ }
+
+ return aobj->isMapped();
+}
+
+JS_PUBLIC_API JSObject* JS::GetObjectAsArrayBuffer(JSObject* obj,
+ size_t* length,
+ uint8_t** data) {
+ ArrayBufferObject* aobj = obj->maybeUnwrapIf<ArrayBufferObject>();
+ if (!aobj) {
+ return nullptr;
+ }
+
+ *length = aobj->byteLength();
+ *data = aobj->dataPointer();
+
+ return aobj;
+}
+
+JS_PUBLIC_API void JS::GetArrayBufferLengthAndData(JSObject* obj,
+ size_t* length,
+ bool* isSharedMemory,
+ uint8_t** data) {
+ auto& aobj = obj->as<ArrayBufferObject>();
+ *length = aobj.byteLength();
+ *data = aobj.dataPointer();
+ *isSharedMemory = false;
+}
+
+const JSClass* const JS::ArrayBuffer::UnsharedClass =
+ &ArrayBufferObject::class_;
+const JSClass* const JS::ArrayBuffer::SharedClass =
+ &SharedArrayBufferObject::class_;
+
+/* static */ JS::ArrayBuffer JS::ArrayBuffer::create(JSContext* cx,
+ size_t nbytes) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ return JS::ArrayBuffer(ArrayBufferObject::createZeroed(cx, nbytes));
+}
+
+uint8_t* JS::ArrayBuffer::getLengthAndData(size_t* length, bool* isSharedMemory,
+ const JS::AutoRequireNoGC& nogc) {
+ auto* buffer = obj->maybeUnwrapAs<ArrayBufferObjectMaybeShared>();
+ if (!buffer) {
+ return nullptr;
+ }
+ *length = buffer->byteLength();
+ if (buffer->is<SharedArrayBufferObject>()) {
+ *isSharedMemory = true;
+ return buffer->dataPointerEither().unwrap();
+ }
+ *isSharedMemory = false;
+ return buffer->as<ArrayBufferObject>().dataPointer();
+}
+
+JS::ArrayBuffer JS::ArrayBuffer::unwrap(JSObject* maybeWrapped) {
+ if (!maybeWrapped) {
+ return JS::ArrayBuffer(nullptr);
+ }
+ auto* ab = maybeWrapped->maybeUnwrapIf<ArrayBufferObjectMaybeShared>();
+ return fromObject(ab);
+}
+
+bool JS::ArrayBufferCopyData(JSContext* cx, Handle<JSObject*> toBlock,
+ size_t toIndex, Handle<JSObject*> fromBlock,
+ size_t fromIndex, size_t count) {
+ Rooted<ArrayBufferObjectMaybeShared*> unwrappedToBlock(
+ cx, toBlock->maybeUnwrapIf<ArrayBufferObjectMaybeShared>());
+ if (!unwrappedToBlock) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ Rooted<ArrayBufferObjectMaybeShared*> unwrappedFromBlock(
+ cx, fromBlock->maybeUnwrapIf<ArrayBufferObjectMaybeShared>());
+ if (!unwrappedFromBlock) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ // Verify that lengths still make sense and throw otherwise.
+ if (toIndex + count < toIndex || // size_t overflow
+ fromIndex + count < fromIndex || // size_t overflow
+ toIndex + count > unwrappedToBlock->byteLength() ||
+ fromIndex + count > unwrappedFromBlock->byteLength()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_ARRAYBUFFER_COPY_RANGE);
+ return false;
+ }
+
+  // If both are unshared array buffers, use ArrayBufferObject::copyData.
+ if (unwrappedToBlock->is<ArrayBufferObject>() &&
+ unwrappedFromBlock->is<ArrayBufferObject>()) {
+ Rooted<ArrayBufferObject*> toArray(
+ cx, &unwrappedToBlock->as<ArrayBufferObject>());
+ Rooted<ArrayBufferObject*> fromArray(
+ cx, &unwrappedFromBlock->as<ArrayBufferObject>());
+ ArrayBufferObject::copyData(toArray, toIndex, fromArray, fromIndex, count);
+ return true;
+ }
+
+ Rooted<ArrayBufferObjectMaybeShared*> toArray(
+ cx, &unwrappedToBlock->as<ArrayBufferObjectMaybeShared>());
+ Rooted<ArrayBufferObjectMaybeShared*> fromArray(
+ cx, &unwrappedFromBlock->as<ArrayBufferObjectMaybeShared>());
+ SharedArrayBufferObject::copyData(toArray, toIndex, fromArray, fromIndex,
+ count);
+
+ return true;
+}
+
+// https://tc39.es/ecma262/#sec-clonearraybuffer
+// We only support the case where cloneConstructor is %ArrayBuffer%. Note that
+// this means cloning a SharedArrayBuffer will produce an ArrayBuffer.
+JSObject* JS::ArrayBufferClone(JSContext* cx, Handle<JSObject*> srcBuffer,
+ size_t srcByteOffset, size_t srcLength) {
+ MOZ_ASSERT(srcBuffer->is<ArrayBufferObjectMaybeShared>());
+
+ // 2. (reordered) If IsDetachedBuffer(srcBuffer) is true, throw a TypeError
+ // exception.
+ if (IsDetachedArrayBufferObject(srcBuffer)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return nullptr;
+ }
+
+ // 1. Let targetBuffer be ? AllocateArrayBuffer(cloneConstructor, srcLength).
+ JS::RootedObject targetBuffer(cx, JS::NewArrayBuffer(cx, srcLength));
+ if (!targetBuffer) {
+ return nullptr;
+ }
+
+ // 3. Let srcBlock be srcBuffer.[[ArrayBufferData]].
+ // 4. Let targetBlock be targetBuffer.[[ArrayBufferData]].
+ // 5. Perform CopyDataBlockBytes(targetBlock, 0, srcBlock, srcByteOffset,
+ // srcLength).
+ if (!ArrayBufferCopyData(cx, targetBuffer, 0, srcBuffer, srcByteOffset,
+ srcLength)) {
+ return nullptr;
+ }
+
+ // 6. Return targetBuffer.
+ return targetBuffer;
+}
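+
+// Illustrative usage (hypothetical embedding code): clone 16 bytes starting
+// at offset 8 out of a fresh 64-byte buffer. Per the note above, the clone is
+// always a plain ArrayBuffer.
+//
+//   JS::RootedObject src(cx, JS::NewArrayBuffer(cx, 64));
+//   if (!src) {
+//     return false;
+//   }
+//   JS::RootedObject clone(
+//       cx, JS::ArrayBufferClone(cx, src, /* srcByteOffset = */ 8,
+//                                /* srcLength = */ 16));
+//   if (!clone) {
+//     return false;  // Detached source or OOM; the error is on cx.
+//   }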
diff --git a/js/src/vm/ArrayBufferObject.h b/js/src/vm/ArrayBufferObject.h
new file mode 100644
index 0000000000..d2d94d6722
--- /dev/null
+++ b/js/src/vm/ArrayBufferObject.h
@@ -0,0 +1,660 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ArrayBufferObject_h
+#define vm_ArrayBufferObject_h
+
+#include "mozilla/Maybe.h"
+
+#include <tuple> // std::tuple
+
+#include "builtin/TypedArrayConstants.h"
+#include "gc/Memory.h"
+#include "gc/ZoneAllocator.h"
+#include "js/ArrayBuffer.h"
+#include "js/GCHashTable.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/SharedMem.h"
+#include "wasm/WasmMemory.h"
+
+namespace js {
+
+class ArrayBufferViewObject;
+class AutoSetNewObjectMetadata;
+class WasmArrayRawBuffer;
+
+namespace wasm {
+struct MemoryDesc;
+} // namespace wasm
+
+// Create a new mapping of size `mappedSize` with an initially committed prefix
+// of size `initialCommittedSize`. Both arguments denote bytes and must be
+// multiples of the page size, with `initialCommittedSize` <= `mappedSize`.
+// Returns nullptr on failure.
+void* MapBufferMemory(wasm::IndexType, size_t mappedSize,
+ size_t initialCommittedSize);
+
+// Commit additional memory in an existing mapping. `dataEnd` must be the
+// correct value for the end of the existing committed area, and `delta` must be
+// a byte amount to grow the mapping by, and must be a multiple of the page
+// size. Returns false on failure.
+bool CommitBufferMemory(void* dataEnd, size_t delta);
+
+// Extend an existing mapping by adding uncommitted pages to it. `dataStart`
+// must be the pointer to the start of the existing mapping, `mappedSize` the
+// size of the existing mapping, and `newMappedSize` the size of the extended
+// mapping (sizes in bytes), with `mappedSize` <= `newMappedSize`. Both sizes
+// must be divisible by the page size. Returns false on failure.
+bool ExtendBufferMapping(void* dataStart, size_t mappedSize,
+ size_t newMappedSize);
+
+// Remove an existing mapping. `dataStart` must be the pointer to the start of
+// the mapping, and `mappedSize` the size of that mapping.
+void UnmapBufferMemory(wasm::IndexType t, void* dataStart, size_t mappedSize);
+
+// Return the number of bytes currently reserved for WebAssembly memory.
+uint64_t WasmReservedBytes();
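+
+// Minimal sketch of the intended call protocol for the helpers above
+// (engine-internal; the page multiples and the I32 index type are
+// illustrative assumptions):
+//
+//   size_t page = gc::SystemPageSize();
+//   void* base = MapBufferMemory(wasm::IndexType::I32,
+//                                /* mappedSize = */ 64 * page,
+//                                /* initialCommittedSize = */ 4 * page);
+//   if (!base) {
+//     return false;
+//   }
+//   uint8_t* dataEnd = static_cast<uint8_t*>(base) + 4 * page;
+//   if (!CommitBufferMemory(dataEnd, /* delta = */ 4 * page)) {
+//     // Growth failed; the original 4 committed pages remain usable.
+//   }
+//   UnmapBufferMemory(wasm::IndexType::I32, base, 64 * page);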
+
+// The inheritance hierarchy for the various classes relating to typed arrays
+// is as follows.
+//
+// - JSObject
+// - TypedObject (declared in wasm/TypedObject.h)
+// - NativeObject
+// - ArrayBufferObjectMaybeShared
+// - ArrayBufferObject
+// - SharedArrayBufferObject
+// - ArrayBufferViewObject
+// - DataViewObject
+// - TypedArrayObject (declared in vm/TypedArrayObject.h)
+// - TypedArrayObjectTemplate
+// - Int8ArrayObject
+// - Uint8ArrayObject
+// - ...
+//
+// Note that |TypedArrayObjectTemplate| is just an implementation
+// detail that makes implementing its various subclasses easier.
+//
+// ArrayBufferObject and SharedArrayBufferObject are unrelated data types:
+// the racy memory of the latter cannot substitute for the non-racy memory of
+// the former; the non-racy memory of the former cannot be used with the
+// atomics; the former can be detached and the latter not. Hence they have been
+// separated completely.
+//
+// Most APIs will only accept ArrayBufferObject. ArrayBufferObjectMaybeShared
+// exists as a join point to allow APIs that can take or use either, notably
+// AsmJS.
+//
+// In contrast with the separation of ArrayBufferObject and
+// SharedArrayBufferObject, the TypedArray types can map either.
+//
+// The possible data ownership and reference relationships with ArrayBuffers
+// and related classes are enumerated below. These are the possible locations
+// for typed data:
+//
+// (1) malloc'ed or mmap'ed data owned by an ArrayBufferObject.
+// (2) Data allocated inline with an ArrayBufferObject.
+// (3) Data allocated inline with a TypedArrayObject.
+// (4) Data allocated inline with an InlineTypedObject.
+//
+// An ArrayBufferObject may point to any of these sources of data, except (3).
+// All array buffer views may point to any of these sources of data, except
+// that (3) may only be pointed to by the typed array the data is inline with.
+//
+// During a minor GC, (3) and (4) may move. During a compacting GC, (2), (3),
+// and (4) may move.
+
+class ArrayBufferObjectMaybeShared;
+
+wasm::IndexType WasmArrayBufferIndexType(
+ const ArrayBufferObjectMaybeShared* buf);
+wasm::Pages WasmArrayBufferPages(const ArrayBufferObjectMaybeShared* buf);
+wasm::Pages WasmArrayBufferClampedMaxPages(
+ const ArrayBufferObjectMaybeShared* buf);
+mozilla::Maybe<wasm::Pages> WasmArrayBufferSourceMaxPages(
+ const ArrayBufferObjectMaybeShared* buf);
+size_t WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf);
+
+class ArrayBufferObjectMaybeShared : public NativeObject {
+ public:
+ inline size_t byteLength() const;
+ inline bool isDetached() const;
+ inline SharedMem<uint8_t*> dataPointerEither();
+
+ // WebAssembly support:
+ // Note: the eventual goal is to remove this from ArrayBuffer and have
+ // (Shared)ArrayBuffers alias memory owned by some wasm::Memory object.
+
+ wasm::IndexType wasmIndexType() const {
+ return WasmArrayBufferIndexType(this);
+ }
+ wasm::Pages wasmPages() const { return WasmArrayBufferPages(this); }
+ wasm::Pages wasmClampedMaxPages() const {
+ return WasmArrayBufferClampedMaxPages(this);
+ }
+ mozilla::Maybe<wasm::Pages> wasmSourceMaxPages() const {
+ return WasmArrayBufferSourceMaxPages(this);
+ }
+ size_t wasmMappedSize() const { return WasmArrayBufferMappedSize(this); }
+
+ inline bool isPreparedForAsmJS() const;
+ inline bool isWasm() const;
+};
+
+using RootedArrayBufferObjectMaybeShared =
+ Rooted<ArrayBufferObjectMaybeShared*>;
+using HandleArrayBufferObjectMaybeShared =
+ Handle<ArrayBufferObjectMaybeShared*>;
+using MutableHandleArrayBufferObjectMaybeShared =
+ MutableHandle<ArrayBufferObjectMaybeShared*>;
+
+/*
+ * ArrayBufferObject
+ *
+ * This class holds the underlying raw buffer that the various ArrayBufferViews
+ * (eg DataViewObject, the TypedArrays, TypedObjects) access. It can be created
+ * explicitly and used to construct an ArrayBufferView, or can be created
+ * lazily when it is first accessed for a TypedArrayObject or TypedObject that
+ * doesn't have an explicit buffer.
+ *
+ * ArrayBufferObject (or really the underlying memory) /is not racy/: the
+ * memory is private to a single worker.
+ */
+class ArrayBufferObject : public ArrayBufferObjectMaybeShared {
+ static bool byteLengthGetterImpl(JSContext* cx, const CallArgs& args);
+
+ public:
+ static const uint8_t DATA_SLOT = 0;
+ static const uint8_t BYTE_LENGTH_SLOT = 1;
+ static const uint8_t FIRST_VIEW_SLOT = 2;
+ static const uint8_t FLAGS_SLOT = 3;
+
+ static const uint8_t RESERVED_SLOTS = 4;
+
+ static const size_t ARRAY_BUFFER_ALIGNMENT = 8;
+
+ static_assert(FLAGS_SLOT == JS_ARRAYBUFFER_FLAGS_SLOT,
+ "self-hosted code with burned-in constants must get the "
+ "right flags slot");
+
+ // The length of an ArrayBuffer or SharedArrayBuffer can be at most INT32_MAX
+ // on 32-bit platforms. Allow a larger limit on 64-bit platforms.
+ static constexpr size_t MaxByteLengthForSmallBuffer = INT32_MAX;
+#ifdef JS_64BIT
+ static constexpr size_t MaxByteLength =
+ size_t(8) * 1024 * 1024 * 1024; // 8 GB.
+#else
+ static constexpr size_t MaxByteLength = MaxByteLengthForSmallBuffer;
+#endif
+
+ /** The largest number of bytes that can be stored inline. */
+ static constexpr size_t MaxInlineBytes =
+ (NativeObject::MAX_FIXED_SLOTS - RESERVED_SLOTS) * sizeof(JS::Value);
+
+ public:
+ enum BufferKind {
+ /** Inline data kept in the repurposed slots of this ArrayBufferObject. */
+ INLINE_DATA = 0b000,
+
+    /** Data allocated using the SpiderMonkey allocator. */
+ MALLOCED = 0b001,
+
+ /**
+ * No bytes are associated with this buffer. (This could be because the
+ * buffer is detached, because it's an internal, newborn buffer not yet
+ * overwritten with user-exposable semantics, or some other reason. The
+ * point is, don't read precise language semantics into this kind.)
+ */
+ NO_DATA = 0b010,
+
+ /**
+ * User-owned memory. The associated buffer must be manually detached
+ * before the user invalidates (deallocates, reuses the storage of, &c.)
+ * the user-owned memory.
+ */
+ USER_OWNED = 0b011,
+
+ WASM = 0b100,
+ MAPPED = 0b101,
+ EXTERNAL = 0b110,
+
+ // These kind-values are currently invalid. We intend to expand valid
+ // BufferKinds in the future to either partly or fully use these values.
+ BAD1 = 0b111,
+
+ KIND_MASK = 0b111
+ };
+
+ public:
+ enum ArrayBufferFlags {
+ // The flags also store the BufferKind
+ BUFFER_KIND_MASK = BufferKind::KIND_MASK,
+
+ DETACHED = 0b1000,
+
+ // This MALLOCED, MAPPED, or EXTERNAL buffer has been prepared for asm.js
+ // and cannot henceforth be transferred/detached. (WASM, USER_OWNED, and
+ // INLINE_DATA buffers can't be prepared for asm.js -- although if an
+ // INLINE_DATA buffer is used with asm.js, it's silently rewritten into a
+ // MALLOCED buffer which *can* be prepared.)
+ FOR_ASMJS = 0b10'0000,
+ };
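+
+  // Worked example (illustrative): a detached buffer whose storage was
+  // malloc'ed carries flags() == (DETACHED | MALLOCED) == 0b1001; its kind is
+  // recovered as BufferKind(flags() & BUFFER_KIND_MASK) == MALLOCED, exactly
+  // as bufferKind() below computes it.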
+
+ static_assert(JS_ARRAYBUFFER_DETACHED_FLAG == DETACHED,
+ "self-hosted code with burned-in constants must use the "
+ "correct DETACHED bit value");
+
+ protected:
+ enum class FillContents { Zero, Uninitialized };
+
+ template <FillContents FillType>
+ static std::tuple<ArrayBufferObject*, uint8_t*> createBufferAndData(
+ JSContext* cx, size_t nbytes, AutoSetNewObjectMetadata&,
+ JS::Handle<JSObject*> proto = nullptr);
+
+ public:
+ class BufferContents {
+ uint8_t* data_;
+ BufferKind kind_;
+ JS::BufferContentsFreeFunc free_;
+ void* freeUserData_;
+
+ friend class ArrayBufferObject;
+
+ BufferContents(uint8_t* data, BufferKind kind,
+ JS::BufferContentsFreeFunc freeFunc = nullptr,
+ void* freeUserData = nullptr)
+ : data_(data),
+ kind_(kind),
+ free_(freeFunc),
+ freeUserData_(freeUserData) {
+ MOZ_ASSERT((kind_ & ~KIND_MASK) == 0);
+ MOZ_ASSERT_IF(free_ || freeUserData_, kind_ == EXTERNAL);
+
+ // It is the caller's responsibility to ensure that the
+ // BufferContents does not outlive the data.
+ }
+
+ public:
+ static BufferContents createInlineData(void* data) {
+ return BufferContents(static_cast<uint8_t*>(data), INLINE_DATA);
+ }
+
+ static BufferContents createMalloced(void* data) {
+ return BufferContents(static_cast<uint8_t*>(data), MALLOCED);
+ }
+
+ static BufferContents createNoData() {
+ return BufferContents(nullptr, NO_DATA);
+ }
+
+ static BufferContents createUserOwned(void* data) {
+ return BufferContents(static_cast<uint8_t*>(data), USER_OWNED);
+ }
+
+ static BufferContents createWasm(void* data) {
+ return BufferContents(static_cast<uint8_t*>(data), WASM);
+ }
+
+ static BufferContents createMapped(void* data) {
+ return BufferContents(static_cast<uint8_t*>(data), MAPPED);
+ }
+
+ static BufferContents createExternal(void* data,
+ JS::BufferContentsFreeFunc freeFunc,
+ void* freeUserData = nullptr) {
+ return BufferContents(static_cast<uint8_t*>(data), EXTERNAL, freeFunc,
+ freeUserData);
+ }
+
+ static BufferContents createFailed() {
+      // There's no harm in tagging this as MALLOCED, even though obviously it
+ // isn't. And adding an extra tag purely for this case is a complication
+ // that presently appears avoidable.
+ return BufferContents(nullptr, MALLOCED);
+ }
+
+ uint8_t* data() const { return data_; }
+ BufferKind kind() const { return kind_; }
+ JS::BufferContentsFreeFunc freeFunc() const { return free_; }
+ void* freeUserData() const { return freeUserData_; }
+
+ explicit operator bool() const { return data_ != nullptr; }
+ WasmArrayRawBuffer* wasmBuffer() const;
+ };
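+
+  // Illustrative sketch (engine-internal; `MyFree`, `myData`, and `nbytes`
+  // are hypothetical): hand embedder-owned memory to the engine and have it
+  // call back when the buffer is finalized.
+  //
+  //   static void MyFree(void* contents, void* userData) {
+  //     js_free(contents);  // or whatever matches how `contents` was allocated
+  //   }
+  //   BufferContents contents = BufferContents::createExternal(
+  //       myData, MyFree, /* freeUserData = */ nullptr);
+  //   ArrayBufferObject* buf =
+  //       ArrayBufferObject::createForContents(cx, nbytes, contents);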
+
+ static const JSClass class_;
+ static const JSClass protoClass_;
+
+ static bool byteLengthGetter(JSContext* cx, unsigned argc, Value* vp);
+
+ static bool fun_isView(JSContext* cx, unsigned argc, Value* vp);
+
+ static bool class_constructor(JSContext* cx, unsigned argc, Value* vp);
+
+ static bool isOriginalByteLengthGetter(Native native) {
+ return native == byteLengthGetter;
+ }
+
+ static ArrayBufferObject* createForContents(JSContext* cx, size_t nbytes,
+ BufferContents contents);
+
+ static ArrayBufferObject* copy(
+ JSContext* cx, JS::Handle<ArrayBufferObject*> unwrappedArrayBuffer);
+
+ static ArrayBufferObject* createZeroed(JSContext* cx, size_t nbytes,
+ HandleObject proto = nullptr);
+
+ // Create an ArrayBufferObject that is safely finalizable and can later be
+ // initialize()d to become a real, content-visible ArrayBufferObject.
+ static ArrayBufferObject* createEmpty(JSContext* cx);
+
+ // Create an ArrayBufferObject using the provided buffer and size. Assumes
+ // ownership of |buffer| even in case of failure, i.e. on failure |buffer|
+ // is deallocated.
+ static ArrayBufferObject* createFromNewRawBuffer(JSContext* cx,
+ WasmArrayRawBuffer* buffer,
+ size_t initialSize);
+
+ static void copyData(Handle<ArrayBufferObject*> toBuffer, size_t toIndex,
+ Handle<ArrayBufferObject*> fromBuffer, size_t fromIndex,
+ size_t count);
+
+ static size_t objectMoved(JSObject* obj, JSObject* old);
+
+ static uint8_t* stealMallocedContents(JSContext* cx,
+ Handle<ArrayBufferObject*> buffer);
+
+ static BufferContents extractStructuredCloneContents(
+ JSContext* cx, Handle<ArrayBufferObject*> buffer);
+
+ static void addSizeOfExcludingThis(JSObject* obj,
+ mozilla::MallocSizeOf mallocSizeOf,
+ JS::ClassInfo* info,
+ JS::RuntimeSizes* runtimeSizes);
+
+ // ArrayBufferObjects (strongly) store the first view added to them, while
+ // later views are (weakly) stored in the compartment's InnerViewTable
+ // below. Buffers usually only have one view, so this slot optimizes for
+ // the common case. Avoiding entries in the InnerViewTable saves memory and
+ // non-incrementalized sweep time.
+ JSObject* firstView();
+
+ bool addView(JSContext* cx, ArrayBufferViewObject* view);
+
+ // Detach this buffer from its original memory. (This necessarily makes
+ // views of this buffer unusable for modifying that original memory.)
+ static void detach(JSContext* cx, Handle<ArrayBufferObject*> buffer);
+
+ static constexpr size_t offsetOfByteLengthSlot() {
+ return getFixedSlotOffset(BYTE_LENGTH_SLOT);
+ }
+ static constexpr size_t offsetOfFlagsSlot() {
+ return getFixedSlotOffset(FLAGS_SLOT);
+ }
+
+ private:
+ void setFirstView(ArrayBufferViewObject* view);
+
+ uint8_t* inlineDataPointer() const;
+
+ struct FreeInfo {
+ JS::BufferContentsFreeFunc freeFunc;
+ void* freeUserData;
+ };
+ FreeInfo* freeInfo() const;
+
+ public:
+ uint8_t* dataPointer() const;
+ SharedMem<uint8_t*> dataPointerShared() const;
+ size_t byteLength() const;
+
+ BufferContents contents() const {
+ if (isExternal()) {
+ return BufferContents(dataPointer(), EXTERNAL, freeInfo()->freeFunc,
+ freeInfo()->freeUserData);
+ }
+ return BufferContents(dataPointer(), bufferKind());
+ }
+ bool hasInlineData() const { return dataPointer() == inlineDataPointer(); }
+
+ void releaseData(JS::GCContext* gcx);
+
+ BufferKind bufferKind() const {
+ return BufferKind(flags() & BUFFER_KIND_MASK);
+ }
+
+ bool isInlineData() const { return bufferKind() == INLINE_DATA; }
+ bool isMalloced() const { return bufferKind() == MALLOCED; }
+ bool isNoData() const { return bufferKind() == NO_DATA; }
+ bool hasUserOwnedData() const { return bufferKind() == USER_OWNED; }
+
+ bool isWasm() const { return bufferKind() == WASM; }
+ bool isMapped() const { return bufferKind() == MAPPED; }
+ bool isExternal() const { return bufferKind() == EXTERNAL; }
+
+ bool isDetached() const { return flags() & DETACHED; }
+ bool isPreparedForAsmJS() const { return flags() & FOR_ASMJS; }
+
+ // WebAssembly support:
+
+ /**
+ * Prepare this ArrayBuffer for use with asm.js. Returns true on success,
+ * false on failure. This function reports no errors.
+ */
+ [[nodiscard]] bool prepareForAsmJS();
+
+ size_t wasmMappedSize() const;
+
+ wasm::IndexType wasmIndexType() const;
+ wasm::Pages wasmPages() const;
+ wasm::Pages wasmClampedMaxPages() const;
+ mozilla::Maybe<wasm::Pages> wasmSourceMaxPages() const;
+
+ [[nodiscard]] static bool wasmGrowToPagesInPlace(
+ wasm::IndexType t, wasm::Pages newPages,
+ Handle<ArrayBufferObject*> oldBuf,
+ MutableHandle<ArrayBufferObject*> newBuf, JSContext* cx);
+ [[nodiscard]] static bool wasmMovingGrowToPages(
+ wasm::IndexType t, wasm::Pages newPages,
+ Handle<ArrayBufferObject*> oldBuf,
+ MutableHandle<ArrayBufferObject*> newBuf, JSContext* cx);
+ static void wasmDiscard(Handle<ArrayBufferObject*> buf, uint64_t byteOffset,
+ uint64_t byteLength);
+
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+
+ static BufferContents createMappedContents(int fd, size_t offset,
+ size_t length);
+
+ protected:
+ void setDataPointer(BufferContents contents);
+ void setByteLength(size_t length);
+
+ size_t associatedBytes() const;
+
+ uint32_t flags() const;
+ void setFlags(uint32_t flags);
+
+ void setIsDetached() { setFlags(flags() | DETACHED); }
+ void setIsPreparedForAsmJS() {
+ MOZ_ASSERT(!isWasm());
+ MOZ_ASSERT(!hasUserOwnedData());
+ MOZ_ASSERT(!isInlineData());
+ MOZ_ASSERT(isMalloced() || isMapped() || isExternal());
+ setFlags(flags() | FOR_ASMJS);
+ }
+
+ void initialize(size_t byteLength, BufferContents contents) {
+ setByteLength(byteLength);
+ setFlags(0);
+ setFirstView(nullptr);
+ setDataPointer(contents);
+ }
+
+ void* initializeToInlineData(size_t byteLength) {
+ void* data = inlineDataPointer();
+ initialize(byteLength, BufferContents::createInlineData(data));
+ return data;
+ }
+};
+
+using RootedArrayBufferObject = Rooted<ArrayBufferObject*>;
+using HandleArrayBufferObject = Handle<ArrayBufferObject*>;
+using MutableHandleArrayBufferObject = MutableHandle<ArrayBufferObject*>;
+
+// Create a buffer for a wasm memory, whose type is determined by
+// memory.indexType().
+bool CreateWasmBuffer(JSContext* cx, const wasm::MemoryDesc& memory,
+ MutableHandleArrayBufferObjectMaybeShared buffer);
+
+// Per-zone table that manages the relationship between array buffers
+// and the views that use their storage.
+class InnerViewTable {
+ public:
+ using ViewVector = GCVector<UnsafeBarePtr<JSObject*>, 1, ZoneAllocPolicy>;
+
+ friend class ArrayBufferObject;
+
+ private:
+ // This key is a raw pointer and not a WeakHeapPtr because the post-barrier
+ // would hold nursery-allocated entries live unconditionally. It is a very
+ // common pattern in low-level and performance-oriented JavaScript to create
+ // hundreds or thousands of very short lived temporary views on a larger
+ // buffer; having to tenure all of these would be a catastrophic performance
+ // regression. Thus, it is vital that nursery pointers in this map not be held
+ // live. Special support is required in the minor GC, implemented in
+ // sweepAfterMinorGC.
+ using Map = GCHashMap<UnsafeBarePtr<JSObject*>, ViewVector,
+ StableCellHasher<JSObject*>, ZoneAllocPolicy>;
+
+ // For all objects sharing their storage with some other view, this maps
+ // the object to the list of such views. All entries in this map are weak.
+ Map map;
+
+ // List of keys from innerViews where either the source or at least one
+ // target is in the nursery. The raw pointer to a JSObject is allowed here
+ // because this vector is cleared after every minor collection. Users in
+ // sweepAfterMinorCollection must be careful to use MaybeForwarded before
+ // touching these pointers.
+ Vector<JSObject*, 0, SystemAllocPolicy> nurseryKeys;
+
+ // Whether nurseryKeys is a complete list.
+ bool nurseryKeysValid;
+
+ bool addView(JSContext* cx, ArrayBufferObject* buffer, JSObject* view);
+ ViewVector* maybeViewsUnbarriered(ArrayBufferObject* obj);
+ void removeViews(ArrayBufferObject* obj);
+
+ public:
+ explicit InnerViewTable(Zone* zone) : map(zone), nurseryKeysValid(true) {}
+
+ // Remove references to dead objects in the table and update table entries
+ // to reflect moved objects.
+ bool traceWeak(JSTracer* trc);
+ void sweepAfterMinorGC(JSTracer* trc);
+
+ bool empty() const { return map.empty(); }
+
+ bool needsSweepAfterMinorGC() const {
+ return !nurseryKeys.empty() || !nurseryKeysValid;
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+};
+
+template <typename Wrapper>
+class MutableWrappedPtrOperations<InnerViewTable, Wrapper>
+ : public WrappedPtrOperations<InnerViewTable, Wrapper> {
+ InnerViewTable& table() { return static_cast<Wrapper*>(this)->get(); }
+
+ public:
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return table().sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+class WasmArrayRawBuffer {
+ wasm::IndexType indexType_;
+ wasm::Pages clampedMaxPages_;
+ mozilla::Maybe<wasm::Pages> sourceMaxPages_;
+ size_t mappedSize_; // Not including the header page
+ size_t length_;
+
+ protected:
+ WasmArrayRawBuffer(wasm::IndexType indexType, uint8_t* buffer,
+ wasm::Pages clampedMaxPages,
+ const mozilla::Maybe<wasm::Pages>& sourceMaxPages,
+ size_t mappedSize, size_t length)
+ : indexType_(indexType),
+ clampedMaxPages_(clampedMaxPages),
+ sourceMaxPages_(sourceMaxPages),
+ mappedSize_(mappedSize),
+ length_(length) {
+ MOZ_ASSERT(buffer == dataPointer());
+ }
+
+ public:
+ static WasmArrayRawBuffer* AllocateWasm(
+ wasm::IndexType indexType, wasm::Pages initialPages,
+ wasm::Pages clampedMaxPages,
+ const mozilla::Maybe<wasm::Pages>& sourceMaxPages,
+ const mozilla::Maybe<size_t>& mappedSize);
+ static void Release(void* mem);
+
+ uint8_t* dataPointer() {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(this);
+ return ptr + sizeof(WasmArrayRawBuffer);
+ }
+
+ static const WasmArrayRawBuffer* fromDataPtr(const uint8_t* dataPtr) {
+ return reinterpret_cast<const WasmArrayRawBuffer*>(
+ dataPtr - sizeof(WasmArrayRawBuffer));
+ }
+
+ static WasmArrayRawBuffer* fromDataPtr(uint8_t* dataPtr) {
+ return reinterpret_cast<WasmArrayRawBuffer*>(dataPtr -
+ sizeof(WasmArrayRawBuffer));
+ }
+
+ wasm::IndexType indexType() const { return indexType_; }
+
+ uint8_t* basePointer() { return dataPointer() - gc::SystemPageSize(); }
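+
+  // Memory layout sketch (illustrative): the header occupies the tail of a
+  // system page that directly precedes the wasm data, so fromDataPtr() can
+  // recover it with plain pointer arithmetic:
+  //
+  //   basePointer()                   this   dataPointer()
+  //   |<--------- one system page --------->|<-- length_ bytes ... -->|
+  //                  ... [WasmArrayRawBuffer] wasm memory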
+
+ size_t mappedSize() const { return mappedSize_; }
+
+ size_t byteLength() const { return length_; }
+
+ wasm::Pages pages() const {
+ return wasm::Pages::fromByteLengthExact(length_);
+ }
+
+ wasm::Pages clampedMaxPages() const { return clampedMaxPages_; }
+
+ mozilla::Maybe<wasm::Pages> sourceMaxPages() const { return sourceMaxPages_; }
+
+ [[nodiscard]] bool growToPagesInPlace(wasm::Pages newPages);
+
+ [[nodiscard]] bool extendMappedSize(wasm::Pages maxPages);
+
+  // Try to grow the mapped region of memory. Does not change current size.
+ // Does not move memory if no space to grow.
+ void tryGrowMaxPagesInPlace(wasm::Pages deltaMaxPages);
+
+ // Discard a region of memory, zeroing the pages and releasing physical memory
+ // back to the operating system. byteOffset and byteLen must be wasm page
+ // aligned and in bounds. A discard of zero bytes will have no effect.
+ void discard(size_t byteOffset, size_t byteLen);
+};
+
+} // namespace js
+
+template <>
+bool JSObject::is<js::ArrayBufferObjectMaybeShared>() const;
+
+#endif // vm_ArrayBufferObject_h
diff --git a/js/src/vm/ArrayBufferObjectMaybeShared.cpp b/js/src/vm/ArrayBufferObjectMaybeShared.cpp
new file mode 100644
index 0000000000..400a8baa3f
--- /dev/null
+++ b/js/src/vm/ArrayBufferObjectMaybeShared.cpp
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include <stdint.h> // uint8_t, uint32_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "js/ArrayBufferMaybeShared.h"
+#include "vm/ArrayBufferObject.h" // js::ArrayBufferObject
+#include "vm/JSObject.h" // JSObject
+#include "vm/SharedArrayObject.h" // js::SharedArrayBufferObject
+#include "vm/SharedMem.h" // SharedMem
+
+using namespace js;
+
+JS_PUBLIC_API bool JS::IsArrayBufferObjectMaybeShared(JSObject* obj) {
+ return obj->canUnwrapAs<ArrayBufferObjectMaybeShared>();
+}
+
+JS_PUBLIC_API JSObject* JS::UnwrapArrayBufferMaybeShared(JSObject* obj) {
+ return obj->maybeUnwrapIf<ArrayBufferObjectMaybeShared>();
+}
+
+JS_PUBLIC_API void JS::GetArrayBufferMaybeSharedLengthAndData(
+ JSObject* obj, size_t* length, bool* isSharedMemory, uint8_t** data) {
+ MOZ_ASSERT(obj->is<ArrayBufferObjectMaybeShared>());
+
+ if (obj->is<SharedArrayBufferObject>()) {
+ auto* buffer = &obj->as<SharedArrayBufferObject>();
+ *length = buffer->byteLength();
+ *data = buffer->dataPointerShared().unwrap();
+ *isSharedMemory = true;
+ } else {
+ auto* buffer = &obj->as<ArrayBufferObject>();
+ *length = buffer->byteLength();
+ *data = buffer->dataPointer();
+ *isSharedMemory = false;
+ }
+}
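+
+// Illustrative usage (hypothetical embedding code; per the assert above,
+// `obj` must already be an unwrapped (Shared)ArrayBuffer):
+//
+//   size_t length;
+//   bool isShared;
+//   uint8_t* data;
+//   JS::GetArrayBufferMaybeSharedLengthAndData(obj, &length, &isShared,
+//                                              &data);
+//   if (isShared) {
+//     // Other threads may be writing these bytes concurrently; only
+//     // race-safe accesses are permissible.
+//   }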
+
+JS_PUBLIC_API uint8_t* JS::GetArrayBufferMaybeSharedData(
+ JSObject* obj, bool* isSharedMemory, const JS::AutoRequireNoGC&) {
+ MOZ_ASSERT(obj->maybeUnwrapIf<ArrayBufferObjectMaybeShared>());
+
+ if (ArrayBufferObject* aobj = obj->maybeUnwrapIf<ArrayBufferObject>()) {
+ *isSharedMemory = false;
+ return aobj->dataPointer();
+ } else if (SharedArrayBufferObject* saobj =
+ obj->maybeUnwrapIf<SharedArrayBufferObject>()) {
+ *isSharedMemory = true;
+ return saobj->dataPointerShared().unwrap();
+ }
+
+ return nullptr;
+}
+
+JS_PUBLIC_API bool JS::IsLargeArrayBufferMaybeShared(JSObject* obj) {
+#ifdef JS_64BIT
+ obj = UnwrapArrayBufferMaybeShared(obj);
+ MOZ_ASSERT(obj);
+ size_t len = obj->is<ArrayBufferObject>()
+ ? obj->as<ArrayBufferObject>().byteLength()
+ : obj->as<SharedArrayBufferObject>().byteLength();
+ return len > ArrayBufferObject::MaxByteLengthForSmallBuffer;
+#else
+ // Large ArrayBuffers are not supported on 32-bit.
+ static_assert(ArrayBufferObject::MaxByteLength ==
+ ArrayBufferObject::MaxByteLengthForSmallBuffer);
+ return false;
+#endif
+}
diff --git a/js/src/vm/ArrayBufferViewObject.cpp b/js/src/vm/ArrayBufferViewObject.cpp
new file mode 100644
index 0000000000..11e6d9fa72
--- /dev/null
+++ b/js/src/vm/ArrayBufferViewObject.cpp
@@ -0,0 +1,319 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/ArrayBufferViewObject.h"
+
+#include "builtin/DataViewObject.h"
+#include "gc/Nursery.h"
+#include "js/experimental/TypedData.h" // JS_GetArrayBufferView{Data,Buffer,Length,ByteOffset}, JS_GetObjectAsArrayBufferView, JS_IsArrayBufferViewObject
+#include "js/SharedArrayBuffer.h"
+#include "vm/Compartment.h"
+#include "vm/JSContext.h"
+#include "vm/TypedArrayObject.h"
+
+#include "gc/Nursery-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+
+// This method is used to trace TypedArrayObjects and DataViewObjects. It
+// updates the object's data pointer if it points to inline data in an object
+// that was moved.
+/* static */
+void ArrayBufferViewObject::trace(JSTracer* trc, JSObject* obj) {
+ ArrayBufferViewObject* view = &obj->as<ArrayBufferViewObject>();
+
+ // Update view's data pointer if it moved.
+ if (view->hasBuffer()) {
+ JSObject* bufferObj = &view->bufferValue().toObject();
+ if (gc::MaybeForwardedObjectIs<ArrayBufferObject>(bufferObj)) {
+ auto* buffer = &gc::MaybeForwardedObjectAs<ArrayBufferObject>(bufferObj);
+
+ size_t offset = view->byteOffset();
+ MOZ_ASSERT_IF(!buffer->dataPointer(), offset == 0);
+
+ // The data may or may not be inline with the buffer. The buffer can only
+ // move during a compacting GC, in which case its objectMoved hook has
+ // already updated the buffer's data pointer.
+ void* oldData = view->dataPointerEither_();
+ void* data = buffer->dataPointer() + offset;
+ if (data != oldData) {
+ view->getFixedSlotRef(DATA_SLOT).unbarrieredSet(PrivateValue(data));
+ }
+ }
+ }
+}
+
+template <>
+bool JSObject::is<js::ArrayBufferViewObject>() const {
+ return is<DataViewObject>() || is<TypedArrayObject>();
+}
+
+void ArrayBufferViewObject::notifyBufferDetached() {
+ MOZ_ASSERT(!isSharedMemory());
+ MOZ_ASSERT(hasBuffer());
+
+ setFixedSlot(LENGTH_SLOT, PrivateValue(size_t(0)));
+ setFixedSlot(BYTEOFFSET_SLOT, PrivateValue(size_t(0)));
+ setFixedSlot(DATA_SLOT, UndefinedValue());
+}
+
+/* static */
+ArrayBufferObjectMaybeShared* ArrayBufferViewObject::bufferObject(
+ JSContext* cx, Handle<ArrayBufferViewObject*> thisObject) {
+ if (thisObject->is<TypedArrayObject>()) {
+ Rooted<TypedArrayObject*> typedArray(cx,
+ &thisObject->as<TypedArrayObject>());
+ if (!TypedArrayObject::ensureHasBuffer(cx, typedArray)) {
+ return nullptr;
+ }
+ }
+ return thisObject->bufferEither();
+}
+
+bool ArrayBufferViewObject::init(JSContext* cx,
+ ArrayBufferObjectMaybeShared* buffer,
+ size_t byteOffset, size_t length,
+ uint32_t bytesPerElement) {
+ MOZ_ASSERT_IF(!buffer, byteOffset == 0);
+ MOZ_ASSERT_IF(buffer, !buffer->isDetached());
+
+ MOZ_ASSERT(byteOffset <= ArrayBufferObject::MaxByteLength);
+ MOZ_ASSERT(length <= ArrayBufferObject::MaxByteLength);
+ MOZ_ASSERT(byteOffset + length <= ArrayBufferObject::MaxByteLength);
+
+ MOZ_ASSERT_IF(is<TypedArrayObject>(),
+ length <= TypedArrayObject::MaxByteLength / bytesPerElement);
+
+ // The isSharedMemory property is invariant. Self-hosting code that
+ // sets BUFFER_SLOT or the private slot (if it does) must maintain it by
+ // always setting those to reference shared memory.
+ if (buffer && buffer->is<SharedArrayBufferObject>()) {
+ setIsSharedMemory();
+ }
+
+ initFixedSlot(BYTEOFFSET_SLOT, PrivateValue(byteOffset));
+ initFixedSlot(LENGTH_SLOT, PrivateValue(length));
+ initFixedSlot(BUFFER_SLOT, ObjectOrNullValue(buffer));
+
+ if (buffer) {
+ SharedMem<uint8_t*> ptr = buffer->dataPointerEither();
+ initDataPointer(ptr + byteOffset);
+
+ // Only ArrayBuffers used for inline typed objects can have
+ // nursery-allocated data and we shouldn't see such buffers here.
+ MOZ_ASSERT_IF(buffer->byteLength() > 0, !cx->nursery().isInside(ptr));
+ } else {
+ MOZ_ASSERT(is<TypedArrayObject>());
+ MOZ_ASSERT(length * bytesPerElement <=
+ TypedArrayObject::INLINE_BUFFER_LIMIT);
+ void* data = fixedData(TypedArrayObject::FIXED_DATA_START);
+ initReservedSlot(DATA_SLOT, PrivateValue(data));
+ memset(data, 0, length * bytesPerElement);
+#ifdef DEBUG
+ if (length == 0) {
+ uint8_t* elements = static_cast<uint8_t*>(data);
+ elements[0] = ZeroLengthArrayData;
+ }
+#endif
+ }
+
+#ifdef DEBUG
+ if (buffer) {
+ size_t viewByteLength = length * bytesPerElement;
+ size_t viewByteOffset = byteOffset;
+ size_t bufferByteLength = buffer->byteLength();
+ // Unwraps are safe: both are for the pointer value.
+ MOZ_ASSERT_IF(buffer->is<ArrayBufferObject>(),
+ buffer->dataPointerEither().unwrap(/*safe*/) <=
+ dataPointerEither().unwrap(/*safe*/));
+ MOZ_ASSERT(bufferByteLength - viewByteOffset >= viewByteLength);
+ MOZ_ASSERT(viewByteOffset <= bufferByteLength);
+ }
+#endif
+
+ // ArrayBufferObjects track their views to support detaching.
+ if (buffer && buffer->is<ArrayBufferObject>()) {
+ if (!buffer->as<ArrayBufferObject>().addView(cx, this)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* JS Public API */
+
+JS_PUBLIC_API bool JS_IsArrayBufferViewObject(JSObject* obj) {
+ return obj->canUnwrapAs<ArrayBufferViewObject>();
+}
+
+JS_PUBLIC_API JSObject* js::UnwrapArrayBufferView(JSObject* obj) {
+ return obj->maybeUnwrapIf<ArrayBufferViewObject>();
+}
+
+JS_PUBLIC_API void* JS_GetArrayBufferViewData(JSObject* obj,
+ bool* isSharedMemory,
+ const JS::AutoRequireNoGC&) {
+ ArrayBufferViewObject* view = obj->maybeUnwrapAs<ArrayBufferViewObject>();
+ if (!view) {
+ return nullptr;
+ }
+
+ *isSharedMemory = view->isSharedMemory();
+ return view->dataPointerEither().unwrap(
+ /*safe - caller sees isSharedMemory flag*/);
+}
+
+JS_PUBLIC_API uint8_t* JS_GetArrayBufferViewFixedData(JSObject* obj,
+ uint8_t* buffer,
+ size_t bufSize) {
+ ArrayBufferViewObject* view = obj->maybeUnwrapAs<ArrayBufferViewObject>();
+ if (!view) {
+ return nullptr;
+ }
+
+ // Disallow shared memory until it is needed.
+ if (view->isSharedMemory()) {
+ return nullptr;
+ }
+
+ // TypedArrays (but not DataViews) can have inline data, in which case we
+ // need to copy into the given buffer.
+ if (view->is<TypedArrayObject>()) {
+ TypedArrayObject* ta = &view->as<TypedArrayObject>();
+ if (ta->hasInlineElements()) {
+ size_t bytes = ta->byteLength();
+ if (bytes > bufSize) {
+ return nullptr; // Does not fit.
+ }
+ memcpy(buffer, view->dataPointerUnshared(), bytes);
+ return buffer;
+ }
+ }
+
+ return static_cast<uint8_t*>(view->dataPointerUnshared());
+}
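+
+// Illustrative usage (hypothetical caller): obtain GC-stable bytes for a view
+// whose data might live inline in the (movable) object, by offering a
+// fallback copy buffer.
+//
+//   uint8_t stackBuf[64];
+//   uint8_t* bytes =
+//       JS_GetArrayBufferViewFixedData(viewObj, stackBuf, sizeof(stackBuf));
+//   if (!bytes) {
+//     // Unwrap failure, shared memory, or inline data larger than stackBuf.
+//   }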
+
+JS_PUBLIC_API JSObject* JS_GetArrayBufferViewBuffer(JSContext* cx,
+ HandleObject obj,
+ bool* isSharedMemory) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ Rooted<ArrayBufferViewObject*> unwrappedView(
+ cx, obj->maybeUnwrapAs<ArrayBufferViewObject>());
+ if (!unwrappedView) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+
+ ArrayBufferObjectMaybeShared* unwrappedBuffer;
+ {
+ AutoRealm ar(cx, unwrappedView);
+ unwrappedBuffer = ArrayBufferViewObject::bufferObject(cx, unwrappedView);
+ if (!unwrappedBuffer) {
+ return nullptr;
+ }
+ }
+ *isSharedMemory = unwrappedBuffer->is<SharedArrayBufferObject>();
+
+ RootedObject buffer(cx, unwrappedBuffer);
+ if (!cx->compartment()->wrap(cx, &buffer)) {
+ return nullptr;
+ }
+
+ return buffer;
+}
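+
+// Illustrative usage (hypothetical embedding code): recover the buffer
+// behind a view (creating it lazily for a typed array, as above) and branch
+// on shared memory.
+//
+//   bool isShared;
+//   JS::RootedObject buf(
+//       cx, JS_GetArrayBufferViewBuffer(cx, viewObj, &isShared));
+//   if (!buf) {
+//     return false;  // Access denied or OOM; the error is on cx.
+//   }
+//   if (!isShared) {
+//     // The plain ArrayBuffer APIs may be used on `buf`.
+//   }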
+
+JS_PUBLIC_API size_t JS_GetArrayBufferViewByteLength(JSObject* obj) {
+ obj = obj->maybeUnwrapAs<ArrayBufferViewObject>();
+ if (!obj) {
+ return 0;
+ }
+ size_t length = obj->is<DataViewObject>()
+ ? obj->as<DataViewObject>().byteLength()
+ : obj->as<TypedArrayObject>().byteLength();
+ return length;
+}
+
+bool JS::ArrayBufferView::isDetached() const {
+ MOZ_ASSERT(obj);
+ return obj->as<ArrayBufferViewObject>().hasDetachedBuffer();
+}
+
+JS_PUBLIC_API size_t JS_GetArrayBufferViewByteOffset(JSObject* obj) {
+ obj = obj->maybeUnwrapAs<ArrayBufferViewObject>();
+ if (!obj) {
+ return 0;
+ }
+ size_t offset = obj->is<DataViewObject>()
+ ? obj->as<DataViewObject>().byteOffset()
+ : obj->as<TypedArrayObject>().byteOffset();
+ return offset;
+}
+
+JS_PUBLIC_API uint8_t* JS::ArrayBufferView::getLengthAndData(
+ size_t* length, bool* isSharedMemory, const AutoRequireNoGC&) {
+ MOZ_ASSERT(obj->is<ArrayBufferViewObject>());
+ size_t byteLength = obj->is<DataViewObject>()
+ ? obj->as<DataViewObject>().byteLength()
+ : obj->as<TypedArrayObject>().byteLength();
+ *length = byteLength; // *Not* the number of elements in the array, if
+ // sizeof(elt) != 1.
+
+ ArrayBufferViewObject& view = obj->as<ArrayBufferViewObject>();
+ *isSharedMemory = view.isSharedMemory();
+ return static_cast<uint8_t*>(
+ view.dataPointerEither().unwrap(/*safe - caller sees isShared flag*/));
+}
+
+JS_PUBLIC_API JSObject* JS_GetObjectAsArrayBufferView(JSObject* obj,
+ size_t* length,
+ bool* isSharedMemory,
+ uint8_t** data) {
+ obj = obj->maybeUnwrapIf<ArrayBufferViewObject>();
+ if (!obj) {
+ return nullptr;
+ }
+
+ js::GetArrayBufferViewLengthAndData(obj, length, isSharedMemory, data);
+ return obj;
+}
+
+JS_PUBLIC_API void js::GetArrayBufferViewLengthAndData(JSObject* obj,
+ size_t* length,
+ bool* isSharedMemory,
+ uint8_t** data) {
+ JS::AutoAssertNoGC nogc;
+ *data = JS::ArrayBufferView::fromObject(obj).getLengthAndData(
+ length, isSharedMemory, nogc);
+}
+
+JS_PUBLIC_API bool JS::IsArrayBufferViewShared(JSObject* obj) {
+ ArrayBufferViewObject* view = obj->maybeUnwrapAs<ArrayBufferViewObject>();
+ if (!view) {
+ return false;
+ }
+ return view->isSharedMemory();
+}
+
+JS_PUBLIC_API bool JS::IsLargeArrayBufferView(JSObject* obj) {
+#ifdef JS_64BIT
+ obj = &obj->unwrapAs<ArrayBufferViewObject>();
+ size_t len = obj->is<DataViewObject>()
+ ? obj->as<DataViewObject>().byteLength()
+ : obj->as<TypedArrayObject>().byteLength();
+ return len > ArrayBufferObject::MaxByteLengthForSmallBuffer;
+#else
+ // Large ArrayBuffers are not supported on 32-bit.
+ static_assert(ArrayBufferObject::MaxByteLength ==
+ ArrayBufferObject::MaxByteLengthForSmallBuffer);
+ return false;
+#endif
+}
diff --git a/js/src/vm/ArrayBufferViewObject.h b/js/src/vm/ArrayBufferViewObject.h
new file mode 100644
index 0000000000..7d6a4b70bd
--- /dev/null
+++ b/js/src/vm/ArrayBufferViewObject.h
@@ -0,0 +1,166 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ArrayBufferViewObject_h
+#define vm_ArrayBufferViewObject_h
+
+#include "builtin/TypedArrayConstants.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/NativeObject.h"
+#include "vm/SharedArrayObject.h"
+#include "vm/SharedMem.h"
+
+namespace js {
+
+/*
+ * ArrayBufferViewObject
+ *
+ * Common base class for all array buffer views (DataViewObject and
+ * TypedArrayObject).
+ */
+
+class ArrayBufferViewObject : public NativeObject {
+ public:
+ // Underlying (Shared)ArrayBufferObject.
+ static constexpr size_t BUFFER_SLOT = 0;
+ static_assert(BUFFER_SLOT == JS_TYPEDARRAYLAYOUT_BUFFER_SLOT,
+ "self-hosted code with burned-in constants must get the "
+ "right buffer slot");
+
+ // Slot containing length of the view in number of typed elements.
+ static constexpr size_t LENGTH_SLOT = 1;
+
+ // Offset of view within underlying (Shared)ArrayBufferObject.
+ static constexpr size_t BYTEOFFSET_SLOT = 2;
+
+ // Pointer to raw buffer memory.
+ static constexpr size_t DATA_SLOT = 3;
+
+ static constexpr size_t RESERVED_SLOTS = 4;
+
+#ifdef DEBUG
+ static const uint8_t ZeroLengthArrayData = 0x4A;
+#endif
+
+ static constexpr int bufferOffset() {
+ return NativeObject::getFixedSlotOffset(BUFFER_SLOT);
+ }
+ static constexpr int lengthOffset() {
+ return NativeObject::getFixedSlotOffset(LENGTH_SLOT);
+ }
+ static constexpr int byteOffsetOffset() {
+ return NativeObject::getFixedSlotOffset(BYTEOFFSET_SLOT);
+ }
+ static constexpr int dataOffset() {
+ return NativeObject::getFixedSlotOffset(DATA_SLOT);
+ }
+
+ private:
+ void* dataPointerEither_() const {
+    // Note: deliberately does not check whether the memory is shared.
+ // Keep synced with js::Get<Type>ArrayLengthAndData in jsfriendapi.h!
+ return maybePtrFromReservedSlot<void>(DATA_SLOT);
+ }
+
+ public:
+ [[nodiscard]] bool init(JSContext* cx, ArrayBufferObjectMaybeShared* buffer,
+ size_t byteOffset, size_t length,
+ uint32_t bytesPerElement);
+
+ static ArrayBufferObjectMaybeShared* bufferObject(
+ JSContext* cx, Handle<ArrayBufferViewObject*> obj);
+
+ void notifyBufferDetached();
+
+ void initDataPointer(SharedMem<uint8_t*> viewData) {
+ // Install a pointer to the buffer location that corresponds
+ // to offset zero within the typed array.
+ //
+ // The following unwrap is safe because the DATA_SLOT is
+ // accessed only from jitted code and from the
+ // dataPointerEither_() accessor above; in neither case does the
+ // raw pointer escape untagged into C++ code.
+ void* data = viewData.unwrap(/*safe - see above*/);
+ initReservedSlot(DATA_SLOT, PrivateValue(data));
+ }
+
+ SharedMem<void*> dataPointerShared() const {
+ return SharedMem<void*>::shared(dataPointerEither_());
+ }
+ SharedMem<void*> dataPointerEither() const {
+ if (isSharedMemory()) {
+ return SharedMem<void*>::shared(dataPointerEither_());
+ }
+ return SharedMem<void*>::unshared(dataPointerEither_());
+ }
+ void* dataPointerUnshared() const {
+ MOZ_ASSERT(!isSharedMemory());
+ return dataPointerEither_();
+ }
+
+ Value bufferValue() const { return getFixedSlot(BUFFER_SLOT); }
+ bool hasBuffer() const { return bufferValue().isObject(); }
+
+ ArrayBufferObject* bufferUnshared() const {
+ MOZ_ASSERT(!isSharedMemory());
+ ArrayBufferObjectMaybeShared* obj = bufferEither();
+ if (!obj) {
+ return nullptr;
+ }
+ return &obj->as<ArrayBufferObject>();
+ }
+ SharedArrayBufferObject* bufferShared() const {
+ MOZ_ASSERT(isSharedMemory());
+ ArrayBufferObjectMaybeShared* obj = bufferEither();
+ if (!obj) {
+ return nullptr;
+ }
+ return &obj->as<SharedArrayBufferObject>();
+ }
+ ArrayBufferObjectMaybeShared* bufferEither() const {
+ JSObject* obj = bufferValue().toObjectOrNull();
+ if (!obj) {
+ return nullptr;
+ }
+ MOZ_ASSERT(isSharedMemory() ? obj->is<SharedArrayBufferObject>()
+ : obj->is<ArrayBufferObject>());
+ return &obj->as<ArrayBufferObjectMaybeShared>();
+ }
+
+ bool hasDetachedBuffer() const {
+ // Shared buffers can't be detached.
+ if (isSharedMemory()) {
+ return false;
+ }
+
+    // A typed array with a null buffer has never had its buffer exposed, so
+    // it cannot have become detached.
+ ArrayBufferObject* buffer = bufferUnshared();
+ if (!buffer) {
+ return false;
+ }
+
+ return buffer->isDetached();
+ }
+
+ size_t byteOffset() const {
+ return size_t(getFixedSlot(BYTEOFFSET_SLOT).toPrivate());
+ }
+
+ Value byteOffsetValue() const {
+ size_t offset = byteOffset();
+ return NumberValue(offset);
+ }
+
+ static void trace(JSTracer* trc, JSObject* obj);
+};
+
+} // namespace js
+
+template <>
+bool JSObject::is<js::ArrayBufferViewObject>() const;
+
+#endif // vm_ArrayBufferViewObject_h
diff --git a/js/src/vm/ArrayObject-inl.h b/js/src/vm/ArrayObject-inl.h
new file mode 100644
index 0000000000..240a5f3aef
--- /dev/null
+++ b/js/src/vm/ArrayObject-inl.h
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ArrayObject_inl_h
+#define vm_ArrayObject_inl_h
+
+#include "vm/ArrayObject.h"
+
+#include "gc/Allocator.h"
+#include "gc/GCProbes.h"
+
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+namespace js {
+
+/* static */ MOZ_ALWAYS_INLINE ArrayObject* ArrayObject::create(
+ JSContext* cx, gc::AllocKind kind, gc::Heap heap,
+ Handle<SharedShape*> shape, uint32_t length, uint32_t slotSpan,
+ AutoSetNewObjectMetadata& metadata, gc::AllocSite* site) {
+ debugCheckNewObject(shape, kind, heap);
+
+ const JSClass* clasp = &ArrayObject::class_;
+ MOZ_ASSERT(shape);
+ MOZ_ASSERT(shape->getObjectClass() == clasp);
+ MOZ_ASSERT(clasp->isNativeObject());
+ MOZ_ASSERT(!clasp->hasFinalize());
+
+ // Note: the slot span is passed as argument to allow more constant folding
+ // below for the common case of slotSpan == 0.
+ MOZ_ASSERT(shape->slotSpan() == slotSpan);
+
+ // Arrays can use their fixed slots to store elements, so can't have shapes
+ // which allow named properties to be stored in the fixed slots.
+ MOZ_ASSERT(shape->numFixedSlots() == 0);
+
+ size_t nDynamicSlots = calculateDynamicSlots(0, slotSpan, clasp);
+ ArrayObject* aobj = cx->newCell<ArrayObject>(kind, heap, clasp, site);
+ if (!aobj) {
+ return nullptr;
+ }
+
+ aobj->initShape(shape);
+ aobj->initFixedElements(kind, length);
+
+ if (!nDynamicSlots) {
+ aobj->initEmptyDynamicSlots();
+ } else if (!aobj->allocateInitialSlots(cx, nDynamicSlots)) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(clasp->shouldDelayMetadataBuilder());
+ cx->realm()->setObjectPendingMetadata(aobj);
+
+ if (slotSpan > 0) {
+ aobj->initDynamicSlots(slotSpan);
+ }
+
+ gc::gcprobes::CreateObject(aobj);
+ return aobj;
+}
+
+inline DenseElementResult ArrayObject::addDenseElementNoLengthChange(
+ JSContext* cx, uint32_t index, const Value& val) {
+ MOZ_ASSERT(isExtensible());
+
+ // Only support the `index < length` case so that we don't have to increase
+ // the array's .length value below.
+ if (index >= length() || containsDenseElement(index) || isIndexed()) {
+ return DenseElementResult::Incomplete;
+ }
+
+ DenseElementResult res = ensureDenseElements(cx, index, 1);
+ if (MOZ_UNLIKELY(res != DenseElementResult::Success)) {
+ return res;
+ }
+
+ initDenseElement(index, val);
+ return DenseElementResult::Success;
+}
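+
+// Illustrative call pattern (engine-internal; `arr`, `index`, and `val` are
+// hypothetical): callers typically fall back to the generic property path
+// when the fast dense path reports Incomplete.
+//
+//   DenseElementResult res =
+//       arr->addDenseElementNoLengthChange(cx, index, val);
+//   if (res == DenseElementResult::Failure) {
+//     return false;  // OOM was reported.
+//   }
+//   if (res == DenseElementResult::Incomplete) {
+//     // Take the slow path, e.g. define the element as an ordinary property.
+//   }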
+
+} // namespace js
+
+#endif // vm_ArrayObject_inl_h
diff --git a/js/src/vm/ArrayObject.h b/js/src/vm/ArrayObject.h
new file mode 100644
index 0000000000..8a10710dd8
--- /dev/null
+++ b/js/src/vm/ArrayObject.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ArrayObject_h
+#define vm_ArrayObject_h
+
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class AutoSetNewObjectMetadata;
+
+class ArrayObject : public NativeObject {
+ public:
+ // Array(x) eagerly allocates dense elements if x <= this value. Without
+ // the subtraction the max would roll over to the next power-of-two (4096)
+ // due to the way that growElements() and goodAllocated() work.
+ static const uint32_t EagerAllocationMaxLength =
+ 2048 - ObjectElements::VALUES_PER_HEADER;
+
+ static const JSClass class_;
+
+ bool lengthIsWritable() const {
+ return !getElementsHeader()->hasNonwritableArrayLength();
+ }
+
+ uint32_t length() const { return getElementsHeader()->length; }
+
+ void setNonWritableLength(JSContext* cx) {
+ shrinkCapacityToInitializedLength(cx);
+ getElementsHeader()->setNonwritableArrayLength();
+ }
+
+ void setLength(uint32_t length) {
+ MOZ_ASSERT(lengthIsWritable());
+ MOZ_ASSERT_IF(length != getElementsHeader()->length,
+ !denseElementsAreFrozen());
+ getElementsHeader()->length = length;
+ }
+
+ // Try to add a new dense element to this array. The array must be extensible.
+ //
+ // Returns DenseElementResult::Incomplete if `index >= length`, if the array
+ // has sparse elements, if we're adding a sparse element, or if the array
+ // already contains a dense element at this index.
+ inline DenseElementResult addDenseElementNoLengthChange(JSContext* cx,
+ uint32_t index,
+ const Value& val);
+
+ // Make an array object with the specified initial state.
+ static MOZ_ALWAYS_INLINE ArrayObject* create(
+ JSContext* cx, gc::AllocKind kind, gc::Heap heap,
+ Handle<SharedShape*> shape, uint32_t length, uint32_t slotSpan,
+ AutoSetNewObjectMetadata& metadata, gc::AllocSite* site = nullptr);
+};
+
+} // namespace js
+
+#endif // vm_ArrayObject_h
diff --git a/js/src/vm/AsyncFunction.cpp b/js/src/vm/AsyncFunction.cpp
new file mode 100644
index 0000000000..7b2482f2a9
--- /dev/null
+++ b/js/src/vm/AsyncFunction.cpp
@@ -0,0 +1,349 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/AsyncFunction.h"
+
+#include "mozilla/Maybe.h"
+
+#include "jsapi.h"
+
+#include "builtin/ModuleObject.h"
+#include "builtin/Promise.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GeneratorObject.h"
+#include "vm/GlobalObject.h"
+#include "vm/Interpreter.h"
+#include "vm/Modules.h"
+#include "vm/NativeObject.h"
+#include "vm/PromiseObject.h" // js::PromiseObject
+#include "vm/Realm.h"
+#include "vm/SelfHosting.h"
+
+#include "vm/JSContext-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+
+using mozilla::Maybe;
+
+static JSObject* CreateAsyncFunction(JSContext* cx, JSProtoKey key) {
+ RootedObject proto(cx, &cx->global()->getFunctionConstructor());
+ Handle<PropertyName*> name = cx->names().AsyncFunction;
+ return NewFunctionWithProto(cx, AsyncFunctionConstructor, 1,
+ FunctionFlags::NATIVE_CTOR, nullptr, name, proto,
+ gc::AllocKind::FUNCTION, TenuredObject);
+}
+
+static JSObject* CreateAsyncFunctionPrototype(JSContext* cx, JSProtoKey key) {
+ return NewTenuredObjectWithFunctionPrototype(cx, cx->global());
+}
+
+static bool AsyncFunctionClassFinish(JSContext* cx, HandleObject asyncFunction,
+ HandleObject asyncFunctionProto) {
+ // Change the "constructor" property to non-writable before adding any other
+ // properties, so it's still the last property and can be modified without a
+ // dictionary-mode transition.
+ MOZ_ASSERT(asyncFunctionProto->as<NativeObject>().getLastProperty().key() ==
+ NameToId(cx->names().constructor));
+ MOZ_ASSERT(!asyncFunctionProto->as<NativeObject>().inDictionaryMode());
+
+ RootedValue asyncFunctionVal(cx, ObjectValue(*asyncFunction));
+ if (!DefineDataProperty(cx, asyncFunctionProto, cx->names().constructor,
+ asyncFunctionVal, JSPROP_READONLY)) {
+ return false;
+ }
+ MOZ_ASSERT(!asyncFunctionProto->as<NativeObject>().inDictionaryMode());
+
+ return DefineToStringTag(cx, asyncFunctionProto, cx->names().AsyncFunction);
+}
+
+static const ClassSpec AsyncFunctionClassSpec = {
+ CreateAsyncFunction,
+ CreateAsyncFunctionPrototype,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ AsyncFunctionClassFinish,
+ ClassSpec::DontDefineConstructor};
+
+const JSClass js::AsyncFunctionClass = {"AsyncFunction", 0, JS_NULL_CLASS_OPS,
+ &AsyncFunctionClassSpec};
+
+enum class ResumeKind { Normal, Throw };
+
+/**
+ * ES2022 draft rev d03c1ec6e235a5180fa772b6178727c17974cb14
+ *
+ * Await in async function
+ * https://tc39.es/ecma262/#await
+ *
+ * Unified implementation of
+ *
+ * Step 3. fulfilledClosure Abstract Closure.
+ * Step 5. rejectedClosure Abstract Closure.
+ */
+static bool AsyncFunctionResume(JSContext* cx,
+ Handle<AsyncFunctionGeneratorObject*> generator,
+ ResumeKind kind, HandleValue valueOrReason) {
+ // We're enqueuing the promise job for Await before suspending the execution
+ // of the async function. So when either the debugger or OOM errors terminate
+ // the execution after JSOp::AsyncAwait, but before JSOp::Await, we're in an
+ // inconsistent state, because we don't have a resume index set and therefore
+ // don't know where to resume the async function. Return here in that case.
+ if (generator->isClosed()) {
+ return true;
+ }
+
+ // The debugger sets the async function's generator object into the "running"
+ // state while firing debugger events to ensure the debugger can't re-enter
+ // the async function, cf. |AutoSetGeneratorRunning| in Debugger.cpp. Catch
+  // this case here by checking if the generator is already running.
+ if (generator->isRunning()) {
+ return true;
+ }
+
+ Rooted<PromiseObject*> resultPromise(cx, generator->promise());
+
+ RootedObject stack(cx);
+ Maybe<JS::AutoSetAsyncStackForNewCalls> asyncStack;
+ if (JSObject* allocationSite = resultPromise->allocationSite()) {
+ // The promise is created within the activation of the async function, so
+ // use the parent frame as the starting point for async stacks.
+ stack = allocationSite->as<SavedFrame>().getParent();
+ if (stack) {
+ asyncStack.emplace(
+ cx, stack, "async",
+ JS::AutoSetAsyncStackForNewCalls::AsyncCallKind::EXPLICIT);
+ }
+ }
+
+ MOZ_ASSERT(generator->isSuspended(),
+ "non-suspended generator when resuming async function");
+
+ // Step {3,5}.a. Let prevContext be the running execution context.
+ // Step {3,5}.b. Suspend prevContext.
+ // Step {3,5}.c. Push asyncContext onto the execution context stack;
+ // asyncContext is now the running execution context.
+ //
+ // fulfilledClosure
+ // Step 3.d. Resume the suspended evaluation of asyncContext using
+ // NormalCompletion(value) as the result of the operation that
+ // suspended it.
+ //
+ // rejectedClosure
+ // Step 5.d. Resume the suspended evaluation of asyncContext using
+ // ThrowCompletion(reason) as the result of the operation that
+ // suspended it.
+ //
+ // Execution context switching is handled in generator.
+ Handle<PropertyName*> funName = kind == ResumeKind::Normal
+ ? cx->names().AsyncFunctionNext
+ : cx->names().AsyncFunctionThrow;
+ FixedInvokeArgs<1> args(cx);
+ args[0].set(valueOrReason);
+ RootedValue generatorOrValue(cx, ObjectValue(*generator));
+ if (!CallSelfHostedFunction(cx, funName, generatorOrValue, args,
+ &generatorOrValue)) {
+ if (!generator->isClosed()) {
+ generator->setClosed();
+ }
+
+ // Handle the OOM case mentioned above.
+ if (resultPromise->state() == JS::PromiseState::Pending &&
+ cx->isExceptionPending()) {
+ RootedValue exn(cx);
+ if (!GetAndClearException(cx, &exn)) {
+ return false;
+ }
+ return AsyncFunctionThrown(cx, resultPromise, exn);
+ }
+ return false;
+ }
+
+  // Step {3,5}.e. Assert: When we reach this step, asyncContext has already
+  //                 been removed from the execution context stack and
+  //                 prevContext is the currently running execution context.
+  // Step {3,5}.f. Return undefined.
+ MOZ_ASSERT_IF(generator->isClosed(), generatorOrValue.isObject());
+ MOZ_ASSERT_IF(generator->isClosed(),
+ &generatorOrValue.toObject() == resultPromise);
+ MOZ_ASSERT_IF(!generator->isClosed(), generator->isAfterAwait());
+
+ return true;
+}
+
+/**
+ * ES2022 draft rev d03c1ec6e235a5180fa772b6178727c17974cb14
+ *
+ * Await in async function
+ * https://tc39.es/ecma262/#await
+ *
+ * Step 3. fulfilledClosure Abstract Closure.
+ */
+[[nodiscard]] bool js::AsyncFunctionAwaitedFulfilled(
+ JSContext* cx, Handle<AsyncFunctionGeneratorObject*> generator,
+ HandleValue value) {
+ return AsyncFunctionResume(cx, generator, ResumeKind::Normal, value);
+}
+
+/**
+ * ES2022 draft rev d03c1ec6e235a5180fa772b6178727c17974cb14
+ *
+ * Await in async function
+ * https://tc39.es/ecma262/#await
+ *
+ * Step 5. rejectedClosure Abstract Closure.
+ */
+[[nodiscard]] bool js::AsyncFunctionAwaitedRejected(
+ JSContext* cx, Handle<AsyncFunctionGeneratorObject*> generator,
+ HandleValue reason) {
+ return AsyncFunctionResume(cx, generator, ResumeKind::Throw, reason);
+}
+
+JSObject* js::AsyncFunctionResolve(
+ JSContext* cx, Handle<AsyncFunctionGeneratorObject*> generator,
+ HandleValue valueOrReason, AsyncFunctionResolveKind resolveKind) {
+ Rooted<PromiseObject*> promise(cx, generator->promise());
+ if (resolveKind == AsyncFunctionResolveKind::Fulfill) {
+ if (!AsyncFunctionReturned(cx, promise, valueOrReason)) {
+ return nullptr;
+ }
+ } else {
+ if (!AsyncFunctionThrown(cx, promise, valueOrReason)) {
+ return nullptr;
+ }
+ }
+ return promise;
+}
+
+const JSClass AsyncFunctionGeneratorObject::class_ = {
+ "AsyncFunctionGenerator",
+ JSCLASS_HAS_RESERVED_SLOTS(AsyncFunctionGeneratorObject::RESERVED_SLOTS),
+ &classOps_,
+};
+
+const JSClassOps AsyncFunctionGeneratorObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ nullptr, // finalize
+ nullptr, // call
+ nullptr, // construct
+ CallTraceMethod<AbstractGeneratorObject>, // trace
+};
+
+AsyncFunctionGeneratorObject* AsyncFunctionGeneratorObject::create(
+ JSContext* cx, HandleFunction fun) {
+ MOZ_ASSERT(fun->isAsync() && !fun->isGenerator());
+
+ Rooted<PromiseObject*> resultPromise(cx, CreatePromiseObjectForAsync(cx));
+ if (!resultPromise) {
+ return nullptr;
+ }
+
+ auto* obj = NewBuiltinClassInstance<AsyncFunctionGeneratorObject>(cx);
+ if (!obj) {
+ return nullptr;
+ }
+ obj->initFixedSlot(PROMISE_SLOT, ObjectValue(*resultPromise));
+
+ // Starts in the running state.
+ obj->setResumeIndex(AbstractGeneratorObject::RESUME_INDEX_RUNNING);
+
+ return obj;
+}
+
+static JSFunction* NewHandler(JSContext* cx, Native handler,
+                              JS::Handle<JSObject*> target) {
+ cx->check(target);
+
+ JS::Handle<PropertyName*> funName = cx->names().empty;
+ JS::Rooted<JSFunction*> handlerFun(
+ cx, NewNativeFunction(cx, handler, 0, funName,
+ gc::AllocKind::FUNCTION_EXTENDED, GenericObject));
+ if (!handlerFun) {
+ return nullptr;
+ }
+ handlerFun->setExtendedSlot(FunctionExtended::MODULE_SLOT,
+ JS::ObjectValue(*target));
+ return handlerFun;
+}
+
+static bool AsyncModuleExecutionFulfilledHandler(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ JSFunction& func = args.callee().as<JSFunction>();
+
+ Rooted<ModuleObject*> module(
+ cx, &func.getExtendedSlot(FunctionExtended::MODULE_SLOT)
+ .toObject()
+ .as<ModuleObject>());
+ AsyncModuleExecutionFulfilled(cx, module);
+ args.rval().setUndefined();
+ return true;
+}
+
+static bool AsyncModuleExecutionRejectedHandler(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ JSFunction& func = args.callee().as<JSFunction>();
+ Rooted<ModuleObject*> module(
+ cx, &func.getExtendedSlot(FunctionExtended::MODULE_SLOT)
+ .toObject()
+ .as<ModuleObject>());
+ AsyncModuleExecutionRejected(cx, module, args.get(0));
+ args.rval().setUndefined();
+ return true;
+}
+
+AsyncFunctionGeneratorObject* AsyncFunctionGeneratorObject::create(
+ JSContext* cx, Handle<ModuleObject*> module) {
+  // TODO: Module is currently hitching a ride with
+  // AsyncFunctionGeneratorObject. The reason for this is that we have some
+  // work in the JITs that makes use of this object when we hit AsyncAwait
+  // bytecode. At the same time, top-level await shares a lot of its
+  // implementation with AsyncFunction. I am not sure if the best thing to do
+  // here is inherit, override, or do something else. Comments appreciated.
+ MOZ_ASSERT(module->script()->isAsync());
+
+ Rooted<PromiseObject*> resultPromise(cx, CreatePromiseObjectForAsync(cx));
+ if (!resultPromise) {
+ return nullptr;
+ }
+
+ Rooted<AsyncFunctionGeneratorObject*> obj(
+ cx, NewBuiltinClassInstance<AsyncFunctionGeneratorObject>(cx));
+ if (!obj) {
+ return nullptr;
+ }
+ obj->initFixedSlot(PROMISE_SLOT, ObjectValue(*resultPromise));
+
+ RootedObject onFulfilled(
+ cx, NewHandler(cx, AsyncModuleExecutionFulfilledHandler, module));
+ if (!onFulfilled) {
+ return nullptr;
+ }
+
+ RootedObject onRejected(
+ cx, NewHandler(cx, AsyncModuleExecutionRejectedHandler, module));
+ if (!onRejected) {
+ return nullptr;
+ }
+
+ if (!JS::AddPromiseReactionsIgnoringUnhandledRejection(
+ cx, resultPromise, onFulfilled, onRejected)) {
+ return nullptr;
+ }
+
+ // Starts in the running state.
+ obj->setResumeIndex(AbstractGeneratorObject::RESUME_INDEX_RUNNING);
+
+ return obj;
+}
diff --git a/js/src/vm/AsyncFunction.h b/js/src/vm/AsyncFunction.h
new file mode 100644
index 0000000000..dbcfa2aec5
--- /dev/null
+++ b/js/src/vm/AsyncFunction.h
@@ -0,0 +1,324 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_AsyncFunction_h
+#define vm_AsyncFunction_h
+
+#include "js/Class.h"
+#include "vm/AsyncFunctionResolveKind.h" // AsyncFunctionResolveKind
+#include "vm/GeneratorObject.h"
+#include "vm/JSObject.h"
+#include "vm/PromiseObject.h"
+
+// [SMDOC] Async functions
+//
+// # Implementation
+//
+// Async functions are implemented based on generators, in terms of
+// suspend/resume.
+// Instead of returning the generator object itself, they return the async
+// function's result promise to the caller.
+//
+// The async function's result promise is stored in the generator object
+// (js::AsyncFunctionGeneratorObject) and retrieved from it whenever the
+// execution needs it.
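+//
+// For example, given a function like the following (hypothetical user code;
+// `g` stands for any async operation):
+//
+// ```
+// async function f() {
+//   let v = await g();
+//   return v + 1;
+// }
+// ```
+//
+// calling `f()` runs the body up to the first `await` and hands the result
+// promise back to the caller; the later `return` resolves that same promise
+// with `v + 1`.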
+//
+//
+// # Start
+//
+// When an async function is called, it synchronously runs until the first
+// `await` or `return`. This works just like a normal function.
+//
+// This corresponds to steps 1-3, 5-9 of AsyncFunctionStart.
+//
+// AsyncFunctionStart ( promiseCapability, asyncFunctionBody )
+// https://tc39.es/ecma262/#sec-async-functions-abstract-operations-async-function-start
+//
+// 1. Let runningContext be the running execution context.
+// 2. Let asyncContext be a copy of runningContext.
+// 3. NOTE: Copying the execution state is required for the step below to
+// resume its execution. It is ill-defined to resume a currently executing
+// context.
+// ...
+// 5. Push asyncContext onto the execution context stack; asyncContext is now
+// the running execution context.
+// 6. Resume the suspended evaluation of asyncContext. Let result be the value
+// returned by the resumed computation.
+// 7. Assert: When we return here, asyncContext has already been removed from
+// the execution context stack and runningContext is the currently running
+// execution context.
+// 8. Assert: result is a normal completion with a value of undefined. The
+// possible sources of completion values are Await or, if the async
+// function doesn't await anything, step 4.g above.
+// 9. Return.
+//
+// Unlike generators, async functions don't contain JSOp::InitialYield and
+// don't suspend immediately when called.
+//
+//
+// # Return
+//
+// Explicit/implicit `return` is implemented with the following bytecode
+// sequence:
+//
+// ```
+// GetAliasedVar ".generator" # VALUE .generator
+// AsyncResolve 0 # PROMISE
+// SetRval #
+// GetAliasedVar ".generator" # .generator
+// FinalYieldRval #
+// ```
+//
+// JSOp::AsyncResolve (js::AsyncFunctionResolve) resolves the current async
+// function's result promise, and JSOp::SetRval then sets that promise as the
+// function's return value. (The return value is observable only if the
+// caller is still on the stack--that is, if the async function is returning
+// without ever having awaited. Otherwise we're returning to the microtask
+// loop, which ignores the return value.)
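+//
+// For example (hypothetical user code):
+//
+// ```
+// async function f() {
+//   return 42;
+// }
+// ```
+//
+// Since `f` never awaits, the caller is still on the stack when the implicit
+// sequence above runs: AsyncResolve fulfills the result promise with 42, and
+// that promise becomes the function's return value via SetRval.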
+//
+// This corresponds to AsyncFunctionStart steps 4.a-e and 4.g.
+//
+// 4. Set the code evaluation state of asyncContext such that when evaluation
+// is resumed for that execution context the following steps will be
+// performed:
+// a. Let result be the result of evaluating asyncFunctionBody.
+// b. Assert: If we return here, the async function either threw an
+// exception or performed an implicit or explicit return; all awaiting
+// is done.
+// c. Remove asyncContext from the execution context stack and restore the
+// execution context that is at the top of the execution context stack as
+// the running execution context.
+// d. If result.[[Type]] is normal, then
+// i. Perform
+// ! Call(promiseCapability.[[Resolve]], undefined, «undefined»).
+// e. Else if result.[[Type]] is return, then
+// i. Perform
+// ! Call(promiseCapability.[[Resolve]], undefined,
+// «result.[[Value]]»).
+// ...
+// g. Return.
+//
+//
+// # Throw
+//
+// The body of an async function is enclosed by an implicit try-catch block,
+// to catch a `throw` completion of the function body.
+//
+// If an exception is thrown by the function body, the catch block catches it
+// and rejects the async function's result promise.
+//
+// If the parameter list contains an expression, the entire parameters part
+// is also enclosed by a separate implicit try-catch block.
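+//
+// For example (hypothetical user code):
+//
+// ```
+// async function f(x = g()) {
+//   return x;
+// }
+// ```
+//
+// Here a throw from `g()` while evaluating the default value is caught by
+// the parameters' try-catch block and rejects the result promise, just like
+// a throw from the body: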
+//
+// ```
+// Try #
+// (parameter expressions here) #
+// Goto BODY #
+//
+// JumpTarget from try #
+// Exception # EXCEPTION
+// GetAliasedVar ".generator" # EXCEPTION .generator
+// AsyncResolve 1 # PROMISE
+// SetRval #
+// GetAliasedVar ".generator" # .generator
+// FinalYieldRval #
+//
+// BODY:
+// JumpTarget #
+// Try #
+// (body here) #
+//
+// JumpTarget from try #
+// Exception # EXCEPTION
+// GetAliasedVar ".generator" # EXCEPTION .generator
+// AsyncResolve 1 # PROMISE
+// SetRval #
+// GetAliasedVar ".generator" # .generator
+// FinalYieldRval #
+// ```
+//
+// This corresponds to AsyncFunctionStart steps 4.f-g.
+//
+// 4. ...
+// f. Else,
+// i. Assert: result.[[Type]] is throw.
+// ii. Perform
+// ! Call(promiseCapability.[[Reject]], undefined,
+// «result.[[Value]]»).
+// g. Return.
+//
+//
+// # Await
+//
+// `await` is implemented with the following bytecode sequence:
+// (ignoring CanSkipAwait for now, see "Optimization for await" section)
+//
+// ```
+// (operand here) # VALUE
+// GetAliasedVar ".generator" # VALUE .generator
+// AsyncAwait # PROMISE
+//
+// GetAliasedVar ".generator" # PROMISE .generator
+// Await 0 # RVAL GENERATOR RESUMEKIND
+//
+// AfterYield # RVAL GENERATOR RESUMEKIND
+// CheckResumeKind # RVAL
+// ```
+//
+// JSOp::AsyncAwait corresponds to Await steps 1-9, and JSOp::Await corresponds
+// to Await steps 10-12 in the spec.
+//
+// See the next section for JSOp::CheckResumeKind.
+//
+// After these ops, the async function is suspended, and if this is the first
+// await in its execution, the async function's result promise is returned to
+// the caller.
+//
+// Await
+// https://tc39.es/ecma262/#await
+//
+// 1. Let asyncContext be the running execution context.
+// 2. Let promise be ? PromiseResolve(%Promise%, value).
+// 3. Let stepsFulfilled be the algorithm steps defined in Await Fulfilled
+// Functions.
+// 4. Let onFulfilled be ! CreateBuiltinFunction(stepsFulfilled, «
+// [[AsyncContext]] »).
+// 5. Set onFulfilled.[[AsyncContext]] to asyncContext.
+// 6. Let stepsRejected be the algorithm steps defined in Await Rejected
+// Functions.
+// 7. Let onRejected be ! CreateBuiltinFunction(stepsRejected, «
+// [[AsyncContext]] »).
+// 8. Set onRejected.[[AsyncContext]] to asyncContext.
+// 9. Perform ! PerformPromiseThen(promise, onFulfilled, onRejected).
+// 10. Remove asyncContext from the execution context stack and restore the
+// execution context that is at the top of the execution context stack as
+// the running execution context.
+// 11. Set the code evaluation state of asyncContext such that when evaluation
+// is resumed with a Completion completion, the following steps of the
+// algorithm that invoked Await will be performed, with completion
+// available.
+// 12. Return.
+// 13. NOTE: This returns to the evaluation of the operation that had most
+//     recently resumed evaluation of asyncContext.
+//
+// (See comments above AsyncAwait and Await in js/src/vm/Opcodes.h for more
+// details)
+//
+//
+// # Reaction jobs and resume after await
+//
+// When an async function performs `await` and the operand becomes settled, a
+// new reaction job for the operand is enqueued to the job queue.
+//
+// The reaction record for the job is marked as "this is for async function"
+// (see js::AsyncFunctionAwait), and handled specially in
+// js::PromiseReactionJob.
+//
+// When the await operand settles (either with fulfillment or rejection),
+// the async function is resumed from the job queue, by calling
+// js::AsyncFunctionAwaitedFulfilled or js::AsyncFunctionAwaitedRejected
+// from js::AsyncFunctionPromiseReactionJob.
+//
+// The execution resumes from JSOp::AfterYield, with the resolved value and
+// the resume kind (either normal or throw, corresponding to fulfillment or
+// rejection) on the stack.
+//
+// The resume kind is handled by JSOp::CheckResumeKind after that.
+//
+// If the resume kind is normal (=fulfillment), the async function resumes
+// the execution with the resolved value as the result of `await`.
+//
+// If the resume kind is throw (=rejection), it throws the resolved value,
+// and it will be caught by the try-catch explained above.
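+//
+// For example (hypothetical user code):
+//
+// ```
+// async function f() {
+//   try {
+//     await Promise.reject(new Error("boom"));
+//   } catch (e) {
+//     // Resumed with the "throw" resume kind; e is the rejection reason.
+//   }
+// }
+// ```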
+//
+//
+// # Optimization for await
+//
+// Suspending the execution and going into the embedding's job queue is slow
+// and hard to optimize.
+//
+// If the following conditions are met, we don't have to perform the above,
+// and can simply use the await operand as the result of the await:
+//
+// 1. The await operand is either a non-promise or an already-fulfilled
+//    promise, so that the result value is already known
+// 2. There are no jobs in the job queue,
+//    so that we don't have to perform other jobs before resuming from
+//    await
+// 3. The Promise constructor/prototype are not modified,
+//    so that the optimization isn't visible to the user code
+//
+// This is implemented by the following bytecode sequence:
+//
+// ```
+// (operand here) # VALUE
+//
+// CanSkipAwait # VALUE, CAN_SKIP
+// MaybeExtractAwaitValue # VALUE_OR_RVAL, CAN_SKIP
+// JumpIfTrue END # VALUE
+//
+// JumpTarget # VALUE
+// GetAliasedVar ".generator" # VALUE .generator
+// Await 0 # RVAL GENERATOR RESUMEKIND
+// AfterYield # RVAL GENERATOR RESUMEKIND
+// CheckResumeKind # RVAL
+//
+// END:
+// JumpTarget # RVAL
+// ```
+//
+// JSOp::CanSkipAwait checks the above conditions. If the await can be
+// skipped, JSOp::MaybeExtractAwaitValue replaces VALUE with the unwrapped
+// result value, and the await is jumped over.
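+//
+// For example (hypothetical user code):
+//
+// ```
+// async function f() {
+//   return await 1;
+// }
+// ```
+//
+// Here the operand `1` is not a promise, so if the job queue is empty and
+// the Promise built-ins are in their initial state, `await 1` takes the
+// fast path and uses `1` directly as the result of the await.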
+
+namespace js {
+
+class AsyncFunctionGeneratorObject;
+
+extern const JSClass AsyncFunctionClass;
+
+// Resume the async function when the `await` operand resolves.
+// Split into two functions depending on whether the awaited value was
+// fulfilled or rejected.
+[[nodiscard]] bool AsyncFunctionAwaitedFulfilled(
+ JSContext* cx, Handle<AsyncFunctionGeneratorObject*> generator,
+ HandleValue value);
+
+[[nodiscard]] bool AsyncFunctionAwaitedRejected(
+ JSContext* cx, Handle<AsyncFunctionGeneratorObject*> generator,
+ HandleValue reason);
+
+// Resolve the async function's promise object with the given value and then
+// return the promise object.
+JSObject* AsyncFunctionResolve(JSContext* cx,
+ Handle<AsyncFunctionGeneratorObject*> generator,
+ HandleValue valueOrReason,
+ AsyncFunctionResolveKind resolveKind);
+
+class AsyncFunctionGeneratorObject : public AbstractGeneratorObject {
+ public:
+ enum {
+ PROMISE_SLOT = AbstractGeneratorObject::RESERVED_SLOTS,
+
+ RESERVED_SLOTS
+ };
+
+ static const JSClass class_;
+ static const JSClassOps classOps_;
+
+ static AsyncFunctionGeneratorObject* create(JSContext* cx,
+ HandleFunction asyncGen);
+
+ static AsyncFunctionGeneratorObject* create(JSContext* cx,
+ Handle<ModuleObject*> module);
+
+ PromiseObject* promise() {
+ return &getFixedSlot(PROMISE_SLOT).toObject().as<PromiseObject>();
+ }
+};
+
+} // namespace js
+
+#endif /* vm_AsyncFunction_h */
diff --git a/js/src/vm/AsyncFunctionResolveKind.h b/js/src/vm/AsyncFunctionResolveKind.h
new file mode 100644
index 0000000000..75adfcec3c
--- /dev/null
+++ b/js/src/vm/AsyncFunctionResolveKind.h
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_AsyncFunctionResolveKind_h
+#define vm_AsyncFunctionResolveKind_h
+
+#include <stdint.h> // uint8_t
+
+namespace js {
+
+enum class AsyncFunctionResolveKind : uint8_t { Fulfill, Reject };
+
+} // namespace js
+
+#endif /* vm_AsyncFunctionResolveKind_h */
diff --git a/js/src/vm/AsyncIteration.cpp b/js/src/vm/AsyncIteration.cpp
new file mode 100644
index 0000000000..293fc8c31c
--- /dev/null
+++ b/js/src/vm/AsyncIteration.cpp
@@ -0,0 +1,1484 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/AsyncIteration.h"
+
+#include "builtin/Promise.h" // js::PromiseHandler, js::CreatePromiseObjectForAsyncGenerator, js::AsyncFromSyncIteratorMethod, js::ResolvePromiseInternal, js::RejectPromiseInternal, js::InternalAsyncGeneratorAwait
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/PropertySpec.h"
+#include "vm/CompletionKind.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GeneratorObject.h"
+#include "vm/GlobalObject.h"
+#include "vm/Interpreter.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/PromiseObject.h" // js::PromiseObject
+#include "vm/Realm.h"
+#include "vm/SelfHosting.h"
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#include "vm/JSObject-inl.h"
+#include "vm/List-inl.h"
+
+using namespace js;
+
+// ---------------
+// Async generator
+// ---------------
+
+const JSClass AsyncGeneratorObject::class_ = {
+ "AsyncGenerator",
+ JSCLASS_HAS_RESERVED_SLOTS(AsyncGeneratorObject::Slots),
+ &classOps_,
+};
+
+const JSClassOps AsyncGeneratorObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ nullptr, // finalize
+ nullptr, // call
+ nullptr, // construct
+ CallTraceMethod<AbstractGeneratorObject>, // trace
+};
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// OrdinaryCreateFromConstructor ( constructor, intrinsicDefaultProto
+// [ , internalSlotsList ] )
+// https://tc39.es/ecma262/#sec-ordinarycreatefromconstructor
+//
+// Specialized for AsyncGeneratorObjects.
+static AsyncGeneratorObject* OrdinaryCreateFromConstructorAsyncGen(
+    JSContext* cx, HandleFunction constructor) {
+ // Step 1: Assert...
+ // (implicit)
+
+ // Step 2. Let proto be
+ // ? GetPrototypeFromConstructor(constructor, intrinsicDefaultProto).
+ RootedValue protoVal(cx);
+ if (!GetProperty(cx, constructor, constructor, cx->names().prototype,
+ &protoVal)) {
+ return nullptr;
+ }
+
+ RootedObject proto(cx, protoVal.isObject() ? &protoVal.toObject() : nullptr);
+ if (!proto) {
+ proto = GlobalObject::getOrCreateAsyncGeneratorPrototype(cx, cx->global());
+ if (!proto) {
+ return nullptr;
+ }
+ }
+
+ // Step 3. Return ! OrdinaryObjectCreate(proto, internalSlotsList).
+ return NewObjectWithGivenProto<AsyncGeneratorObject>(cx, proto);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorStart ( generator, generatorBody )
+// https://tc39.es/ecma262/#sec-asyncgeneratorstart
+//
+// Steps 6-7.
+/* static */
+AsyncGeneratorObject* AsyncGeneratorObject::create(JSContext* cx,
+ HandleFunction asyncGen) {
+ MOZ_ASSERT(asyncGen->isAsync() && asyncGen->isGenerator());
+
+ AsyncGeneratorObject* generator =
+      OrdinaryCreateFromConstructorAsyncGen(cx, asyncGen);
+ if (!generator) {
+ return nullptr;
+ }
+
+ // Step 6. Set generator.[[AsyncGeneratorState]] to suspendedStart.
+ generator->setSuspendedStart();
+
+ // Step 7. Set generator.[[AsyncGeneratorQueue]] to a new empty List.
+ generator->clearSingleQueueRequest();
+
+ generator->clearCachedRequest();
+
+ return generator;
+}
+
+/* static */
+AsyncGeneratorRequest* AsyncGeneratorObject::createRequest(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ CompletionKind completionKind, HandleValue completionValue,
+ Handle<PromiseObject*> promise) {
+ if (!generator->hasCachedRequest()) {
+ return AsyncGeneratorRequest::create(cx, completionKind, completionValue,
+ promise);
+ }
+
+ AsyncGeneratorRequest* request = generator->takeCachedRequest();
+ request->init(completionKind, completionValue, promise);
+ return request;
+}
+
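+// Append a request to the generator's queue. As an optimization, a queue
+// holding at most one element is represented by storing the request
+// directly on the generator ("single queue"); a ListObject is allocated
+// only once a second request is enqueued.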
+/* static */ [[nodiscard]] bool AsyncGeneratorObject::enqueueRequest(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ Handle<AsyncGeneratorRequest*> request) {
+ if (generator->isSingleQueue()) {
+ if (generator->isSingleQueueEmpty()) {
+ generator->setSingleQueueRequest(request);
+ return true;
+ }
+
+ Rooted<ListObject*> queue(cx, ListObject::create(cx));
+ if (!queue) {
+ return false;
+ }
+
+ RootedValue requestVal(cx, ObjectValue(*generator->singleQueueRequest()));
+ if (!queue->append(cx, requestVal)) {
+ return false;
+ }
+ requestVal = ObjectValue(*request);
+ if (!queue->append(cx, requestVal)) {
+ return false;
+ }
+
+ generator->setQueue(queue);
+ return true;
+ }
+
+ Rooted<ListObject*> queue(cx, generator->queue());
+ RootedValue requestVal(cx, ObjectValue(*request));
+ return queue->append(cx, requestVal);
+}
+
+/* static */
+AsyncGeneratorRequest* AsyncGeneratorObject::dequeueRequest(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator) {
+ if (generator->isSingleQueue()) {
+ AsyncGeneratorRequest* request = generator->singleQueueRequest();
+ generator->clearSingleQueueRequest();
+ return request;
+ }
+
+ Rooted<ListObject*> queue(cx, generator->queue());
+ return &queue->popFirstAs<AsyncGeneratorRequest>(cx);
+}
+
+/* static */
+AsyncGeneratorRequest* AsyncGeneratorObject::peekRequest(
+ Handle<AsyncGeneratorObject*> generator) {
+ if (generator->isSingleQueue()) {
+ return generator->singleQueueRequest();
+ }
+
+ return &generator->queue()->getAs<AsyncGeneratorRequest>(0);
+}
+
+const JSClass AsyncGeneratorRequest::class_ = {
+ "AsyncGeneratorRequest",
+ JSCLASS_HAS_RESERVED_SLOTS(AsyncGeneratorRequest::Slots)};
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorRequest Records
+// https://tc39.es/ecma262/#sec-asyncgeneratorrequest-records
+/* static */
+AsyncGeneratorRequest* AsyncGeneratorRequest::create(
+ JSContext* cx, CompletionKind completionKind, HandleValue completionValue,
+ Handle<PromiseObject*> promise) {
+ AsyncGeneratorRequest* request =
+ NewObjectWithGivenProto<AsyncGeneratorRequest>(cx, nullptr);
+ if (!request) {
+ return nullptr;
+ }
+
+ request->init(completionKind, completionValue, promise);
+ return request;
+}
+
+[[nodiscard]] static bool AsyncGeneratorResume(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ CompletionKind completionKind, HandleValue argument);
+
+[[nodiscard]] static bool AsyncGeneratorDrainQueue(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator);
+
+[[nodiscard]] static bool AsyncGeneratorCompleteStepNormal(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value,
+ bool done);
+
+[[nodiscard]] static bool AsyncGeneratorCompleteStepThrow(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ HandleValue exception);
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorStart ( generator, generatorBody )
+// https://tc39.es/ecma262/#sec-asyncgeneratorstart
+//
+// Steps 4.e-j. "return" case.
+[[nodiscard]] static bool AsyncGeneratorReturned(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value) {
+ // Step 4.e. Set generator.[[AsyncGeneratorState]] to completed.
+ generator->setCompleted();
+
+ // Step 4.g. If result.[[Type]] is return, set result to
+ // NormalCompletion(result.[[Value]]).
+ // (implicit)
+
+ // Step 4.h. Perform ! AsyncGeneratorCompleteStep(generator, result, true).
+ if (!AsyncGeneratorCompleteStepNormal(cx, generator, value, true)) {
+ return false;
+ }
+
+ // Step 4.i. Perform ! AsyncGeneratorDrainQueue(generator).
+ // Step 4.j. Return undefined.
+ return AsyncGeneratorDrainQueue(cx, generator);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorStart ( generator, generatorBody )
+// https://tc39.es/ecma262/#sec-asyncgeneratorstart
+//
+// Steps 4.e-j. "throw" case.
+[[nodiscard]] static bool AsyncGeneratorThrown(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator) {
+ // Step 4.e. Set generator.[[AsyncGeneratorState]] to completed.
+ generator->setCompleted();
+
+ // Not much we can do about uncatchable exceptions, so just bail.
+ if (!cx->isExceptionPending()) {
+ return false;
+ }
+
+ // Step 4.h. Perform ! AsyncGeneratorCompleteStep(generator, result, true).
+ RootedValue value(cx);
+ if (!GetAndClearException(cx, &value)) {
+ return false;
+ }
+ if (!AsyncGeneratorCompleteStepThrow(cx, generator, value)) {
+ return false;
+ }
+
+ // Step 4.i. Perform ! AsyncGeneratorDrainQueue(generator).
+ // Step 4.j. Return undefined.
+ return AsyncGeneratorDrainQueue(cx, generator);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorUnwrapYieldResumption ( resumptionValue )
+// https://tc39.es/ecma262/#sec-asyncgeneratorunwrapyieldresumption
+//
+// Steps 4-5.
+[[nodiscard]] static bool AsyncGeneratorYieldReturnAwaitedFulfilled(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value) {
+ MOZ_ASSERT(generator->isAwaitingYieldReturn(),
+ "YieldReturn-Await fulfilled when not in "
+ "'AwaitingYieldReturn' state");
+
+ // Step 4. Assert: awaited.[[Type]] is normal.
+ // Step 5. Return Completion { [[Type]]: return, [[Value]]:
+ // awaited.[[Value]], [[Target]]: empty }.
+ return AsyncGeneratorResume(cx, generator, CompletionKind::Return, value);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorUnwrapYieldResumption ( resumptionValue )
+// https://tc39.es/ecma262/#sec-asyncgeneratorunwrapyieldresumption
+//
+// Step 3.
+[[nodiscard]] static bool AsyncGeneratorYieldReturnAwaitedRejected(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ HandleValue reason) {
+ MOZ_ASSERT(
+ generator->isAwaitingYieldReturn(),
+ "YieldReturn-Await rejected when not in 'AwaitingYieldReturn' state");
+
+ // Step 3. If awaited.[[Type]] is throw, return Completion(awaited).
+ return AsyncGeneratorResume(cx, generator, CompletionKind::Throw, reason);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorUnwrapYieldResumption ( resumptionValue )
+// https://tc39.es/ecma262/#sec-asyncgeneratorunwrapyieldresumption
+//
+// Steps 1-2.
+[[nodiscard]] static bool AsyncGeneratorUnwrapYieldResumptionAndResume(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ CompletionKind completionKind, HandleValue resumptionValue) {
+ // Step 1. If resumptionValue.[[Type]] is not return, return
+ // Completion(resumptionValue).
+ if (completionKind != CompletionKind::Return) {
+ return AsyncGeneratorResume(cx, generator, completionKind, resumptionValue);
+ }
+
+ // Step 2. Let awaited be Await(resumptionValue.[[Value]]).
+ //
+  // Since we don't have a place inside the generator that handles a return
+  // from yield, handle that case here, using the extra state
+  // State_AwaitingYieldReturn.
+ generator->setAwaitingYieldReturn();
+
+ const PromiseHandler onFulfilled =
+ PromiseHandler::AsyncGeneratorYieldReturnAwaitedFulfilled;
+ const PromiseHandler onRejected =
+ PromiseHandler::AsyncGeneratorYieldReturnAwaitedRejected;
+
+ return InternalAsyncGeneratorAwait(cx, generator, resumptionValue,
+ onFulfilled, onRejected);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorYield ( value )
+// https://tc39.es/ecma262/#sec-asyncgeneratoryield
+//
+// Steps 10-13.
+[[nodiscard]] static bool AsyncGeneratorYield(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value) {
+ // Step 10. Perform
+ // ! AsyncGeneratorCompleteStep(generator, completion, false,
+ // previousRealm).
+ if (!AsyncGeneratorCompleteStepNormal(cx, generator, value, false)) {
+ return false;
+ }
+
+ // Step 11. Let queue be generator.[[AsyncGeneratorQueue]].
+ // Step 12. If queue is not empty, then
+ // Step 13. Else,
+ // (reordered)
+ if (generator->isQueueEmpty()) {
+ // Step 13.a. Set generator.[[AsyncGeneratorState]] to suspendedYield.
+ generator->setSuspendedYield();
+
+ // Steps 13.b-c are done in caller.
+
+ // Step 13.d. Return undefined.
+ return true;
+ }
+
+ // Step 12. If queue is not empty, then
+ // Step 12.a. NOTE: Execution continues without suspending the generator.
+
+ // Step 12.b. Let toYield be the first element of queue.
+ Rooted<AsyncGeneratorRequest*> toYield(
+ cx, AsyncGeneratorObject::peekRequest(generator));
+ if (!toYield) {
+ return false;
+ }
+
+ // Step 12.c. Let resumptionValue be toYield.[[Completion]].
+ CompletionKind completionKind = toYield->completionKind();
+ RootedValue resumptionValue(cx, toYield->completionValue());
+
+ // Step 12.d. Return AsyncGeneratorUnwrapYieldResumption(resumptionValue).
+ return AsyncGeneratorUnwrapYieldResumptionAndResume(
+ cx, generator, completionKind, resumptionValue);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// Await in async function
+// https://tc39.es/ecma262/#await
+//
+// Steps 3.c-f.
+[[nodiscard]] static bool AsyncGeneratorAwaitedFulfilled(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value) {
+ MOZ_ASSERT(generator->isExecuting(),
+ "Await fulfilled when not in 'Executing' state");
+
+ // Step 3.c. Push asyncContext onto the execution context stack; asyncContext
+ // is now the running execution context.
+ // Step 3.d. Resume the suspended evaluation of asyncContext using
+ // NormalCompletion(value) as the result of the operation that
+ // suspended it.
+ // Step 3.f. Return undefined.
+ return AsyncGeneratorResume(cx, generator, CompletionKind::Normal, value);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// Await in async function
+// https://tc39.es/ecma262/#await
+//
+// Steps 5.c-f.
+[[nodiscard]] static bool AsyncGeneratorAwaitedRejected(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ HandleValue reason) {
+ MOZ_ASSERT(generator->isExecuting(),
+ "Await rejected when not in 'Executing' state");
+
+ // Step 5.c. Push asyncContext onto the execution context stack; asyncContext
+ // is now the running execution context.
+ // Step 5.d. Resume the suspended evaluation of asyncContext using
+ // ThrowCompletion(reason) as the result of the operation that
+ // suspended it.
+ // Step 5.f. Return undefined.
+ return AsyncGeneratorResume(cx, generator, CompletionKind::Throw, reason);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// Await in async function
+// https://tc39.es/ecma262/#await
+[[nodiscard]] static bool AsyncGeneratorAwait(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value) {
+ return InternalAsyncGeneratorAwait(
+ cx, generator, value, PromiseHandler::AsyncGeneratorAwaitedFulfilled,
+ PromiseHandler::AsyncGeneratorAwaitedRejected);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorCompleteStep ( generator, completion, done [ , realm ] )
+// https://tc39.es/ecma262/#sec-asyncgeneratorcompletestep
+//
+// "normal" case.
+[[nodiscard]] static bool AsyncGeneratorCompleteStepNormal(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value,
+ bool done) {
+ // Step 1. Let queue be generator.[[AsyncGeneratorQueue]].
+ // Step 2. Assert: queue is not empty.
+ MOZ_ASSERT(!generator->isQueueEmpty());
+
+ // Step 3. Let next be the first element of queue.
+ // Step 4. Remove the first element from queue.
+ AsyncGeneratorRequest* next =
+ AsyncGeneratorObject::dequeueRequest(cx, generator);
+ if (!next) {
+ return false;
+ }
+
+ // Step 5. Let promiseCapability be next.[[Capability]].
+ Rooted<PromiseObject*> resultPromise(cx, next->promise());
+
+ generator->cacheRequest(next);
+
+ // Step 6. Let value be completion.[[Value]].
+ // (passed by caller)
+
+ // Step 7. If completion.[[Type]] is throw, then
+ // Step 8. Else,
+ // Step 8.a. Assert: completion.[[Type]] is normal.
+
+ // Step 8.b. If realm is present, then
+ // (skipped)
+ // Step 8.c. Else,
+
+ // Step 8.c.i. Let iteratorResult be ! CreateIterResultObject(value, done).
+ JSObject* resultObj = CreateIterResultObject(cx, value, done);
+ if (!resultObj) {
+ return false;
+ }
+
+ // Step 8.d. Perform
+ // ! Call(promiseCapability.[[Resolve]], undefined,
+ // « iteratorResult »).
+ RootedValue resultValue(cx, ObjectValue(*resultObj));
+ return ResolvePromiseInternal(cx, resultPromise, resultValue);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorCompleteStep ( generator, completion, done [ , realm ] )
+// https://tc39.es/ecma262/#sec-asyncgeneratorcompletestep
+//
+// "throw" case.
+[[nodiscard]] static bool AsyncGeneratorCompleteStepThrow(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ HandleValue exception) {
+ // Step 1. Let queue be generator.[[AsyncGeneratorQueue]].
+ // Step 2. Assert: queue is not empty.
+ MOZ_ASSERT(!generator->isQueueEmpty());
+
+ // Step 3. Let next be the first element of queue.
+ // Step 4. Remove the first element from queue.
+ AsyncGeneratorRequest* next =
+ AsyncGeneratorObject::dequeueRequest(cx, generator);
+ if (!next) {
+ return false;
+ }
+
+ // Step 5. Let promiseCapability be next.[[Capability]].
+ Rooted<PromiseObject*> resultPromise(cx, next->promise());
+
+ generator->cacheRequest(next);
+
+ // Step 6. Let value be completion.[[Value]].
+ // (passed by caller)
+
+ // Step 7. If completion.[[Type]] is throw, then
+ // Step 7.a. Perform
+ // ! Call(promiseCapability.[[Reject]], undefined, « value »).
+ return RejectPromiseInternal(cx, resultPromise, exception);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorAwaitReturn ( generator )
+// https://tc39.es/ecma262/#sec-asyncgeneratorawaitreturn
+//
+// Steps 7.a-e.
+[[nodiscard]] static bool AsyncGeneratorAwaitReturnFulfilled(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value) {
+ MOZ_ASSERT(generator->isAwaitingReturn(),
+ "AsyncGeneratorResumeNext-Return fulfilled when not in "
+ "'AwaitingReturn' state");
+
+ // Step 7.a. Set generator.[[AsyncGeneratorState]] to completed.
+ generator->setCompleted();
+
+ // Step 7.b. Let result be NormalCompletion(value).
+ // Step 7.c. Perform ! AsyncGeneratorCompleteStep(generator, result, true).
+ if (!AsyncGeneratorCompleteStepNormal(cx, generator, value, true)) {
+ return false;
+ }
+
+ // Step 7.d. Perform ! AsyncGeneratorDrainQueue(generator).
+ // Step 7.e. Return undefined.
+ return AsyncGeneratorDrainQueue(cx, generator);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorAwaitReturn ( generator )
+// https://tc39.es/ecma262/#sec-asyncgeneratorawaitreturn
+//
+// Steps 9.a-e.
+[[nodiscard]] static bool AsyncGeneratorAwaitReturnRejected(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue value) {
+ MOZ_ASSERT(generator->isAwaitingReturn(),
+ "AsyncGeneratorResumeNext-Return rejected when not in "
+ "'AwaitingReturn' state");
+
+ // Step 9.a. Set generator.[[AsyncGeneratorState]] to completed.
+ generator->setCompleted();
+
+ // Step 9.b. Let result be ThrowCompletion(reason).
+ // Step 9.c. Perform ! AsyncGeneratorCompleteStep(generator, result, true).
+ if (!AsyncGeneratorCompleteStepThrow(cx, generator, value)) {
+ return false;
+ }
+
+ // Step 9.d. Perform ! AsyncGeneratorDrainQueue(generator).
+ // Step 9.e. Return undefined.
+ return AsyncGeneratorDrainQueue(cx, generator);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorAwaitReturn ( generator )
+// https://tc39.es/ecma262/#sec-asyncgeneratorawaitreturn
+[[nodiscard]] static bool AsyncGeneratorAwaitReturn(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator, HandleValue next) {
+ // Step 1. Let queue be generator.[[AsyncGeneratorQueue]].
+ // Step 2. Assert: queue is not empty.
+ MOZ_ASSERT(!generator->isQueueEmpty());
+
+ // Step 3. Let next be the first element of queue.
+ // (passed by caller)
+
+ // Step 4. Let completion be next.[[Completion]].
+ // Step 5. Assert: completion.[[Type]] is return.
+ // (implicit)
+
+ // Steps 6-11.
+ return InternalAsyncGeneratorAwait(
+ cx, generator, next, PromiseHandler::AsyncGeneratorAwaitReturnFulfilled,
+ PromiseHandler::AsyncGeneratorAwaitReturnRejected);
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorDrainQueue ( generator )
+// https://tc39.es/ecma262/#sec-asyncgeneratordrainqueue
+[[nodiscard]] static bool AsyncGeneratorDrainQueue(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator) {
+ // Step 1. Assert: generator.[[AsyncGeneratorState]] is completed.
+ MOZ_ASSERT(generator->isCompleted());
+
+ // Step 2. Let queue be generator.[[AsyncGeneratorQueue]].
+ // Step 3. If queue is empty, return.
+ if (generator->isQueueEmpty()) {
+ return true;
+ }
+
+ // Step 4. Let done be false.
+ // (implicit)
+
+ // Step 5. Repeat, while done is false,
+ while (true) {
+ // Step 5.a. Let next be the first element of queue.
+ Rooted<AsyncGeneratorRequest*> next(
+ cx, AsyncGeneratorObject::peekRequest(generator));
+ if (!next) {
+ return false;
+ }
+
+ // Step 5.b. Let completion be next.[[Completion]].
+ CompletionKind completionKind = next->completionKind();
+
+ // Step 5.c. If completion.[[Type]] is return, then
+ if (completionKind == CompletionKind::Return) {
+ RootedValue value(cx, next->completionValue());
+
+ // Step 5.c.i. Set generator.[[AsyncGeneratorState]] to awaiting-return.
+ generator->setAwaitingReturn();
+
+ // Step 5.c.ii. Perform ! AsyncGeneratorAwaitReturn(generator).
+ // Step 5.c.iii. Set done to true.
+ return AsyncGeneratorAwaitReturn(cx, generator, value);
+ }
+
+ // Step 5.d. Else,
+ if (completionKind == CompletionKind::Throw) {
+ RootedValue value(cx, next->completionValue());
+
+ // Step 5.d.ii. Perform
+ // ! AsyncGeneratorCompleteStep(generator, completion, true).
+ if (!AsyncGeneratorCompleteStepThrow(cx, generator, value)) {
+ return false;
+ }
+ } else {
+ // Step 5.d.i. If completion.[[Type]] is normal, then
+ // Step 5.d.i.1. Set completion to NormalCompletion(undefined).
+ // Step 5.d.ii. Perform
+ // ! AsyncGeneratorCompleteStep(generator, completion, true).
+ if (!AsyncGeneratorCompleteStepNormal(cx, generator, UndefinedHandleValue,
+ true)) {
+ return false;
+ }
+ }
+
+ // Step 5.d.iii. If queue is empty, set done to true.
+ if (generator->isQueueEmpty()) {
+ return true;
+ }
+ }
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorValidate ( generator, generatorBrand )
+// https://tc39.es/ecma262/#sec-asyncgeneratorvalidate
+//
+// Testing part.
+[[nodiscard]] static bool IsAsyncGeneratorValid(HandleValue asyncGenVal) {
+ // Step 1. Perform
+ // ? RequireInternalSlot(generator, [[AsyncGeneratorContext]]).
+ // Step 2. Perform
+ // ? RequireInternalSlot(generator, [[AsyncGeneratorState]]).
+ // Step 3. Perform
+ // ? RequireInternalSlot(generator, [[AsyncGeneratorQueue]]).
+ // Step 4. If generator.[[GeneratorBrand]] is not the same value as
+ // generatorBrand, throw a TypeError exception.
+ return asyncGenVal.isObject() &&
+ asyncGenVal.toObject().canUnwrapAs<AsyncGeneratorObject>();
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorValidate ( generator, generatorBrand )
+// https://tc39.es/ecma262/#sec-asyncgeneratorvalidate
+//
+// Throwing part.
+[[nodiscard]] static bool AsyncGeneratorValidateThrow(
+ JSContext* cx, MutableHandleValue result) {
+ Rooted<PromiseObject*> resultPromise(
+ cx, CreatePromiseObjectForAsyncGenerator(cx));
+ if (!resultPromise) {
+ return false;
+ }
+
+ RootedValue badGeneratorError(cx);
+ if (!GetTypeError(cx, JSMSG_NOT_AN_ASYNC_GENERATOR, &badGeneratorError)) {
+ return false;
+ }
+
+ if (!RejectPromiseInternal(cx, resultPromise, badGeneratorError)) {
+ return false;
+ }
+
+ result.setObject(*resultPromise);
+ return true;
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorEnqueue ( generator, completion, promiseCapability )
+// https://tc39.es/ecma262/#sec-asyncgeneratorenqueue
+[[nodiscard]] static bool AsyncGeneratorEnqueue(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ CompletionKind completionKind, HandleValue completionValue,
+ Handle<PromiseObject*> resultPromise) {
+ // Step 1. Let request be
+ // AsyncGeneratorRequest { [[Completion]]: completion,
+ // [[Capability]]: promiseCapability }.
+ Rooted<AsyncGeneratorRequest*> request(
+ cx, AsyncGeneratorObject::createRequest(cx, generator, completionKind,
+ completionValue, resultPromise));
+ if (!request) {
+ return false;
+ }
+
+ // Step 2. Append request to the end of generator.[[AsyncGeneratorQueue]].
+ return AsyncGeneratorObject::enqueueRequest(cx, generator, request);
+}
+
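+// Async generator methods can be called on a generator that belongs to
+// another compartment, e.g. through a cross-compartment wrapper. This RAII
+// helper enters the generator's realm and wraps values that cross the
+// boundary in either direction.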
+class MOZ_STACK_CLASS MaybeEnterAsyncGeneratorRealm {
+ mozilla::Maybe<AutoRealm> ar_;
+
+ public:
+ MaybeEnterAsyncGeneratorRealm() = default;
+ ~MaybeEnterAsyncGeneratorRealm() = default;
+
+ // Enter async generator's realm, and wrap the method's argument value if
+ // necessary.
+ [[nodiscard]] bool maybeEnterAndWrap(JSContext* cx,
+ Handle<AsyncGeneratorObject*> generator,
+ MutableHandleValue value) {
+ if (generator->compartment() == cx->compartment()) {
+ return true;
+ }
+
+ ar_.emplace(cx, generator);
+ return cx->compartment()->wrap(cx, value);
+ }
+
+ // Leave async generator's realm, and wrap the method's result value if
+ // necessary.
+ [[nodiscard]] bool maybeLeaveAndWrap(JSContext* cx,
+ MutableHandleValue result) {
+ if (!ar_) {
+ return true;
+ }
+ ar_.reset();
+
+ return cx->compartment()->wrap(cx, result);
+ }
+};
+
+[[nodiscard]] static bool AsyncGeneratorMethodSanityCheck(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator) {
+ if (generator->isCompleted() || generator->isSuspendedStart() ||
+ generator->isSuspendedYield()) {
+    // The spec assumes the queue is empty when async generator methods are
+    // called in those states, but our debugger allows calling those methods
+    // in unexpected states, such as before suspendedStart.
+ if (MOZ_UNLIKELY(!generator->isQueueEmpty())) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SUSPENDED_QUEUE_NOT_EMPTY);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGenerator.prototype.next ( value )
+// https://tc39.es/ecma262/#sec-asyncgenerator-prototype-next
+bool js::AsyncGeneratorNext(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 3. Let result be AsyncGeneratorValidate(generator, empty).
+ // Step 4. IfAbruptRejectPromise(result, promiseCapability).
+ // (reordered)
+ if (!IsAsyncGeneratorValid(args.thisv())) {
+ return AsyncGeneratorValidateThrow(cx, args.rval());
+ }
+
+ // Step 1. Let generator be the this value.
+ // (implicit)
+ Rooted<AsyncGeneratorObject*> generator(
+ cx, &args.thisv().toObject().unwrapAs<AsyncGeneratorObject>());
+
+ MaybeEnterAsyncGeneratorRealm maybeEnterRealm;
+
+ RootedValue completionValue(cx, args.get(0));
+ if (!maybeEnterRealm.maybeEnterAndWrap(cx, generator, &completionValue)) {
+ return false;
+ }
+
+ // Step 2. Let promiseCapability be ! NewPromiseCapability(%Promise%).
+ Rooted<PromiseObject*> resultPromise(
+ cx, CreatePromiseObjectForAsyncGenerator(cx));
+ if (!resultPromise) {
+ return false;
+ }
+
+ if (!AsyncGeneratorMethodSanityCheck(cx, generator)) {
+ return false;
+ }
+
+ // Step 5. Let state be generator.[[AsyncGeneratorState]].
+ // Step 6. If state is completed, then
+ if (generator->isCompleted()) {
+ // Step 6.a. Let iteratorResult be
+ // ! CreateIterResultObject(undefined, true).
+ JSObject* resultObj =
+ CreateIterResultObject(cx, UndefinedHandleValue, true);
+ if (!resultObj) {
+ return false;
+ }
+
+ // Step 6.b. Perform
+ // ! Call(promiseCapability.[[Resolve]], undefined,
+ // « iteratorResult »).
+ RootedValue resultValue(cx, ObjectValue(*resultObj));
+ if (!ResolvePromiseInternal(cx, resultPromise, resultValue)) {
+ return false;
+ }
+ } else {
+ // Step 7. Let completion be NormalCompletion(value).
+ // Step 8. Perform
+ // ! AsyncGeneratorEnqueue(generator, completion,
+ // promiseCapability).
+ if (!AsyncGeneratorEnqueue(cx, generator, CompletionKind::Normal,
+ completionValue, resultPromise)) {
+ return false;
+ }
+
+ // Step 9. If state is either suspendedStart or suspendedYield, then
+ if (generator->isSuspendedStart() || generator->isSuspendedYield()) {
+ RootedValue resumptionValue(cx, completionValue);
+ // Step 9.a. Perform ! AsyncGeneratorResume(generator, completion).
+ if (!AsyncGeneratorResume(cx, generator, CompletionKind::Normal,
+ resumptionValue)) {
+ return false;
+ }
+ } else {
+ // Step 10. Else,
+ // Step 10.a. Assert: state is either executing or awaiting-return.
+ MOZ_ASSERT(generator->isExecuting() || generator->isAwaitingReturn() ||
+ generator->isAwaitingYieldReturn());
+ }
+ }
+
+ // Step 6.c. Return promiseCapability.[[Promise]].
+ // and
+ // Step 11. Return promiseCapability.[[Promise]].
+ args.rval().setObject(*resultPromise);
+
+ return maybeEnterRealm.maybeLeaveAndWrap(cx, args.rval());
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGenerator.prototype.return ( value )
+// https://tc39.es/ecma262/#sec-asyncgenerator-prototype-return
+bool js::AsyncGeneratorReturn(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 3. Let result be AsyncGeneratorValidate(generator, empty).
+ // Step 4. IfAbruptRejectPromise(result, promiseCapability).
+ // (reordered)
+ if (!IsAsyncGeneratorValid(args.thisv())) {
+ return AsyncGeneratorValidateThrow(cx, args.rval());
+ }
+
+ // Step 1. Let generator be the this value.
+ Rooted<AsyncGeneratorObject*> generator(
+ cx, &args.thisv().toObject().unwrapAs<AsyncGeneratorObject>());
+
+ MaybeEnterAsyncGeneratorRealm maybeEnterRealm;
+
+ RootedValue completionValue(cx, args.get(0));
+ if (!maybeEnterRealm.maybeEnterAndWrap(cx, generator, &completionValue)) {
+ return false;
+ }
+
+ // Step 2. Let promiseCapability be ! NewPromiseCapability(%Promise%).
+ Rooted<PromiseObject*> resultPromise(
+ cx, CreatePromiseObjectForAsyncGenerator(cx));
+ if (!resultPromise) {
+ return false;
+ }
+
+ if (!AsyncGeneratorMethodSanityCheck(cx, generator)) {
+ return false;
+ }
+
+ // Step 5. Let completion be
+ // Completion { [[Type]]: return, [[Value]]: value,
+ // [[Target]]: empty }.
+ // Step 6. Perform
+ // ! AsyncGeneratorEnqueue(generator, completion, promiseCapability).
+ if (!AsyncGeneratorEnqueue(cx, generator, CompletionKind::Return,
+ completionValue, resultPromise)) {
+ return false;
+ }
+
+ // Step 7. Let state be generator.[[AsyncGeneratorState]].
+ // Step 8. If state is either suspendedStart or completed, then
+ if (generator->isSuspendedStart() || generator->isCompleted()) {
+ // Step 8.a. Set generator.[[AsyncGeneratorState]] to awaiting-return.
+ generator->setAwaitingReturn();
+
+ // Step 8.b. Perform ! AsyncGeneratorAwaitReturn(generator).
+ if (!AsyncGeneratorAwaitReturn(cx, generator, completionValue)) {
+ return false;
+ }
+ } else if (generator->isSuspendedYield()) {
+ // Step 9. Else if state is suspendedYield, then
+
+ // Step 9.a. Perform ! AsyncGeneratorResume(generator, completion).
+ if (!AsyncGeneratorUnwrapYieldResumptionAndResume(
+ cx, generator, CompletionKind::Return, completionValue)) {
+ return false;
+ }
+ } else {
+ // Step 10. Else,
+ // Step 10.a. Assert: state is either executing or awaiting-return.
+ MOZ_ASSERT(generator->isExecuting() || generator->isAwaitingReturn() ||
+ generator->isAwaitingYieldReturn());
+ }
+
+ // Step 11. Return promiseCapability.[[Promise]].
+ args.rval().setObject(*resultPromise);
+
+ return maybeEnterRealm.maybeLeaveAndWrap(cx, args.rval());
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGenerator.prototype.throw ( exception )
+// https://tc39.es/ecma262/#sec-asyncgenerator-prototype-throw
+bool js::AsyncGeneratorThrow(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 3. Let result be AsyncGeneratorValidate(generator, empty).
+ // Step 4. IfAbruptRejectPromise(result, promiseCapability).
+ // (reordered)
+ if (!IsAsyncGeneratorValid(args.thisv())) {
+ return AsyncGeneratorValidateThrow(cx, args.rval());
+ }
+
+ // Step 1. Let generator be the this value.
+ Rooted<AsyncGeneratorObject*> generator(
+ cx, &args.thisv().toObject().unwrapAs<AsyncGeneratorObject>());
+
+ MaybeEnterAsyncGeneratorRealm maybeEnterRealm;
+
+ RootedValue completionValue(cx, args.get(0));
+ if (!maybeEnterRealm.maybeEnterAndWrap(cx, generator, &completionValue)) {
+ return false;
+ }
+
+ // Step 2. Let promiseCapability be ! NewPromiseCapability(%Promise%).
+ Rooted<PromiseObject*> resultPromise(
+ cx, CreatePromiseObjectForAsyncGenerator(cx));
+ if (!resultPromise) {
+ return false;
+ }
+
+ if (!AsyncGeneratorMethodSanityCheck(cx, generator)) {
+ return false;
+ }
+
+ // Step 5. Let state be generator.[[AsyncGeneratorState]].
+ // Step 6. If state is suspendedStart, then
+ if (generator->isSuspendedStart()) {
+ // Step 6.a. Set generator.[[AsyncGeneratorState]] to completed.
+ // Step 6.b. Set state to completed.
+ generator->setCompleted();
+ }
+
+ // Step 7. If state is completed, then
+ if (generator->isCompleted()) {
+ // Step 7.a. Perform
+ // ! Call(promiseCapability.[[Reject]], undefined, « exception »).
+ if (!RejectPromiseInternal(cx, resultPromise, completionValue)) {
+ return false;
+ }
+ } else {
+ // Step 8. Let completion be ThrowCompletion(exception).
+ // Step 9. Perform
+ // ! AsyncGeneratorEnqueue(generator, completion,
+ // promiseCapability).
+ if (!AsyncGeneratorEnqueue(cx, generator, CompletionKind::Throw,
+ completionValue, resultPromise)) {
+ return false;
+ }
+
+ // Step 10. If state is suspendedYield, then
+ if (generator->isSuspendedYield()) {
+ // Step 10.a. Perform ! AsyncGeneratorResume(generator, completion).
+ if (!AsyncGeneratorResume(cx, generator, CompletionKind::Throw,
+ completionValue)) {
+ return false;
+ }
+ } else {
+ // Step 11. Else,
+ // Step 11.a. Assert: state is either executing or awaiting-return.
+ MOZ_ASSERT(generator->isExecuting() || generator->isAwaitingReturn() ||
+ generator->isAwaitingYieldReturn());
+ }
+ }
+
+ // Step 7.b. Return promiseCapability.[[Promise]].
+ // and
+ // Step 12. Return promiseCapability.[[Promise]].
+ args.rval().setObject(*resultPromise);
+
+ return maybeEnterRealm.maybeLeaveAndWrap(cx, args.rval());
+}
+
+// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+//
+// AsyncGeneratorResume ( generator, completion )
+// https://tc39.es/ecma262/#sec-asyncgeneratorresume
+[[nodiscard]] static bool AsyncGeneratorResume(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ CompletionKind completionKind, HandleValue argument) {
+ MOZ_ASSERT(!generator->isClosed(),
+ "closed generator when resuming async generator");
+ MOZ_ASSERT(generator->isSuspended(),
+ "non-suspended generator when resuming async generator");
+
+ // Step 1. Assert: generator.[[AsyncGeneratorState]] is either
+ // suspendedStart or suspendedYield.
+ //
+  // NOTE: We also use suspend/resume for await, so the state can be
+  //       anything.
+
+ // Steps 2-4 are handled in generator.
+
+ // Step 5. Set generator.[[AsyncGeneratorState]] to executing.
+ generator->setExecuting();
+
+ // Step 6. Push genContext onto the execution context stack; genContext is
+ // now the running execution context.
+ // Step 7. Resume the suspended evaluation of genContext using completion as
+ // the result of the operation that suspended it. Let result be the
+ // completion record returned by the resumed computation.
+ Handle<PropertyName*> funName = completionKind == CompletionKind::Normal
+ ? cx->names().AsyncGeneratorNext
+ : completionKind == CompletionKind::Throw
+ ? cx->names().AsyncGeneratorThrow
+ : cx->names().AsyncGeneratorReturn;
+ FixedInvokeArgs<1> args(cx);
+ args[0].set(argument);
+ RootedValue thisOrRval(cx, ObjectValue(*generator));
+ if (!CallSelfHostedFunction(cx, funName, thisOrRval, args, &thisOrRval)) {
+ // 25.5.3.2, steps 5.f, 5.g.
+ if (!generator->isClosed()) {
+ generator->setClosed();
+ }
+ return AsyncGeneratorThrown(cx, generator);
+ }
+
+ // 6.2.3.1, steps 2-9.
+ if (generator->isAfterAwait()) {
+ return AsyncGeneratorAwait(cx, generator, thisOrRval);
+ }
+
+ // 25.5.3.7, steps 5-6, 9.
+ if (generator->isAfterYield()) {
+ return AsyncGeneratorYield(cx, generator, thisOrRval);
+ }
+
+ // 25.5.3.2, steps 5.d-g.
+ return AsyncGeneratorReturned(cx, generator, thisOrRval);
+}
+
+static const JSFunctionSpec async_generator_methods[] = {
+ JS_FN("next", js::AsyncGeneratorNext, 1, 0),
+ JS_FN("throw", js::AsyncGeneratorThrow, 1, 0),
+ JS_FN("return", js::AsyncGeneratorReturn, 1, 0), JS_FS_END};
+
+static JSObject* CreateAsyncGeneratorFunction(JSContext* cx, JSProtoKey key) {
+ RootedObject proto(cx, &cx->global()->getFunctionConstructor());
+ Handle<PropertyName*> name = cx->names().AsyncGeneratorFunction;
+
+ // ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+ //
+ // The AsyncGeneratorFunction Constructor
+ // https://tc39.es/ecma262/#sec-asyncgeneratorfunction-constructor
+ return NewFunctionWithProto(cx, AsyncGeneratorConstructor, 1,
+ FunctionFlags::NATIVE_CTOR, nullptr, name, proto,
+ gc::AllocKind::FUNCTION, TenuredObject);
+}
+
+static JSObject* CreateAsyncGeneratorFunctionPrototype(JSContext* cx,
+ JSProtoKey key) {
+ return NewTenuredObjectWithFunctionPrototype(cx, cx->global());
+}
+
+static bool AsyncGeneratorFunctionClassFinish(JSContext* cx,
+ HandleObject asyncGenFunction,
+ HandleObject asyncGenerator) {
+ Handle<GlobalObject*> global = cx->global();
+
+ // Change the "constructor" property to non-writable before adding any other
+ // properties, so it's still the last property and can be modified without a
+ // dictionary-mode transition.
+ MOZ_ASSERT(asyncGenerator->as<NativeObject>().getLastProperty().key() ==
+ NameToId(cx->names().constructor));
+ MOZ_ASSERT(!asyncGenerator->as<NativeObject>().inDictionaryMode());
+
+ RootedValue asyncGenFunctionVal(cx, ObjectValue(*asyncGenFunction));
+ if (!DefineDataProperty(cx, asyncGenerator, cx->names().constructor,
+ asyncGenFunctionVal, JSPROP_READONLY)) {
+ return false;
+ }
+ MOZ_ASSERT(!asyncGenerator->as<NativeObject>().inDictionaryMode());
+
+ RootedObject asyncIterProto(
+ cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, global));
+ if (!asyncIterProto) {
+ return false;
+ }
+
+ // ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+ //
+ // AsyncGenerator Objects
+ // https://tc39.es/ecma262/#sec-asyncgenerator-objects
+ RootedObject asyncGenProto(cx, GlobalObject::createBlankPrototypeInheriting(
+ cx, &PlainObject::class_, asyncIterProto));
+ if (!asyncGenProto) {
+ return false;
+ }
+ if (!DefinePropertiesAndFunctions(cx, asyncGenProto, nullptr,
+ async_generator_methods) ||
+ !DefineToStringTag(cx, asyncGenProto, cx->names().AsyncGenerator)) {
+ return false;
+ }
+
+ // ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29
+ //
+ // Properties of the AsyncGeneratorFunction Prototype Object
+ // https://tc39.es/ecma262/#sec-properties-of-asyncgeneratorfunction-prototype
+ if (!LinkConstructorAndPrototype(cx, asyncGenerator, asyncGenProto,
+ JSPROP_READONLY, JSPROP_READONLY) ||
+ !DefineToStringTag(cx, asyncGenerator,
+ cx->names().AsyncGeneratorFunction)) {
+ return false;
+ }
+
+ global->setAsyncGeneratorPrototype(asyncGenProto);
+
+ return true;
+}
+
+static const ClassSpec AsyncGeneratorFunctionClassSpec = {
+ CreateAsyncGeneratorFunction,
+ CreateAsyncGeneratorFunctionPrototype,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ AsyncGeneratorFunctionClassFinish,
+ ClassSpec::DontDefineConstructor};
+
+const JSClass js::AsyncGeneratorFunctionClass = {
+ "AsyncGeneratorFunction", 0, JS_NULL_CLASS_OPS,
+ &AsyncGeneratorFunctionClassSpec};
+
+[[nodiscard]] bool js::AsyncGeneratorPromiseReactionJob(
+ JSContext* cx, PromiseHandler handler,
+ Handle<AsyncGeneratorObject*> generator, HandleValue argument) {
+  // Await's handlers don't return a value and don't throw any exceptions;
+  // they fail only on OOM.
+ switch (handler) {
+ case PromiseHandler::AsyncGeneratorAwaitedFulfilled:
+ return AsyncGeneratorAwaitedFulfilled(cx, generator, argument);
+
+ case PromiseHandler::AsyncGeneratorAwaitedRejected:
+ return AsyncGeneratorAwaitedRejected(cx, generator, argument);
+
+ case PromiseHandler::AsyncGeneratorAwaitReturnFulfilled:
+ return AsyncGeneratorAwaitReturnFulfilled(cx, generator, argument);
+
+ case PromiseHandler::AsyncGeneratorAwaitReturnRejected:
+ return AsyncGeneratorAwaitReturnRejected(cx, generator, argument);
+
+ case PromiseHandler::AsyncGeneratorYieldReturnAwaitedFulfilled:
+ return AsyncGeneratorYieldReturnAwaitedFulfilled(cx, generator, argument);
+
+ case PromiseHandler::AsyncGeneratorYieldReturnAwaitedRejected:
+ return AsyncGeneratorYieldReturnAwaitedRejected(cx, generator, argument);
+
+ default:
+ MOZ_CRASH("Bad handler in AsyncGeneratorPromiseReactionJob");
+ }
+}
+
+// ---------------------
+// AsyncFromSyncIterator
+// ---------------------
+
+const JSClass AsyncFromSyncIteratorObject::class_ = {
+ "AsyncFromSyncIteratorObject",
+ JSCLASS_HAS_RESERVED_SLOTS(AsyncFromSyncIteratorObject::Slots)};
+
+// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e
+// 25.1.4.1 CreateAsyncFromSyncIterator
+JSObject* js::CreateAsyncFromSyncIterator(JSContext* cx, HandleObject iter,
+ HandleValue nextMethod) {
+ // Steps 1-3.
+ return AsyncFromSyncIteratorObject::create(cx, iter, nextMethod);
+}
+
+// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e
+// 25.1.4.1 CreateAsyncFromSyncIterator
+/* static */
+JSObject* AsyncFromSyncIteratorObject::create(JSContext* cx, HandleObject iter,
+ HandleValue nextMethod) {
+ // Step 1.
+ RootedObject proto(cx,
+ GlobalObject::getOrCreateAsyncFromSyncIteratorPrototype(
+ cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+
+ AsyncFromSyncIteratorObject* asyncIter =
+ NewObjectWithGivenProto<AsyncFromSyncIteratorObject>(cx, proto);
+ if (!asyncIter) {
+ return nullptr;
+ }
+
+ // Step 2.
+ asyncIter->init(iter, nextMethod);
+
+ // Step 3 (Call to 7.4.1 GetIterator).
+ // 7.4.1 GetIterator, steps 1-5 are a no-op (*).
+ // 7.4.1 GetIterator, steps 6-8 are implemented in bytecode.
+ //
+ // (*) With <https://github.com/tc39/ecma262/issues/1172> fixed.
+ return asyncIter;
+}
+
+// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e
+// 25.1.4.2.1 %AsyncFromSyncIteratorPrototype%.next
+static bool AsyncFromSyncIteratorNext(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Normal);
+}
+
+// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e
+// 25.1.4.2.2 %AsyncFromSyncIteratorPrototype%.return
+static bool AsyncFromSyncIteratorReturn(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Return);
+}
+
+// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e
+// 25.1.4.2.3 %AsyncFromSyncIteratorPrototype%.throw
+static bool AsyncFromSyncIteratorThrow(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Throw);
+}
+
+static const JSFunctionSpec async_from_sync_iter_methods[] = {
+ JS_FN("next", AsyncFromSyncIteratorNext, 1, 0),
+ JS_FN("throw", AsyncFromSyncIteratorThrow, 1, 0),
+ JS_FN("return", AsyncFromSyncIteratorReturn, 1, 0), JS_FS_END};
+
+bool GlobalObject::initAsyncFromSyncIteratorProto(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (global->hasBuiltinProto(ProtoKind::AsyncFromSyncIteratorProto)) {
+ return true;
+ }
+
+ RootedObject asyncIterProto(
+ cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, global));
+ if (!asyncIterProto) {
+ return false;
+ }
+
+ // 25.1.4.2 The %AsyncFromSyncIteratorPrototype% Object
+ RootedObject asyncFromSyncIterProto(
+ cx, GlobalObject::createBlankPrototypeInheriting(cx, &PlainObject::class_,
+ asyncIterProto));
+ if (!asyncFromSyncIterProto) {
+ return false;
+ }
+ if (!DefinePropertiesAndFunctions(cx, asyncFromSyncIterProto, nullptr,
+ async_from_sync_iter_methods) ||
+ !DefineToStringTag(cx, asyncFromSyncIterProto,
+ cx->names().AsyncFromSyncIterator)) {
+ return false;
+ }
+
+ global->initBuiltinProto(ProtoKind::AsyncFromSyncIteratorProto,
+ asyncFromSyncIterProto);
+ return true;
+}
+
+// -------------
+// AsyncIterator
+// -------------
+
+static const JSFunctionSpec async_iterator_proto_methods[] = {
+ JS_SELF_HOSTED_SYM_FN(asyncIterator, "AsyncIteratorIdentity", 0, 0),
+ JS_FS_END};
+
+static const JSFunctionSpec async_iterator_proto_methods_with_helpers[] = {
+ JS_SELF_HOSTED_FN("map", "AsyncIteratorMap", 1, 0),
+ JS_SELF_HOSTED_FN("filter", "AsyncIteratorFilter", 1, 0),
+ JS_SELF_HOSTED_FN("take", "AsyncIteratorTake", 1, 0),
+ JS_SELF_HOSTED_FN("drop", "AsyncIteratorDrop", 1, 0),
+ JS_SELF_HOSTED_FN("asIndexedPairs", "AsyncIteratorAsIndexedPairs", 0, 0),
+ JS_SELF_HOSTED_FN("flatMap", "AsyncIteratorFlatMap", 1, 0),
+ JS_SELF_HOSTED_FN("reduce", "AsyncIteratorReduce", 1, 0),
+ JS_SELF_HOSTED_FN("toArray", "AsyncIteratorToArray", 0, 0),
+ JS_SELF_HOSTED_FN("forEach", "AsyncIteratorForEach", 1, 0),
+ JS_SELF_HOSTED_FN("some", "AsyncIteratorSome", 1, 0),
+ JS_SELF_HOSTED_FN("every", "AsyncIteratorEvery", 1, 0),
+ JS_SELF_HOSTED_FN("find", "AsyncIteratorFind", 1, 0),
+ JS_SELF_HOSTED_SYM_FN(asyncIterator, "AsyncIteratorIdentity", 0, 0),
+ JS_FS_END};
+
+bool GlobalObject::initAsyncIteratorProto(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (global->hasBuiltinProto(ProtoKind::AsyncIteratorProto)) {
+ return true;
+ }
+
+ // 25.1.3 The %AsyncIteratorPrototype% Object
+ RootedObject asyncIterProto(
+ cx, GlobalObject::createBlankPrototype<PlainObject>(cx, global));
+ if (!asyncIterProto) {
+ return false;
+ }
+ if (!DefinePropertiesAndFunctions(cx, asyncIterProto, nullptr,
+ async_iterator_proto_methods)) {
+ return false;
+ }
+
+ global->initBuiltinProto(ProtoKind::AsyncIteratorProto, asyncIterProto);
+ return true;
+}
+
+// https://tc39.es/proposal-iterator-helpers/#sec-asynciterator as of revision
+// 8f10db5.
+static bool AsyncIteratorConstructor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 1.
+ if (!ThrowIfNotConstructing(cx, args, js_AsyncIterator_str)) {
+ return false;
+ }
+  // Throw TypeError if NewTarget is the active function object, preventing the
+  // AsyncIterator constructor from being used directly.
+ if (args.callee() == args.newTarget().toObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BOGUS_CONSTRUCTOR, js_AsyncIterator_str);
+ return false;
+ }
+
+ // Step 2.
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_AsyncIterator,
+ &proto)) {
+ return false;
+ }
+
+ JSObject* obj = NewObjectWithClassProto<AsyncIteratorObject>(cx, proto);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+static const ClassSpec AsyncIteratorObjectClassSpec = {
+    GenericCreateConstructor<AsyncIteratorConstructor, 0,
+                             gc::AllocKind::FUNCTION>,  // createConstructor
+    GenericCreatePrototype<AsyncIteratorObject>,  // createPrototype
+    nullptr,                                      // constructorFunctions
+    nullptr,                                      // constructorProperties
+    async_iterator_proto_methods_with_helpers,    // prototypeFunctions
+    nullptr,                                      // prototypeProperties
+    nullptr,                                      // finishInit
+};
+
+const JSClass AsyncIteratorObject::class_ = {
+ js_AsyncIterator_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_AsyncIterator),
+ JS_NULL_CLASS_OPS,
+ &AsyncIteratorObjectClassSpec,
+};
+
+const JSClass AsyncIteratorObject::protoClass_ = {
+ "AsyncIterator.prototype",
+ JSCLASS_HAS_CACHED_PROTO(JSProto_AsyncIterator),
+ JS_NULL_CLASS_OPS,
+ &AsyncIteratorObjectClassSpec,
+};
+
+// Iterator Helpers proposal
+static const JSFunctionSpec async_iterator_helper_methods[] = {
+ JS_SELF_HOSTED_FN("next", "AsyncIteratorHelperNext", 1, 0),
+ JS_SELF_HOSTED_FN("return", "AsyncIteratorHelperReturn", 1, 0),
+ JS_SELF_HOSTED_FN("throw", "AsyncIteratorHelperThrow", 1, 0),
+ JS_FS_END,
+};
+
+static const JSClass AsyncIteratorHelperPrototypeClass = {
+ "Async Iterator Helper", 0};
+
+const JSClass AsyncIteratorHelperObject::class_ = {
+ "Async Iterator Helper",
+ JSCLASS_HAS_RESERVED_SLOTS(AsyncIteratorHelperObject::SlotCount),
+};
+
+/* static */
+NativeObject* GlobalObject::getOrCreateAsyncIteratorHelperPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return MaybeNativeObject(
+ getOrCreateBuiltinProto(cx, global, ProtoKind::AsyncIteratorHelperProto,
+ initAsyncIteratorHelperProto));
+}
+
+/* static */
+bool GlobalObject::initAsyncIteratorHelperProto(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (global->hasBuiltinProto(ProtoKind::AsyncIteratorHelperProto)) {
+ return true;
+ }
+
+ RootedObject asyncIterProto(
+ cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, global));
+ if (!asyncIterProto) {
+ return false;
+ }
+
+ RootedObject asyncIteratorHelperProto(
+ cx, GlobalObject::createBlankPrototypeInheriting(
+ cx, &AsyncIteratorHelperPrototypeClass, asyncIterProto));
+ if (!asyncIteratorHelperProto) {
+ return false;
+ }
+ if (!DefinePropertiesAndFunctions(cx, asyncIteratorHelperProto, nullptr,
+ async_iterator_helper_methods)) {
+ return false;
+ }
+
+ global->initBuiltinProto(ProtoKind::AsyncIteratorHelperProto,
+ asyncIteratorHelperProto);
+ return true;
+}
+
+AsyncIteratorHelperObject* js::NewAsyncIteratorHelper(JSContext* cx) {
+ RootedObject proto(cx, GlobalObject::getOrCreateAsyncIteratorHelperPrototype(
+ cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+ return NewObjectWithGivenProto<AsyncIteratorHelperObject>(cx, proto);
+}
diff --git a/js/src/vm/AsyncIteration.h b/js/src/vm/AsyncIteration.h
new file mode 100644
index 0000000000..4629329cc8
--- /dev/null
+++ b/js/src/vm/AsyncIteration.h
@@ -0,0 +1,571 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_AsyncIteration_h
+#define vm_AsyncIteration_h
+
+#include "builtin/Promise.h" // js::PromiseHandler
+#include "builtin/SelfHostingDefines.h"
+#include "js/Class.h"
+#include "vm/GeneratorObject.h"
+#include "vm/JSObject.h"
+#include "vm/List.h"
+#include "vm/PromiseObject.h"
+
+// [SMDOC] Async generators
+//
+// # Start
+//
+// When an async generator is called, it synchronously runs until the
+// JSOp::InitialYield and then suspends, just like a sync generator, and returns
+// an async generator object (js::AsyncGeneratorObject).
+//
+//
+// # Request queue
+//
+// When next/return/throw is called on the async generator object,
+// js::AsyncGeneratorEnqueue performs the following:
+// * Create a new AsyncGeneratorRequest and enqueue it in the generator
+// object's request queue.
+//   * Resume the generator with the oldest request, if the generator is
+//     suspended (see "Reaction jobs and resume after await" below)
+// * Return the promise for the request
+//
+// This is done in js::AsyncGeneratorEnqueue, which corresponds to
+// AsyncGeneratorEnqueue in the spec,
+// and js::AsyncGeneratorResumeNext corresponds to the following:
+// * AsyncGeneratorResolve
+// * AsyncGeneratorReject
+// * AsyncGeneratorResumeNext
+//
+// The returned promise is resolved when the resumption for the request
+// completes with yield/throw/return, in js::AsyncGeneratorResolve and
+// js::AsyncGeneratorReject.
+// They correspond to AsyncGeneratorResolve and AsyncGeneratorReject in the
+// spec.
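+//
+// For example (illustrative only), two back-to-back `next` calls create two
+// queued AsyncGeneratorRequests; the second is processed only after the
+// resumption for the first completes:
+//
+// ```
+// async function* gen() { yield 1; yield 2; }
+// const it = gen();
+// const p1 = it.next(); // request 1: resumes the suspended generator
+// const p2 = it.next(); // request 2: stays queued until request 1 settles
+// ```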
+//
+//
+// # Await
+//
+// An async generator's `await` is implemented differently from an async
+// function's `await`.
+//
+// The bytecode is the following:
+// (ignoring CanSkipAwait; see the comment in AsyncFunction.h for more details)
+//
+// ```
+// (operand here) # VALUE
+// GetAliasedVar ".generator" # VALUE .generator
+// Await 0 # RVAL GENERATOR RESUMEKIND
+//
+// AfterYield # RVAL GENERATOR RESUMEKIND
+// CheckResumeKind # RVAL
+// ```
+//
+// Async generators don't use JSOp::AsyncAwait; that part is handled in
+// js::AsyncGeneratorResume, which calls js::AsyncGeneratorAwait.
+//
+// Both JSOp::Await and JSOp::Yield behave in exactly the same way;
+// js::AsyncGeneratorResume checks the last opcode and branches to the
+// await/yield/return cases.
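+//
+// For example (illustrative only):
+//
+// ```
+// async function* f(p) {
+//   await p; // compiles to the Await/AfterYield/CheckResumeKind above
+// }
+// ```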
+//
+//
+// # Reaction jobs and resume after await
+//
+// This is almost the same as for async functions (see AsyncFunction.h).
+//
+// The reaction record for the job is marked as "this is for async generator"
+// (see js::AsyncGeneratorAwait), and handled specially in
+// js::PromiseReactionJob, which calls js::AsyncGeneratorPromiseReactionJob.
+//
+//
+// # Yield
+//
+// `yield` is implemented with the following bytecode sequence:
+// (Ignoring CanSkipAwait for simplicity)
+//
+// ```
+// (operand here) # VALUE
+// GetAliasedVar ".generator" # VALUE .generator
+// Await 1 # RVAL GENERATOR RESUMEKIND
+// AfterYield # RVAL GENERATOR RESUMEKIND
+// CheckResumeKind # RVAL
+//
+// GetAliasedVar ".generator" # RVAL .generator
+// Yield 2 # RVAL2 GENERATOR RESUMEKIND
+//
+// AfterYield # RVAL2 GENERATOR RESUMEKIND
+// CheckResumeKind # RVAL2
+// ```
+//
+// The 1st part (JSOp::Await + JSOp::CheckResumeKind) performs an implicit
+// `await`, as specified in AsyncGeneratorYield step 5.
+//
+// AsyncGeneratorYield ( value )
+// https://tc39.es/ecma262/#sec-asyncgeneratoryield
+//
+// 5. Set value to ? Await(value).
+//
+// The 2nd part (JSOp::Yield) suspends execution and yields the result of
+// `await`, as specified in AsyncGeneratorYield steps 1-4, 6-7, 9-10.
+//
+// AsyncGeneratorYield ( value )
+// https://tc39.es/ecma262/#sec-asyncgeneratoryield
+//
+// 1. Let genContext be the running execution context.
+// 2. Assert: genContext is the execution context of a generator.
+// 3. Let generator be the value of the Generator component of genContext.
+// 4. Assert: GetGeneratorKind() is async.
+// ..
+// 6. Set generator.[[AsyncGeneratorState]] to suspendedYield.
+// 7. Remove genContext from the execution context stack and restore the
+// execution context that is at the top of the execution context stack as
+// the running execution context.
+// 8. ...
+// 9. Return ! AsyncGeneratorResolve(generator, value, false).
+// 10. NOTE: This returns to the evaluation of the operation that had most
+// previously resumed evaluation of genContext.
+//
+// The last part (JSOp::CheckResumeKind) checks the resumption type and
+// resumes/throws/returns the execution, as specified in AsyncGeneratorYield
+// step 8.
+//
+// 8. Set the code evaluation state of genContext such that when evaluation is
+// resumed with a Completion resumptionValue the following steps will be
+// performed:
+// a. If resumptionValue.[[Type]] is not return, return
+// Completion(resumptionValue).
+// b. Let awaited be Await(resumptionValue.[[Value]]).
+// c. If awaited.[[Type]] is throw, return Completion(awaited).
+// d. Assert: awaited.[[Type]] is normal.
+// e. Return Completion { [[Type]]: return, [[Value]]: awaited.[[Value]],
+// [[Target]]: empty }.
+// f. NOTE: When one of the above steps returns, it returns to the
+// evaluation of the YieldExpression production that originally called
+// this abstract operation.
+//
+// Resumption with `AsyncGenerator.prototype.return` is handled differently.
+// See "Resumption with return" section below.
+//
+//
+// # Return
+//
+// `return` with an operand is implemented with the following bytecode
+// sequence (ignoring CanSkipAwait for simplicity):
+//
+// ```
+// (operand here) # VALUE
+// GetAliasedVar ".generator" # VALUE .generator
+// Await 0 # RVAL GENERATOR RESUMEKIND
+// AfterYield # RVAL GENERATOR RESUMEKIND
+// CheckResumeKind # RVAL
+//
+// SetRval #
+// GetAliasedVar ".generator" # .generator
+// FinalYieldRval #
+// ```
+//
+// The 1st part (JSOp::Await + JSOp::CheckResumeKind) performs an implicit
+// `await`, as specified in ReturnStatement's Evaluation step 3.
+//
+// ReturnStatement: return Expression;
+// https://tc39.es/ecma262/#sec-return-statement-runtime-semantics-evaluation
+//
+// 3. If ! GetGeneratorKind() is async, set exprValue to ? Await(exprValue).
+//
+// And the 2nd part corresponds to AsyncGeneratorStart steps 5.a-e, 5.g.
+//
+// AsyncGeneratorStart ( generator, generatorBody )
+// https://tc39.es/ecma262/#sec-asyncgeneratorstart
+//
+// 5. Set the code evaluation state of genContext such that when evaluation
+// is resumed for that execution context the following steps will be
+// performed:
+// a. Let result be the result of evaluating generatorBody.
+// b. Assert: If we return here, the async generator either threw an
+// exception or performed either an implicit or explicit return.
+// c. Remove genContext from the execution context stack and restore the
+// execution context that is at the top of the execution context stack
+// as the running execution context.
+// d. Set generator.[[AsyncGeneratorState]] to completed.
+// e. If result is a normal completion, let resultValue be undefined.
+// ...
+// g. Return ! AsyncGeneratorResolve(generator, resultValue, true).
+//
+// `return` without an operand, and implicit return, are implemented with the
+// following bytecode sequence:
+//
+// ```
+// Undefined # undefined
+// SetRval #
+// GetAliasedVar ".generator" # .generator
+// FinalYieldRval #
+// ```
+//
+// This is also AsyncGeneratorStart steps 5.a-e, 5.g.
+//
+//
+// # Throw
+//
+// Unlike async functions, async generators don't use an implicit try-catch;
+// the throw completion is handled by js::AsyncGeneratorResume, which calls
+// js::AsyncGeneratorThrown.
+//
+// This corresponds to AsyncGeneratorStart step 5.f:
+//
+// 5. ...
+// f. Else,
+// i. Let resultValue be result.[[Value]].
+// ii. If result.[[Type]] is not return, then
+// 1. Return ! AsyncGeneratorReject(generator, resultValue).
+//
+//
+// # Resumption with return
+//
+// Resumption with return completion is handled in js::AsyncGeneratorResumeNext.
+//
+// If the generator is suspended, it doesn't immediately resume the generator
+// script itself; instead, the implicit `await` is handled in
+// js::AsyncGeneratorResumeNext
+// (see PromiseHandlerAsyncGeneratorYieldReturnAwaitedFulfilled and
+// PromiseHandlerAsyncGeneratorYieldReturnAwaitedRejected), and the generator
+// is resumed with the result of that await.
+// The return completion is then finally handled in JSOp::CheckResumeKind
+// after JSOp::Yield.
+//
+// This corresponds to AsyncGeneratorYield step 8.
+//
+// AsyncGeneratorYield ( value )
+// https://tc39.es/ecma262/#sec-asyncgeneratoryield
+//
+// 8. Set the code evaluation state of genContext such that when evaluation
+// is resumed with a Completion resumptionValue the following steps will
+// be performed:
+// ..
+// b. Let awaited be Await(resumptionValue.[[Value]]).
+// c. If awaited.[[Type]] is throw, return Completion(awaited).
+// d. Assert: awaited.[[Type]] is normal.
+// e. Return Completion { [[Type]]: return, [[Value]]: awaited.[[Value]],
+// [[Target]]: empty }.
+//
+// If the generator is already completed, it awaits the return value
+// (see PromiseHandlerAsyncGeneratorResumeNextReturnFulfilled and
+// PromiseHandlerAsyncGeneratorResumeNextReturnRejected), and resolves the
+// request's promise with that value.
+//
+// It corresponds to AsyncGeneratorResumeNext step 10.b.i.
+//
+// AsyncGeneratorResumeNext ( generator )
+// https://tc39.es/ecma262/#sec-asyncgeneratorresumenext
+//
+// 10. If completion is an abrupt completion, then
+// ..
+// b. If state is completed, then
+// i. If completion.[[Type]] is return, then
+// 1. Set generator.[[AsyncGeneratorState]] to awaiting-return.
+// 2. Let promise be ? PromiseResolve(%Promise%, completion.[[Value]]).
+// 3. Let stepsFulfilled be the algorithm steps defined in
+// AsyncGeneratorResumeNext Return Processor Fulfilled Functions.
+// 4. Let onFulfilled be ! CreateBuiltinFunction(stepsFulfilled, «
+// [[Generator]] »).
+// 5. Set onFulfilled.[[Generator]] to generator.
+// 6. Let stepsRejected be the algorithm steps defined in
+// AsyncGeneratorResumeNext Return Processor Rejected Functions.
+// 7. Let onRejected be ! CreateBuiltinFunction(stepsRejected, «
+// [[Generator]] »).
+// 8. Set onRejected.[[Generator]] to generator.
+// 9. Perform ! PerformPromiseThen(promise, onFulfilled, onRejected).
+// 10. Return undefined.
+//
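+//
+// For example (illustrative only), calling `return` on a generator suspended
+// at a yield first awaits the passed value, then completes the generator:
+//
+// ```
+// async function* gen() { yield 1; }
+// const it = gen();
+// await it.next();                      // suspends at `yield 1`
+// await it.return(Promise.resolve(42)); // -> { value: 42, done: true }
+// ```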
+
+namespace js {
+
+class AsyncGeneratorObject;
+enum class CompletionKind : uint8_t;
+
+extern const JSClass AsyncGeneratorFunctionClass;
+
+[[nodiscard]] bool AsyncGeneratorPromiseReactionJob(
+ JSContext* cx, PromiseHandler handler,
+ Handle<AsyncGeneratorObject*> generator, HandleValue argument);
+
+bool AsyncGeneratorNext(JSContext* cx, unsigned argc, Value* vp);
+bool AsyncGeneratorReturn(JSContext* cx, unsigned argc, Value* vp);
+bool AsyncGeneratorThrow(JSContext* cx, unsigned argc, Value* vp);
+
+// AsyncGeneratorRequest record in the spec.
+// Stores the info from AsyncGenerator#{next,return,throw}.
+//
+// This object is reused across multiple requests as an optimization, and
+// stored in the Slot_CachedRequest slot.
+class AsyncGeneratorRequest : public NativeObject {
+ private:
+ enum AsyncGeneratorRequestSlots {
+ // Int32 value with CompletionKind.
+ // Normal: next
+ // Return: return
+ // Throw: throw
+ Slot_CompletionKind = 0,
+
+ // The value passed to AsyncGenerator#{next,return,throw}.
+ Slot_CompletionValue,
+
+ // The promise returned by AsyncGenerator#{next,return,throw}.
+ Slot_Promise,
+
+ Slots,
+ };
+
+ void init(CompletionKind completionKind, const Value& completionValue,
+ PromiseObject* promise) {
+ setFixedSlot(Slot_CompletionKind,
+ Int32Value(static_cast<int32_t>(completionKind)));
+ setFixedSlot(Slot_CompletionValue, completionValue);
+ setFixedSlot(Slot_Promise, ObjectValue(*promise));
+ }
+
+ // Clear the request data for reuse.
+ void clearData() {
+ setFixedSlot(Slot_CompletionValue, NullValue());
+ setFixedSlot(Slot_Promise, NullValue());
+ }
+
+ friend AsyncGeneratorObject;
+
+ public:
+ static const JSClass class_;
+
+ static AsyncGeneratorRequest* create(JSContext* cx,
+ CompletionKind completionKind,
+ HandleValue completionValue,
+ Handle<PromiseObject*> promise);
+
+ CompletionKind completionKind() const {
+ return static_cast<CompletionKind>(
+ getFixedSlot(Slot_CompletionKind).toInt32());
+ }
+ JS::Value completionValue() const {
+ return getFixedSlot(Slot_CompletionValue);
+ }
+ PromiseObject* promise() const {
+ return &getFixedSlot(Slot_Promise).toObject().as<PromiseObject>();
+ }
+};
+
+class AsyncGeneratorObject : public AbstractGeneratorObject {
+ private:
+ enum AsyncGeneratorObjectSlots {
+ // Int32 value containing one of the |State| fields from below.
+ Slot_State = AbstractGeneratorObject::RESERVED_SLOTS,
+
+ // * null value if this async generator has no requests
+ // * AsyncGeneratorRequest if this async generator has only one request
+ // * list object if this async generator has 2 or more requests
+ Slot_QueueOrRequest,
+
+    // Cached AsyncGeneratorRequest for later use.
+    // undefined or null if there's no cached request.
+ Slot_CachedRequest,
+
+ Slots
+ };
+
+ public:
+ enum State {
+ // "suspendedStart" in the spec.
+ // Suspended after invocation.
+ State_SuspendedStart,
+
+ // "suspendedYield" in the spec
+ // Suspended with `yield` expression.
+ State_SuspendedYield,
+
+ // "executing" in the spec.
+    // Resumed from the initial suspend or a yield, and either running the
+    // script or waiting on an `await` expression.
+ State_Executing,
+
+ // Part of "executing" in the spec.
+    // Awaiting the value passed to AsyncGenerator#return while the generator
+    // is executing.
+ State_AwaitingYieldReturn,
+
+ // "awaiting-return" in the spec.
+    // Awaiting the value passed to AsyncGenerator#return after the generator
+    // has completed.
+ State_AwaitingReturn,
+
+ // "completed" in the spec.
+ // The generator is completed.
+ State_Completed
+ };
+
+ State state() const {
+ return static_cast<State>(getFixedSlot(Slot_State).toInt32());
+ }
+ void setState(State state_) { setFixedSlot(Slot_State, Int32Value(state_)); }
+
+ private:
+  // The queue is implemented in two ways. If at most one request has ever
+  // been queued, the request is stored directly in the slot. Once a second
+  // request is queued, a list is created, requests are appended to it, and
+  // the list is stored in the slot.
+
+ bool isSingleQueue() const {
+ return getFixedSlot(Slot_QueueOrRequest).isNull() ||
+ getFixedSlot(Slot_QueueOrRequest)
+ .toObject()
+ .is<AsyncGeneratorRequest>();
+ }
+ bool isSingleQueueEmpty() const {
+ return getFixedSlot(Slot_QueueOrRequest).isNull();
+ }
+ void setSingleQueueRequest(AsyncGeneratorRequest* request) {
+ setFixedSlot(Slot_QueueOrRequest, ObjectValue(*request));
+ }
+ void clearSingleQueueRequest() {
+ setFixedSlot(Slot_QueueOrRequest, NullValue());
+ }
+ AsyncGeneratorRequest* singleQueueRequest() const {
+ return &getFixedSlot(Slot_QueueOrRequest)
+ .toObject()
+ .as<AsyncGeneratorRequest>();
+ }
+
+ ListObject* queue() const {
+ return &getFixedSlot(Slot_QueueOrRequest).toObject().as<ListObject>();
+ }
+ void setQueue(ListObject* queue_) {
+ setFixedSlot(Slot_QueueOrRequest, ObjectValue(*queue_));
+ }
+
+ public:
+ static const JSClass class_;
+ static const JSClassOps classOps_;
+
+ static AsyncGeneratorObject* create(JSContext* cx, HandleFunction asyncGen);
+
+ bool isSuspendedStart() const { return state() == State_SuspendedStart; }
+ bool isSuspendedYield() const { return state() == State_SuspendedYield; }
+ bool isExecuting() const { return state() == State_Executing; }
+ bool isAwaitingYieldReturn() const {
+ return state() == State_AwaitingYieldReturn;
+ }
+ bool isAwaitingReturn() const { return state() == State_AwaitingReturn; }
+ bool isCompleted() const { return state() == State_Completed; }
+
+ void setSuspendedStart() { setState(State_SuspendedStart); }
+ void setSuspendedYield() { setState(State_SuspendedYield); }
+ void setExecuting() { setState(State_Executing); }
+ void setAwaitingYieldReturn() { setState(State_AwaitingYieldReturn); }
+ void setAwaitingReturn() { setState(State_AwaitingReturn); }
+ void setCompleted() { setState(State_Completed); }
+
+ [[nodiscard]] static bool enqueueRequest(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ Handle<AsyncGeneratorRequest*> request);
+
+ static AsyncGeneratorRequest* dequeueRequest(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator);
+
+ static AsyncGeneratorRequest* peekRequest(
+ Handle<AsyncGeneratorObject*> generator);
+
+ bool isQueueEmpty() const {
+ if (isSingleQueue()) {
+ return isSingleQueueEmpty();
+ }
+ return queue()->getDenseInitializedLength() == 0;
+ }
+
+ // This function does either of the following:
+ // * return a cached request object with the slots updated
+ // * create a new request object with the slots set
+ static AsyncGeneratorRequest* createRequest(
+ JSContext* cx, Handle<AsyncGeneratorObject*> generator,
+ CompletionKind completionKind, HandleValue completionValue,
+ Handle<PromiseObject*> promise);
+
+  // Stores the given request in the generator's cache after clearing its data
+  // slots. The cached request will be reused by a subsequent createRequest
+  // call.
+ void cacheRequest(AsyncGeneratorRequest* request) {
+ if (hasCachedRequest()) {
+ return;
+ }
+
+ request->clearData();
+ setFixedSlot(Slot_CachedRequest, ObjectValue(*request));
+ }
+
+ private:
+ bool hasCachedRequest() const {
+ return getFixedSlot(Slot_CachedRequest).isObject();
+ }
+
+ AsyncGeneratorRequest* takeCachedRequest() {
+ auto request = &getFixedSlot(Slot_CachedRequest)
+ .toObject()
+ .as<AsyncGeneratorRequest>();
+ clearCachedRequest();
+ return request;
+ }
+
+ void clearCachedRequest() { setFixedSlot(Slot_CachedRequest, NullValue()); }
+};
+
+JSObject* CreateAsyncFromSyncIterator(JSContext* cx, HandleObject iter,
+ HandleValue nextMethod);
+
+class AsyncFromSyncIteratorObject : public NativeObject {
+ private:
+ enum AsyncFromSyncIteratorObjectSlots {
+ // Object that implements the sync iterator protocol.
+ Slot_Iterator = 0,
+
+ // The `next` property of the iterator object.
+ Slot_NextMethod = 1,
+
+ Slots
+ };
+
+ void init(JSObject* iterator, const Value& nextMethod) {
+ setFixedSlot(Slot_Iterator, ObjectValue(*iterator));
+ setFixedSlot(Slot_NextMethod, nextMethod);
+ }
+
+ public:
+ static const JSClass class_;
+
+ static JSObject* create(JSContext* cx, HandleObject iter,
+ HandleValue nextMethod);
+
+ JSObject* iterator() const { return &getFixedSlot(Slot_Iterator).toObject(); }
+
+ const Value& nextMethod() const { return getFixedSlot(Slot_NextMethod); }
+};
+
+class AsyncIteratorObject : public NativeObject {
+ public:
+ static const JSClass class_;
+ static const JSClass protoClass_;
+};
+
+// Iterator Helpers proposal
+class AsyncIteratorHelperObject : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ enum { GeneratorSlot, SlotCount };
+
+ static_assert(GeneratorSlot == ASYNC_ITERATOR_HELPER_GENERATOR_SLOT,
+ "GeneratorSlot must match self-hosting define for generator "
+ "object slot.");
+};
+
+AsyncIteratorHelperObject* NewAsyncIteratorHelper(JSContext* cx);
+
+} // namespace js
+
+#endif /* vm_AsyncIteration_h */
diff --git a/js/src/vm/AtomsTable.h b/js/src/vm/AtomsTable.h
new file mode 100644
index 0000000000..aae7728fe5
--- /dev/null
+++ b/js/src/vm/AtomsTable.h
@@ -0,0 +1,123 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Implementation details of the atoms table.
+ */
+
+#ifndef vm_AtomsTable_h
+#define vm_AtomsTable_h
+
+#include "gc/Barrier.h"
+#include "js/GCHashTable.h"
+#include "js/TypeDecls.h"
+#include "js/Vector.h"
+#include "vm/StringType.h"
+
+/*
+ * The atoms table is a mapping from strings to JSAtoms that supports
+ * incremental sweeping.
+ */
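+
+/*
+ * Illustrative note: atomization deduplicates strings, so atomizing the same
+ * characters twice yields the same JSAtom, and atoms can be compared by
+ * pointer identity.
+ */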
+
+namespace js {
+
+struct AtomHasher {
+ struct Lookup;
+ static inline HashNumber hash(const Lookup& l);
+ static MOZ_ALWAYS_INLINE bool match(const WeakHeapPtr<JSAtom*>& entry,
+ const Lookup& lookup);
+ static void rekey(WeakHeapPtr<JSAtom*>& k,
+ const WeakHeapPtr<JSAtom*>& newKey) {
+ k = newKey;
+ }
+};
+
+// Note: Use a 'class' here to make forward declarations easier to use.
+class AtomSet : public JS::GCHashSet<WeakHeapPtr<JSAtom*>, AtomHasher,
+ SystemAllocPolicy> {
+ using Base =
+ JS::GCHashSet<WeakHeapPtr<JSAtom*>, AtomHasher, SystemAllocPolicy>;
+
+ public:
+ AtomSet() = default;
+  explicit AtomSet(size_t length) : Base(length) {}
+};
+
+// This class is a wrapper for AtomSet that is used to ensure the AtomSet is
+// not modified. It should only expose read-only methods from AtomSet.
+// Note however that the atoms within the table can be marked during GC.
+class FrozenAtomSet {
+ AtomSet* mSet;
+
+ public:
+ // This constructor takes ownership of the passed-in AtomSet.
+ explicit FrozenAtomSet(AtomSet* set) { mSet = set; }
+
+ ~FrozenAtomSet() { js_delete(mSet); }
+
+ MOZ_ALWAYS_INLINE AtomSet::Ptr readonlyThreadsafeLookup(
+ const AtomSet::Lookup& l) const;
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mSet->shallowSizeOfIncludingThis(mallocSizeOf);
+ }
+
+ using Range = AtomSet::Range;
+
+ AtomSet::Range all() const { return mSet->all(); }
+};
+
+class AtomsTable {
+ // Use a low initial capacity for atom hash tables to avoid penalizing
+ // runtimes which create a small number of atoms.
+ static const size_t InitialTableSize = 16;
+
+ // The main atoms set.
+ AtomSet atoms;
+
+ // Set of atoms added while the |atoms| set is being swept.
+ AtomSet* atomsAddedWhileSweeping;
+
+ // List of pinned atoms that are traced in every GC.
+ Vector<JSAtom*, 0, SystemAllocPolicy> pinnedAtoms;
+
+ public:
+ // An iterator used for sweeping atoms incrementally.
+ using SweepIterator = AtomSet::Enum;
+
+ AtomsTable();
+ ~AtomsTable();
+ bool init();
+
+ template <typename CharT>
+ MOZ_ALWAYS_INLINE JSAtom* atomizeAndCopyCharsNonStaticValidLength(
+ JSContext* cx, const CharT* chars, size_t length,
+ const mozilla::Maybe<uint32_t>& indexValue,
+ const AtomHasher::Lookup& lookup);
+
+ bool maybePinExistingAtom(JSContext* cx, JSAtom* atom);
+
+ void tracePinnedAtoms(JSTracer* trc);
+
+ // Sweep all atoms non-incrementally.
+ void traceWeak(JSTracer* trc);
+
+ bool startIncrementalSweep(mozilla::Maybe<SweepIterator>& atomsToSweepOut);
+
+ // Sweep some atoms incrementally and return whether we finished.
+ bool sweepIncrementally(SweepIterator& atomsToSweep, SliceBudget& budget);
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ private:
+ void mergeAtomsAddedWhileSweeping();
+};
+
+bool AtomIsPinned(JSContext* cx, JSAtom* atom);
+
+} // namespace js
+
+#endif /* vm_AtomsTable_h */
diff --git a/js/src/vm/BigIntType.cpp b/js/src/vm/BigIntType.cpp
new file mode 100644
index 0000000000..0f9621da54
--- /dev/null
+++ b/js/src/vm/BigIntType.cpp
@@ -0,0 +1,3847 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Portions of this code taken from WebKit, whose copyright is as follows:
+ *
+ * Copyright (C) 2017 Caio Lima <ticaiolima@gmail.com>
+ * Copyright (C) 2017-2018 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Portions of this code taken from V8, whose copyright notice is as follows:
+ *
+ * Copyright 2017 the V8 project authors. All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Portions of this code taken from Dart, whose copyright notice is as follows:
+ *
+ * Copyright (c) 2014 the Dart project authors. Please see the AUTHORS file
+ * [1] for details. All rights reserved. Use of this source code is governed by
+ * a BSD-style license that can be found in the LICENSE file [2].
+ *
+ * [1] https://github.com/dart-lang/sdk/blob/master/AUTHORS
+ * [2] https://github.com/dart-lang/sdk/blob/master/LICENSE
+ *
+ * Portions of this code taken from Go, whose copyright notice is as follows:
+ *
+ * Copyright 2009 The Go Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file [3].
+ *
+ * [3] https://golang.org/LICENSE
+ */
+
+#include "vm/BigIntType.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/Range.h"
+#include "mozilla/RangedPtr.h"
+#include "mozilla/Span.h" // mozilla::Span
+#include "mozilla/WrappingOperations.h"
+
+#include <functional>
+#include <limits>
+#include <memory>
+#include <type_traits> // std::is_same_v
+
+#include "jsnum.h"
+
+#include "gc/Allocator.h"
+#include "js/BigInt.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/StableStringChars.h"
+#include "js/Utility.h"
+#include "util/CheckedArithmetic.h"
+#include "util/DifferentialTesting.h"
+#include "vm/JSContext.h"
+#include "vm/StaticStrings.h"
+
+#include "gc/GCContext-inl.h"
+#include "gc/Nursery-inl.h"
+#include "vm/JSContext-inl.h"
+
+using namespace js;
+
+using JS::AutoStableStringChars;
+using mozilla::Abs;
+using mozilla::AssertedCast;
+using mozilla::BitwiseCast;
+using mozilla::Maybe;
+using mozilla::NegativeInfinity;
+using mozilla::Nothing;
+using mozilla::PositiveInfinity;
+using mozilla::Range;
+using mozilla::RangedPtr;
+using mozilla::Some;
+using mozilla::WrapToSigned;
+
+static inline unsigned DigitLeadingZeroes(BigInt::Digit x) {
+ return sizeof(x) == 4 ? mozilla::CountLeadingZeroes32(x)
+ : mozilla::CountLeadingZeroes64(x);
+}
+
+#ifdef DEBUG
+static bool HasLeadingZeroes(BigInt* bi) {
+ return bi->digitLength() > 0 && bi->digit(bi->digitLength() - 1) == 0;
+}
+#endif
+
+BigInt* BigInt::createUninitialized(JSContext* cx, size_t digitLength,
+ bool isNegative, gc::Heap heap) {
+ if (digitLength > MaxDigitLength) {
+ ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE);
+ return nullptr;
+ }
+
+ BigInt* x = cx->newCell<BigInt>(heap);
+ if (!x) {
+ return nullptr;
+ }
+
+ x->setLengthAndFlags(digitLength, isNegative ? SignBit : 0);
+
+ MOZ_ASSERT(x->digitLength() == digitLength);
+ MOZ_ASSERT(x->isNegative() == isNegative);
+
+ if (digitLength > InlineDigitsLength) {
+ x->heapDigits_ = js::AllocateBigIntDigits(cx, x, digitLength);
+ if (!x->heapDigits_) {
+ // |x| is partially initialized, expose it as a BigInt using inline digits
+ // to the GC.
+ x->setLengthAndFlags(0, 0);
+ return nullptr;
+ }
+
+ AddCellMemory(x, digitLength * sizeof(Digit), js::MemoryUse::BigIntDigits);
+ }
+
+ return x;
+}
+
+void BigInt::initializeDigitsToZero() {
+ auto digs = digits();
+ std::uninitialized_fill_n(digs.begin(), digs.Length(), 0);
+}
+
+void BigInt::finalize(JS::GCContext* gcx) {
+ MOZ_ASSERT(isTenured());
+ if (hasHeapDigits()) {
+ size_t size = digitLength() * sizeof(Digit);
+ gcx->free_(this, heapDigits_, size, js::MemoryUse::BigIntDigits);
+ }
+}
+
+js::HashNumber BigInt::hash() const {
+ js::HashNumber h =
+ mozilla::HashBytes(digits().data(), digitLength() * sizeof(Digit));
+ return mozilla::AddToHash(h, isNegative());
+}
+
+size_t BigInt::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return hasInlineDigits() ? 0 : mallocSizeOf(heapDigits_);
+}
+
+size_t BigInt::sizeOfExcludingThisInNursery(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ MOZ_ASSERT(!isTenured());
+
+ if (hasInlineDigits()) {
+ return 0;
+ }
+
+ const Nursery& nursery = runtimeFromMainThread()->gc.nursery();
+ if (nursery.isInside(heapDigits_)) {
+ // See |AllocateBigIntDigits()|.
+ return RoundUp(digitLength() * sizeof(Digit), sizeof(Value));
+ }
+
+ return mallocSizeOf(heapDigits_);
+}
+
+BigInt* BigInt::zero(JSContext* cx, gc::Heap heap) {
+ return createUninitialized(cx, 0, false, heap);
+}
+
+BigInt* BigInt::createFromDigit(JSContext* cx, Digit d, bool isNegative) {
+ MOZ_ASSERT(d != 0);
+ BigInt* res = createUninitialized(cx, 1, isNegative);
+ if (!res) {
+ return nullptr;
+ }
+ res->setDigit(0, d);
+ return res;
+}
+
+BigInt* BigInt::one(JSContext* cx) { return createFromDigit(cx, 1, false); }
+
+BigInt* BigInt::negativeOne(JSContext* cx) {
+ return createFromDigit(cx, 1, true);
+}
+
+BigInt* BigInt::createFromNonZeroRawUint64(JSContext* cx, uint64_t n,
+ bool isNegative) {
+ MOZ_ASSERT(n != 0);
+
+ size_t resultLength = 1;
+ if (DigitBits == 32 && (n >> 32) != 0) {
+ resultLength = 2;
+ }
+
+ BigInt* result = createUninitialized(cx, resultLength, isNegative);
+ if (!result) {
+ return nullptr;
+ }
+ result->setDigit(0, n);
+ if (DigitBits == 32 && resultLength > 1) {
+ result->setDigit(1, n >> 32);
+ }
+
+ MOZ_ASSERT(!HasLeadingZeroes(result));
+ return result;
+}
+
+BigInt* BigInt::neg(JSContext* cx, HandleBigInt x) {
+ if (x->isZero()) {
+ return x;
+ }
+
+ BigInt* result = copy(cx, x);
+ if (!result) {
+ return nullptr;
+ }
+ result->toggleHeaderFlagBit(SignBit);
+ return result;
+}
+
+#if !defined(JS_64BIT)
+# define HAVE_TWO_DIGIT 1
+using TwoDigit = uint64_t;
+#elif defined(__SIZEOF_INT128__)
+# define HAVE_TWO_DIGIT 1
+using TwoDigit = __uint128_t;
+#endif
+
+inline BigInt::Digit BigInt::digitMul(Digit a, Digit b, Digit* high) {
+#if defined(HAVE_TWO_DIGIT)
+ TwoDigit result = static_cast<TwoDigit>(a) * static_cast<TwoDigit>(b);
+ *high = result >> DigitBits;
+
+ return static_cast<Digit>(result);
+#else
+ // Multiply in half-pointer-sized chunks.
+ // For inputs [AH AL]*[BH BL], the result is:
+ //
+ // [AL*BL] // rLow
+ // + [AL*BH] // rMid1
+ // + [AH*BL] // rMid2
+ // + [AH*BH] // rHigh
+ // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1]
+ //
+ // Where of course we must be careful with carries between the columns.
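+  //
+  // E.g. with hypothetical 8-bit digits (4-bit halves), 0x12 * 0x34 gives
+  // rLow = 0x2*0x4 = 0x08, rMid1 = 0x2*0x3 = 0x06, rMid2 = 0x1*0x4 = 0x04,
+  // and rHigh = 0x1*0x3 = 0x03, combining to 0x08 + 0x60 + 0x40 + 0x300 =
+  // 0x3A8 = 0x12 * 0x34.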
+ Digit aLow = a & HalfDigitMask;
+ Digit aHigh = a >> HalfDigitBits;
+ Digit bLow = b & HalfDigitMask;
+ Digit bHigh = b >> HalfDigitBits;
+
+ Digit rLow = aLow * bLow;
+ Digit rMid1 = aLow * bHigh;
+ Digit rMid2 = aHigh * bLow;
+ Digit rHigh = aHigh * bHigh;
+
+ Digit carry = 0;
+ Digit low = digitAdd(rLow, rMid1 << HalfDigitBits, &carry);
+ low = digitAdd(low, rMid2 << HalfDigitBits, &carry);
+
+ *high = (rMid1 >> HalfDigitBits) + (rMid2 >> HalfDigitBits) + rHigh + carry;
+
+ return low;
+#endif
+}
+
+BigInt::Digit BigInt::digitDiv(Digit high, Digit low, Digit divisor,
+ Digit* remainder) {
+ MOZ_ASSERT(high < divisor, "division must not overflow");
+#if defined(__x86_64__)
+ Digit quotient;
+ Digit rem;
+ __asm__("divq %[divisor]"
+ // Outputs: `quotient` will be in rax, `rem` in rdx.
+ : "=a"(quotient), "=d"(rem)
+ // Inputs: put `high` into rdx, `low` into rax, and `divisor` into
+ // any register or stack slot.
+ : "d"(high), "a"(low), [divisor] "rm"(divisor));
+ *remainder = rem;
+ return quotient;
+#elif defined(__i386__)
+ Digit quotient;
+ Digit rem;
+ __asm__("divl %[divisor]"
+ // Outputs: `quotient` will be in eax, `rem` in edx.
+ : "=a"(quotient), "=d"(rem)
+ // Inputs: put `high` into edx, `low` into eax, and `divisor` into
+ // any register or stack slot.
+ : "d"(high), "a"(low), [divisor] "rm"(divisor));
+ *remainder = rem;
+ return quotient;
+#else
+ static constexpr Digit HalfDigitBase = 1ull << HalfDigitBits;
+ // Adapted from Warren, Hacker's Delight, p. 152.
+ unsigned s = DigitLeadingZeroes(divisor);
+  // If `s` were DigitBits here, the shift below would be undefined behavior.
+  // But `s` is never DigitBits, since `divisor` is never zero here.
+ MOZ_ASSERT(s != DigitBits);
+ divisor <<= s;
+
+ Digit vn1 = divisor >> HalfDigitBits;
+ Digit vn0 = divisor & HalfDigitMask;
+
+  // `sZeroMask` is 0 if s == 0 and all 1-bits otherwise.
+  //
+  // `s` can be 0. If `s` is 0, "low >> (DigitBits - s)" must not be
+  // evaluated, because `>> DigitBits` is undefined behavior in C++. Quoting
+  // the C++ spec: "The type of the result is that of the promoted left
+  // operand. The behavior is undefined if the right operand is negative, or
+  // greater than or equal to the length in bits of the promoted left
+  // operand." We mask the right operand of the shift by `shiftMask`
+  // (`DigitBits - 1`), which reduces the shift amount `DigitBits - 0` to
+  // zero.
+  //
+  // This shift produces a correct value for 0 < `s` <= (DigitBits - 1);
+  // `s` == DigitBits never happens, as asserted above. Since `sZeroMask`
+  // clears the value when `s` == 0, the `s` == 0 case is covered as well.
+ static_assert(sizeof(intptr_t) == sizeof(Digit),
+ "unexpected size of BigInt::Digit");
+ Digit sZeroMask =
+ static_cast<Digit>((-static_cast<intptr_t>(s)) >> (DigitBits - 1));
+ static constexpr unsigned shiftMask = DigitBits - 1;
+ Digit un32 =
+ (high << s) | ((low >> ((DigitBits - s) & shiftMask)) & sZeroMask);
+
+ Digit un10 = low << s;
+ Digit un1 = un10 >> HalfDigitBits;
+ Digit un0 = un10 & HalfDigitMask;
+ Digit q1 = un32 / vn1;
+ Digit rhat = un32 - q1 * vn1;
+
+ while (q1 >= HalfDigitBase || q1 * vn0 > rhat * HalfDigitBase + un1) {
+ q1--;
+ rhat += vn1;
+ if (rhat >= HalfDigitBase) {
+ break;
+ }
+ }
+
+ Digit un21 = un32 * HalfDigitBase + un1 - q1 * divisor;
+ Digit q0 = un21 / vn1;
+ rhat = un21 - q0 * vn1;
+
+ while (q0 >= HalfDigitBase || q0 * vn0 > rhat * HalfDigitBase + un0) {
+ q0--;
+ rhat += vn1;
+ if (rhat >= HalfDigitBase) {
+ break;
+ }
+ }
+
+ *remainder = (un21 * HalfDigitBase + un0 - q0 * divisor) >> s;
+ return q1 * HalfDigitBase + q0;
+#endif
+}
+
+// Multiplies `source` with `factor` and adds `summand` to the result.
+// `result` and `source` may be the same BigInt for in-place modification.
+void BigInt::internalMultiplyAdd(BigInt* source, Digit factor, Digit summand,
+ unsigned n, BigInt* result) {
+ MOZ_ASSERT(source->digitLength() >= n);
+ MOZ_ASSERT(result->digitLength() >= n);
+
+ Digit carry = summand;
+ Digit high = 0;
+ for (unsigned i = 0; i < n; i++) {
+ Digit current = source->digit(i);
+ Digit newCarry = 0;
+
+ // Compute this round's multiplication.
+ Digit newHigh = 0;
+ current = digitMul(current, factor, &newHigh);
+
+ // Add last round's carryovers.
+ current = digitAdd(current, high, &newCarry);
+ current = digitAdd(current, carry, &newCarry);
+
+ // Store result and prepare for next round.
+ result->setDigit(i, current);
+ carry = newCarry;
+ high = newHigh;
+ }
+
+ if (result->digitLength() > n) {
+ result->setDigit(n++, carry + high);
+
+ // Current callers don't pass in such large results, but let's be robust.
+ while (n < result->digitLength()) {
+ result->setDigit(n++, 0);
+ }
+ } else {
+ MOZ_ASSERT(!(carry + high));
+ }
+}
+
+// Multiplies `this` with `factor` and adds `summand` to the result.
+void BigInt::inplaceMultiplyAdd(Digit factor, Digit summand) {
+ internalMultiplyAdd(this, factor, summand, digitLength(), this);
+}
+
+// Multiplies `multiplicand` with `multiplier` and adds the result to
+// `accumulator`, starting at `accumulatorIndex` for the least-significant
+// digit. Callers must ensure that `accumulator`'s digitLength and
+// corresponding digit storage is long enough to hold the result.
+void BigInt::multiplyAccumulate(BigInt* multiplicand, Digit multiplier,
+ BigInt* accumulator,
+ unsigned accumulatorIndex) {
+ MOZ_ASSERT(accumulator->digitLength() >
+ multiplicand->digitLength() + accumulatorIndex);
+ if (!multiplier) {
+ return;
+ }
+
+ Digit carry = 0;
+ Digit high = 0;
+ for (unsigned i = 0; i < multiplicand->digitLength();
+ i++, accumulatorIndex++) {
+ Digit acc = accumulator->digit(accumulatorIndex);
+ Digit newCarry = 0;
+
+ // Add last round's carryovers.
+ acc = digitAdd(acc, high, &newCarry);
+ acc = digitAdd(acc, carry, &newCarry);
+
+ // Compute this round's multiplication.
+ Digit multiplicandDigit = multiplicand->digit(i);
+ Digit low = digitMul(multiplier, multiplicandDigit, &high);
+ acc = digitAdd(acc, low, &newCarry);
+
+ // Store result and prepare for next round.
+ accumulator->setDigit(accumulatorIndex, acc);
+ carry = newCarry;
+ }
+
+ while (carry || high) {
+ MOZ_ASSERT(accumulatorIndex < accumulator->digitLength());
+ Digit acc = accumulator->digit(accumulatorIndex);
+ Digit newCarry = 0;
+ acc = digitAdd(acc, high, &newCarry);
+ high = 0;
+ acc = digitAdd(acc, carry, &newCarry);
+ accumulator->setDigit(accumulatorIndex, acc);
+ carry = newCarry;
+ accumulatorIndex++;
+ }
+}
+
+inline int8_t BigInt::absoluteCompare(BigInt* x, BigInt* y) {
+ MOZ_ASSERT(!HasLeadingZeroes(x));
+ MOZ_ASSERT(!HasLeadingZeroes(y));
+
+ // Sanity checks to catch negative zeroes escaping to the wild.
+ MOZ_ASSERT(!x->isNegative() || !x->isZero());
+ MOZ_ASSERT(!y->isNegative() || !y->isZero());
+
+ int diff = x->digitLength() - y->digitLength();
+ if (diff) {
+ return diff < 0 ? -1 : 1;
+ }
+
+ int i = x->digitLength() - 1;
+ while (i >= 0 && x->digit(i) == y->digit(i)) {
+ i--;
+ }
+
+ if (i < 0) {
+ return 0;
+ }
+
+ return x->digit(i) > y->digit(i) ? 1 : -1;
+}
+
+BigInt* BigInt::absoluteAdd(JSContext* cx, HandleBigInt x, HandleBigInt y,
+ bool resultNegative) {
+ bool swap = x->digitLength() < y->digitLength();
+ // Ensure `left` has at least as many digits as `right`.
+ HandleBigInt& left = swap ? y : x;
+ HandleBigInt& right = swap ? x : y;
+
+ if (left->isZero()) {
+ MOZ_ASSERT(right->isZero());
+ return left;
+ }
+
+ if (right->isZero()) {
+ return resultNegative == left->isNegative() ? left : neg(cx, left);
+ }
+
+ // Fast path for the likely-common case of up to a uint64_t of magnitude.
+ if (left->absFitsInUint64()) {
+ MOZ_ASSERT(right->absFitsInUint64());
+
+ uint64_t lhs = left->uint64FromAbsNonZero();
+ uint64_t rhs = right->uint64FromAbsNonZero();
+
+ uint64_t res = lhs + rhs;
+ bool overflow = res < lhs;
+ MOZ_ASSERT(res != 0 || overflow);
+
+ size_t resultLength = 1;
+ if (DigitBits == 32) {
+ if (overflow) {
+ resultLength = 3;
+ } else if (res >> 32) {
+ resultLength = 2;
+ }
+ } else {
+ if (overflow) {
+ resultLength = 2;
+ }
+ }
+ BigInt* result = createUninitialized(cx, resultLength, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+ result->setDigit(0, res);
+ if (DigitBits == 32 && resultLength > 1) {
+ result->setDigit(1, res >> 32);
+ }
+ if (overflow) {
+ constexpr size_t overflowIndex = DigitBits == 32 ? 2 : 1;
+ result->setDigit(overflowIndex, 1);
+ }
+
+ MOZ_ASSERT(!HasLeadingZeroes(result));
+ return result;
+ }
+
+ BigInt* result =
+ createUninitialized(cx, left->digitLength() + 1, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+ Digit carry = 0;
+ unsigned i = 0;
+ for (; i < right->digitLength(); i++) {
+ Digit newCarry = 0;
+ Digit sum = digitAdd(left->digit(i), right->digit(i), &newCarry);
+ sum = digitAdd(sum, carry, &newCarry);
+ result->setDigit(i, sum);
+ carry = newCarry;
+ }
+
+ for (; i < left->digitLength(); i++) {
+ Digit newCarry = 0;
+ Digit sum = digitAdd(left->digit(i), carry, &newCarry);
+ result->setDigit(i, sum);
+ carry = newCarry;
+ }
+
+ result->setDigit(i, carry);
+
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+BigInt* BigInt::absoluteSub(JSContext* cx, HandleBigInt x, HandleBigInt y,
+ bool resultNegative) {
+ MOZ_ASSERT(x->digitLength() >= y->digitLength());
+ MOZ_ASSERT(absoluteCompare(x, y) > 0);
+ MOZ_ASSERT(!x->isZero());
+
+ if (y->isZero()) {
+ return resultNegative == x->isNegative() ? x : neg(cx, x);
+ }
+
+ // Fast path for the likely-common case of up to a uint64_t of magnitude.
+ if (x->absFitsInUint64()) {
+ MOZ_ASSERT(y->absFitsInUint64());
+
+ uint64_t lhs = x->uint64FromAbsNonZero();
+ uint64_t rhs = y->uint64FromAbsNonZero();
+ MOZ_ASSERT(lhs > rhs);
+
+ uint64_t res = lhs - rhs;
+ MOZ_ASSERT(res != 0);
+
+ return createFromNonZeroRawUint64(cx, res, resultNegative);
+ }
+
+ BigInt* result = createUninitialized(cx, x->digitLength(), resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+ Digit borrow = 0;
+ unsigned i = 0;
+ for (; i < y->digitLength(); i++) {
+ Digit newBorrow = 0;
+ Digit difference = digitSub(x->digit(i), y->digit(i), &newBorrow);
+ difference = digitSub(difference, borrow, &newBorrow);
+ result->setDigit(i, difference);
+ borrow = newBorrow;
+ }
+
+ for (; i < x->digitLength(); i++) {
+ Digit newBorrow = 0;
+ Digit difference = digitSub(x->digit(i), borrow, &newBorrow);
+ result->setDigit(i, difference);
+ borrow = newBorrow;
+ }
+
+ MOZ_ASSERT(!borrow);
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+// Divides `x` by `divisor`, returning the result in `quotient` and `remainder`.
+// Mathematically, the contract is:
+//
+// quotient = (x - remainder) / divisor, with 0 <= remainder < divisor.
+//
+// If `quotient` is an empty handle, an appropriately sized BigInt will be
+// allocated for it; otherwise the caller must ensure that it is big enough.
+// `quotient` can be the same as `x` for an in-place division. `quotient` can
+// also be `Nothing()` if the caller is only interested in the remainder.
+//
+// This function returns false if `quotient` is an empty handle, but allocating
+// the quotient failed. Otherwise it returns true, indicating success.
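+//
+// E.g. (illustrative, 32-bit digits): dividing x = 2^32, whose digits are
+// [0, 1] from least to most significant, by divisor = 3 walks the digits from
+// most to least significant and produces quotient digits [0x55555555, 0] with
+// *remainder = 1.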
+bool BigInt::absoluteDivWithDigitDivisor(
+ JSContext* cx, HandleBigInt x, Digit divisor,
+ const Maybe<MutableHandleBigInt>& quotient, Digit* remainder,
+ bool quotientNegative) {
+ MOZ_ASSERT(divisor);
+
+ MOZ_ASSERT(!x->isZero());
+ *remainder = 0;
+ if (divisor == 1) {
+ if (quotient) {
+ BigInt* q;
+ if (x->isNegative() == quotientNegative) {
+ q = x;
+ } else {
+ q = neg(cx, x);
+ if (!q) {
+ return false;
+ }
+ }
+ quotient.value().set(q);
+ }
+ return true;
+ }
+
+ unsigned length = x->digitLength();
+ if (quotient) {
+ if (!quotient.value()) {
+ BigInt* q = createUninitialized(cx, length, quotientNegative);
+ if (!q) {
+ return false;
+ }
+ quotient.value().set(q);
+ }
+
+ for (int i = length - 1; i >= 0; i--) {
+ Digit q = digitDiv(*remainder, x->digit(i), divisor, remainder);
+ quotient.value()->setDigit(i, q);
+ }
+ } else {
+ for (int i = length - 1; i >= 0; i--) {
+ digitDiv(*remainder, x->digit(i), divisor, remainder);
+ }
+ }
+
+ return true;
+}
+
+// Adds `summand` onto `this`, starting with `summand`'s 0th digit
+// at `this`'s `startIndex`'th digit. Returns the "carry" (0 or 1).
+BigInt::Digit BigInt::absoluteInplaceAdd(BigInt* summand, unsigned startIndex) {
+ Digit carry = 0;
+ unsigned n = summand->digitLength();
+ MOZ_ASSERT(digitLength() > startIndex,
+ "must start adding at an in-range digit");
+ MOZ_ASSERT(digitLength() - startIndex >= n,
+ "digits being added to must not extend above the digits in "
+ "this (except for the returned carry digit)");
+ for (unsigned i = 0; i < n; i++) {
+ Digit newCarry = 0;
+ Digit sum = digitAdd(digit(startIndex + i), summand->digit(i), &newCarry);
+ sum = digitAdd(sum, carry, &newCarry);
+ setDigit(startIndex + i, sum);
+ carry = newCarry;
+ }
+
+ return carry;
+}
+
+// Subtracts `subtrahend` from this, starting with `subtrahend`'s 0th digit
+// at `this`'s `startIndex`-th digit. Returns the "borrow" (0 or 1).
+BigInt::Digit BigInt::absoluteInplaceSub(BigInt* subtrahend,
+ unsigned startIndex) {
+ Digit borrow = 0;
+ unsigned n = subtrahend->digitLength();
+ MOZ_ASSERT(digitLength() > startIndex,
+ "must start subtracting from an in-range digit");
+ MOZ_ASSERT(digitLength() - startIndex >= n,
+ "digits being subtracted from must not extend above the "
+ "digits in this (except for the returned borrow digit)");
+ for (unsigned i = 0; i < n; i++) {
+ Digit newBorrow = 0;
+ Digit difference =
+ digitSub(digit(startIndex + i), subtrahend->digit(i), &newBorrow);
+ difference = digitSub(difference, borrow, &newBorrow);
+ setDigit(startIndex + i, difference);
+ borrow = newBorrow;
+ }
+
+ return borrow;
+}
+
+// Returns whether (factor1 * factor2) > (high << DigitBits) + low.
+inline bool BigInt::productGreaterThan(Digit factor1, Digit factor2, Digit high,
+ Digit low) {
+ Digit resultHigh;
+ Digit resultLow = digitMul(factor1, factor2, &resultHigh);
+ return resultHigh > high || (resultHigh == high && resultLow > low);
+}
+
+void BigInt::inplaceRightShiftLowZeroBits(unsigned shift) {
+ MOZ_ASSERT(shift < DigitBits);
+ MOZ_ASSERT(!(digit(0) & ((static_cast<Digit>(1) << shift) - 1)),
+ "should only be shifting away zeroes");
+
+ if (!shift) {
+ return;
+ }
+
+ Digit carry = digit(0) >> shift;
+ unsigned last = digitLength() - 1;
+ for (unsigned i = 0; i < last; i++) {
+ Digit d = digit(i + 1);
+ setDigit(i, (d << (DigitBits - shift)) | carry);
+ carry = d >> shift;
+ }
+ setDigit(last, carry);
+}
+
+// Always copies the input, even when `shift` == 0.
+BigInt* BigInt::absoluteLeftShiftAlwaysCopy(JSContext* cx, HandleBigInt x,
+ unsigned shift,
+ LeftShiftMode mode) {
+ MOZ_ASSERT(shift < DigitBits);
+ MOZ_ASSERT(!x->isZero());
+
+ unsigned n = x->digitLength();
+ unsigned resultLength = mode == LeftShiftMode::AlwaysAddOneDigit ? n + 1 : n;
+ BigInt* result = createUninitialized(cx, resultLength, x->isNegative());
+ if (!result) {
+ return nullptr;
+ }
+
+ if (!shift) {
+ for (unsigned i = 0; i < n; i++) {
+ result->setDigit(i, x->digit(i));
+ }
+ if (mode == LeftShiftMode::AlwaysAddOneDigit) {
+ result->setDigit(n, 0);
+ }
+
+ return result;
+ }
+
+ Digit carry = 0;
+ for (unsigned i = 0; i < n; i++) {
+ Digit d = x->digit(i);
+ result->setDigit(i, (d << shift) | carry);
+ carry = d >> (DigitBits - shift);
+ }
+
+ if (mode == LeftShiftMode::AlwaysAddOneDigit) {
+ result->setDigit(n, carry);
+ } else {
+ MOZ_ASSERT(mode == LeftShiftMode::SameSizeResult);
+ MOZ_ASSERT(!carry);
+ }
+
+ return result;
+}
+
+// Divides `dividend` by `divisor`, returning the result in `quotient` and
+// `remainder`. Mathematically, the contract is:
+//
+// quotient = (dividend - remainder) / divisor, with 0 <= remainder < divisor.
+//
+// Both `quotient` and `remainder` are optional, for callers that are only
+// interested in one of them. See Knuth, Volume 2, section 4.3.1, Algorithm D.
+// Also see the overview of the algorithm by Jan Marthedal Rasmussen over at
+// https://janmr.com/blog/2014/04/basic-multiple-precision-long-division/.
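+//
+// As a rough base-10 analogy (one decimal digit per Digit), dividing 4178 by
+// 53 estimates each quotient digit from the leading digits (41 / 5 suggests
+// 8), corrects the estimate downward (to 7) when multiply-and-subtract would
+// underflow, and repeats: quotient 78, remainder 44.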
+bool BigInt::absoluteDivWithBigIntDivisor(
+ JSContext* cx, HandleBigInt dividend, HandleBigInt divisor,
+ const Maybe<MutableHandleBigInt>& quotient,
+ const Maybe<MutableHandleBigInt>& remainder, bool isNegative) {
+ MOZ_ASSERT(divisor->digitLength() >= 2);
+ MOZ_ASSERT(dividend->digitLength() >= divisor->digitLength());
+
+ // Any early error return is detectable by checking the quotient and/or
+ // remainder output values.
+ MOZ_ASSERT(!quotient || !quotient.value());
+ MOZ_ASSERT(!remainder || !remainder.value());
+
+ // The unusual variable names inside this function are consistent with
+ // Knuth's book, as well as with Go's implementation of this algorithm.
+ // Maintaining this consistency is probably more useful than trying to
+ // come up with more descriptive names for them.
+ const unsigned n = divisor->digitLength();
+ const unsigned m = dividend->digitLength() - n;
+
+ // The quotient to be computed.
+ RootedBigInt q(cx);
+ if (quotient) {
+ q = createUninitialized(cx, m + 1, isNegative);
+ if (!q) {
+ return false;
+ }
+ }
+
+ // In each iteration, `qhatv` holds `divisor` * `current quotient digit`.
+ // "v" is the book's name for `divisor`, `qhat` the current quotient digit.
+ RootedBigInt qhatv(cx, createUninitialized(cx, n + 1, isNegative));
+ if (!qhatv) {
+ return false;
+ }
+
+ // D1.
+ // Left-shift inputs so that the divisor's MSB is set. This is necessary to
+ // prevent the digit-wise divisions (see digitDiv call below) from
+ // overflowing (they take a two digits wide input, and return a one digit
+ // result).
+ Digit lastDigit = divisor->digit(n - 1);
+ unsigned shift = DigitLeadingZeroes(lastDigit);
+
+ RootedBigInt shiftedDivisor(cx);
+ if (shift > 0) {
+ shiftedDivisor = absoluteLeftShiftAlwaysCopy(cx, divisor, shift,
+ LeftShiftMode::SameSizeResult);
+ if (!shiftedDivisor) {
+ return false;
+ }
+ } else {
+ shiftedDivisor = divisor;
+ }
+
+ // Holds the (continuously updated) remaining part of the dividend, which
+ // eventually becomes the remainder.
+ RootedBigInt u(cx,
+ absoluteLeftShiftAlwaysCopy(cx, dividend, shift,
+ LeftShiftMode::AlwaysAddOneDigit));
+ if (!u) {
+ return false;
+ }
+
+ // D2.
+  // Iterate over the dividend's digits (like the "grade school" algorithm).
+ // `vn1` is the divisor's most significant digit.
+ Digit vn1 = shiftedDivisor->digit(n - 1);
+ for (int j = m; j >= 0; j--) {
+ // D3.
+ // Estimate the current iteration's quotient digit (see Knuth for details).
+ // `qhat` is the current quotient digit.
+ Digit qhat = std::numeric_limits<Digit>::max();
+
+ // `ujn` is the dividend's most significant remaining digit.
+ Digit ujn = u->digit(j + n);
+ if (ujn != vn1) {
+ // `rhat` is the current iteration's remainder.
+ Digit rhat = 0;
+ // Estimate the current quotient digit by dividing the most significant
+ // digits of dividend and divisor. The result will not be too small,
+ // but could be a bit too large.
+ qhat = digitDiv(ujn, u->digit(j + n - 1), vn1, &rhat);
+
+ // Decrement the quotient estimate as needed by looking at the next
+ // digit, i.e. by testing whether
+ // qhat * v_{n-2} > (rhat << DigitBits) + u_{j+n-2}.
+ Digit vn2 = shiftedDivisor->digit(n - 2);
+ Digit ujn2 = u->digit(j + n - 2);
+ while (productGreaterThan(qhat, vn2, rhat, ujn2)) {
+ qhat--;
+ Digit prevRhat = rhat;
+ rhat += vn1;
+ // v[n-1] >= 0, so this tests for overflow.
+ if (rhat < prevRhat) {
+ break;
+ }
+ }
+ }
+
+ // D4.
+ // Multiply the divisor with the current quotient digit, and subtract
+ // it from the dividend. If there was "borrow", then the quotient digit
+ // was one too high, so we must correct it and undo one subtraction of
+ // the (shifted) divisor.
+ internalMultiplyAdd(shiftedDivisor, qhat, 0, n, qhatv);
+ Digit c = u->absoluteInplaceSub(qhatv, j);
+ if (c) {
+ c = u->absoluteInplaceAdd(shiftedDivisor, j);
+ u->setDigit(j + n, u->digit(j + n) + c);
+ qhat--;
+ }
+
+ if (quotient) {
+ q->setDigit(j, qhat);
+ }
+ }
+
+ if (quotient) {
+ BigInt* bi = destructivelyTrimHighZeroDigits(cx, q);
+ if (!bi) {
+ return false;
+ }
+ quotient.value().set(q);
+ }
+
+ if (remainder) {
+ u->inplaceRightShiftLowZeroBits(shift);
+ remainder.value().set(u);
+ }
+
+ return true;
+}
+
+// Helper for Absolute{And,AndNot,Or,Xor}.
+// Performs the given binary `op` on digit pairs of `x` and `y`; when the
+// end of the shorter of the two is reached, `kind` configures how
+// remaining digits are handled.
+// Example:
+// y: [ y2 ][ y1 ][ y0 ]
+// x: [ x3 ][ x2 ][ x1 ][ x0 ]
+// | | | |
+// (Fill) (op) (op) (op)
+// | | | |
+// v v v v
+// result: [ 0 ][ x3 ][ r2 ][ r1 ][ r0 ]
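+// For instance, absoluteAnd below uses SymmetricTrim (a digit ANDed with an
+// implicit leading 0 is 0, so excess digits vanish), absoluteOr and
+// absoluteXor use SymmetricFill (excess digits pass through unchanged), and
+// absoluteAndNot uses AsymmetricFill (excess digits of `x` survive; excess
+// digits of `y` are ignored).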
+template <BigInt::BitwiseOpKind kind, typename BitwiseOp>
+inline BigInt* BigInt::absoluteBitwiseOp(JSContext* cx, HandleBigInt x,
+ HandleBigInt y, BitwiseOp&& op) {
+ unsigned xLength = x->digitLength();
+ unsigned yLength = y->digitLength();
+ unsigned numPairs = std::min(xLength, yLength);
+ unsigned resultLength;
+ if (kind == BitwiseOpKind::SymmetricTrim) {
+ resultLength = numPairs;
+ } else if (kind == BitwiseOpKind::SymmetricFill) {
+ resultLength = std::max(xLength, yLength);
+ } else {
+ MOZ_ASSERT(kind == BitwiseOpKind::AsymmetricFill);
+ resultLength = xLength;
+ }
+ bool resultNegative = false;
+
+ BigInt* result = createUninitialized(cx, resultLength, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+
+ unsigned i = 0;
+ for (; i < numPairs; i++) {
+ result->setDigit(i, op(x->digit(i), y->digit(i)));
+ }
+
+ if (kind != BitwiseOpKind::SymmetricTrim) {
+ BigInt* source = kind == BitwiseOpKind::AsymmetricFill ? x
+ : xLength == i ? y
+ : x;
+ for (; i < resultLength; i++) {
+ result->setDigit(i, source->digit(i));
+ }
+ }
+
+ MOZ_ASSERT(i == resultLength);
+
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+BigInt* BigInt::absoluteAnd(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ return absoluteBitwiseOp<BitwiseOpKind::SymmetricTrim>(cx, x, y,
+ std::bit_and<Digit>());
+}
+
+BigInt* BigInt::absoluteOr(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ return absoluteBitwiseOp<BitwiseOpKind::SymmetricFill>(cx, x, y,
+ std::bit_or<Digit>());
+}
+
+BigInt* BigInt::absoluteAndNot(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ auto digitOperation = [](Digit a, Digit b) { return a & ~b; };
+ return absoluteBitwiseOp<BitwiseOpKind::AsymmetricFill>(cx, x, y,
+ digitOperation);
+}
+
+BigInt* BigInt::absoluteXor(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ return absoluteBitwiseOp<BitwiseOpKind::SymmetricFill>(cx, x, y,
+ std::bit_xor<Digit>());
+}
+
+BigInt* BigInt::absoluteAddOne(JSContext* cx, HandleBigInt x,
+ bool resultNegative) {
+ unsigned inputLength = x->digitLength();
+ // The addition will overflow into a new digit if all existing digits are
+ // at maximum.
+ bool willOverflow = true;
+ for (unsigned i = 0; i < inputLength; i++) {
+ if (std::numeric_limits<Digit>::max() != x->digit(i)) {
+ willOverflow = false;
+ break;
+ }
+ }
+
+ unsigned resultLength = inputLength + willOverflow;
+ BigInt* result = createUninitialized(cx, resultLength, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+
+ Digit carry = 1;
+ for (unsigned i = 0; i < inputLength; i++) {
+ Digit newCarry = 0;
+ result->setDigit(i, digitAdd(x->digit(i), carry, &newCarry));
+ carry = newCarry;
+ }
+ if (resultLength > inputLength) {
+ MOZ_ASSERT(carry == 1);
+ result->setDigit(inputLength, 1);
+ } else {
+ MOZ_ASSERT(!carry);
+ }
+
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+BigInt* BigInt::absoluteSubOne(JSContext* cx, HandleBigInt x,
+ bool resultNegative) {
+ MOZ_ASSERT(!x->isZero());
+
+ unsigned length = x->digitLength();
+
+ if (length == 1) {
+ Digit d = x->digit(0);
+ if (d == 1) {
+ // Ignore resultNegative.
+ return zero(cx);
+ }
+ return createFromDigit(cx, d - 1, resultNegative);
+ }
+
+ BigInt* result = createUninitialized(cx, length, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+
+ Digit borrow = 1;
+ for (unsigned i = 0; i < length; i++) {
+ Digit newBorrow = 0;
+ result->setDigit(i, digitSub(x->digit(i), borrow, &newBorrow));
+ borrow = newBorrow;
+ }
+ MOZ_ASSERT(!borrow);
+
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+BigInt* BigInt::inc(JSContext* cx, HandleBigInt x) {
+ if (x->isZero()) {
+ return one(cx);
+ }
+
+ bool isNegative = x->isNegative();
+ if (isNegative) {
+ return absoluteSubOne(cx, x, isNegative);
+ }
+
+ return absoluteAddOne(cx, x, isNegative);
+}
+
+BigInt* BigInt::dec(JSContext* cx, HandleBigInt x) {
+ if (x->isZero()) {
+ return negativeOne(cx);
+ }
+
+ bool isNegative = x->isNegative();
+ if (isNegative) {
+ return absoluteAddOne(cx, x, isNegative);
+ }
+
+ return absoluteSubOne(cx, x, isNegative);
+}
+
+// Lookup table for the maximum number of bits required per character of a
+// base-N string representation of a number. To increase accuracy, the array
+// value is the actual value multiplied by 32. To generate this table:
+// for (var i = 0; i <= 36; i++) { print(Math.ceil(Math.log2(i) * 32) + ","); }
+static constexpr uint8_t maxBitsPerCharTable[] = {
+ 0, 0, 32, 51, 64, 75, 83, 90, 96, // 0..8
+ 102, 107, 111, 115, 119, 122, 126, 128, // 9..16
+ 131, 134, 136, 139, 141, 143, 145, 147, // 17..24
+ 149, 151, 153, 154, 156, 158, 159, 160, // 25..32
+ 162, 163, 165, 166, // 33..36
+};
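+
+// (Illustrative reading: maxBitsPerCharTable[10] == 107 because
+// log2(10) * 32 ≈ 106.3, rounded up.)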
+
+static constexpr unsigned bitsPerCharTableShift = 5;
+static constexpr size_t bitsPerCharTableMultiplier = 1u
+ << bitsPerCharTableShift;
+static constexpr char radixDigits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+
+static inline uint64_t CeilDiv(uint64_t numerator, uint64_t denominator) {
+ MOZ_ASSERT(numerator != 0);
+ return 1 + (numerator - 1) / denominator;
+}
+
+// Compute (an overapproximation of) the length of the string representation of
+// a BigInt. In base B an X-digit number has maximum value:
+//
+// B**X - 1
+//
+// We're trying to find N for an N-digit number in base |radix| fully
+// representing a |bitLength|-digit number in base 2, so we have:
+//
+// radix**N - 1 ≥ 2**bitLength - 1
+// radix**N ≥ 2**bitLength
+// N ≥ log2(2**bitLength) / log2(radix)
+// N ≥ bitLength / log2(radix)
+//
+// so the smallest N is:
+//
+// N = ⌈bitLength / log2(radix)⌉
+//
+// We want to avoid floating-point computations and precompute the logarithm, so
+// we multiply the numerator and denominator by |bitsPerCharTableMultiplier|:
+//
+// N = ⌈(bPCTM * bitLength) / (bPCTM * log2(radix))⌉
+//
+// and then because |maxBitsPerChar| representing the denominator may have been
+// rounded *up* -- which could produce an overall under-computation -- we
+// reduce the denominator by one to undo any rounding and conservatively
+// compute:
+//
+// N ≥ ⌈(bPCTM * bitLength) / (maxBitsPerChar - 1)⌉
+//
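+// As a worked example (using the table above), for x == 255n (bitLength == 8)
+// and radix == 10: maxBitsPerChar == 107, so N == CeilDiv(32 * 8, 106) == 3,
+// exactly the three characters of "255".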
+size_t BigInt::calculateMaximumCharactersRequired(HandleBigInt x,
+ unsigned radix) {
+ MOZ_ASSERT(!x->isZero());
+ MOZ_ASSERT(radix >= 2 && radix <= 36);
+
+ size_t length = x->digitLength();
+ Digit lastDigit = x->digit(length - 1);
+ size_t bitLength = length * DigitBits - DigitLeadingZeroes(lastDigit);
+
+ uint8_t maxBitsPerChar = maxBitsPerCharTable[radix];
+ uint64_t maximumCharactersRequired =
+ CeilDiv(static_cast<uint64_t>(bitsPerCharTableMultiplier) * bitLength,
+ maxBitsPerChar - 1);
+ maximumCharactersRequired += x->isNegative();
+
+ return AssertedCast<size_t>(maximumCharactersRequired);
+}
+
+template <AllowGC allowGC>
+JSLinearString* BigInt::toStringBasePowerOfTwo(JSContext* cx, HandleBigInt x,
+ unsigned radix) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(radix));
+ MOZ_ASSERT(radix >= 2 && radix <= 32);
+ MOZ_ASSERT(!x->isZero());
+
+ const unsigned length = x->digitLength();
+ const bool sign = x->isNegative();
+ const unsigned bitsPerChar = mozilla::CountTrailingZeroes32(radix);
+ const unsigned charMask = radix - 1;
+ // Compute the length of the resulting string: divide the bit length of the
+ // BigInt by the number of bits representable per character (rounding up).
+ const Digit msd = x->digit(length - 1);
+
+ const size_t bitLength = length * DigitBits - DigitLeadingZeroes(msd);
+ const size_t charsRequired = CeilDiv(bitLength, bitsPerChar) + sign;
+
+ if (charsRequired > JSString::MAX_LENGTH) {
+ if constexpr (allowGC) {
+ ReportAllocationOverflow(cx);
+ }
+ return nullptr;
+ }
+
+ auto resultChars = cx->make_pod_array<char>(charsRequired);
+ if (!resultChars) {
+ if constexpr (!allowGC) {
+ cx->recoverFromOutOfMemory();
+ }
+ return nullptr;
+ }
+
+ Digit digit = 0;
+ // Keeps track of how many unprocessed bits there are in |digit|.
+ unsigned availableBits = 0;
+ size_t pos = charsRequired;
+ for (unsigned i = 0; i < length - 1; i++) {
+ Digit newDigit = x->digit(i);
+ // Take any leftover bits from the last iteration into account.
+ unsigned current = (digit | (newDigit << availableBits)) & charMask;
+ MOZ_ASSERT(pos);
+ resultChars[--pos] = radixDigits[current];
+ unsigned consumedBits = bitsPerChar - availableBits;
+ digit = newDigit >> consumedBits;
+ availableBits = DigitBits - consumedBits;
+ while (availableBits >= bitsPerChar) {
+ MOZ_ASSERT(pos);
+ resultChars[--pos] = radixDigits[digit & charMask];
+ digit >>= bitsPerChar;
+ availableBits -= bitsPerChar;
+ }
+ }
+
+ // Write out the character containing the lowest-order bit of |msd|.
+ //
+ // This character may include leftover bits from the Digit below |msd|. For
+ // example, if |x === 2n**64n| and |radix == 32|: the preceding loop writes
+ // twelve zeroes for low-order bits 0-59 in |x->digit(0)| (and |x->digit(1)|
+  // on 32-bit); then the highest 4 bits of |x->digit(0)| (or |x->digit(1)|
+ // on 32-bit) and bit 0 of |x->digit(1)| (|x->digit(2)| on 32-bit) will
+ // comprise the |current == 0b1'0000| computed below for the high-order 'g'
+ // character.
+ unsigned current = (digit | (msd << availableBits)) & charMask;
+ MOZ_ASSERT(pos);
+ resultChars[--pos] = radixDigits[current];
+
+ // Write out remaining characters represented by |msd|. (There may be none,
+ // as in the example above.)
+ digit = msd >> (bitsPerChar - availableBits);
+ while (digit != 0) {
+ MOZ_ASSERT(pos);
+ resultChars[--pos] = radixDigits[digit & charMask];
+ digit >>= bitsPerChar;
+ }
+
+ if (sign) {
+ MOZ_ASSERT(pos);
+ resultChars[--pos] = '-';
+ }
+
+ MOZ_ASSERT(pos == 0);
+ return NewStringCopyN<allowGC>(cx, resultChars.get(), charsRequired);
+}
+
+template <AllowGC allowGC>
+JSLinearString* BigInt::toStringSingleDigitBaseTen(JSContext* cx, Digit digit,
+ bool isNegative) {
+ if (digit <= Digit(INT32_MAX)) {
+ int32_t val = AssertedCast<int32_t>(digit);
+ return Int32ToString<allowGC>(cx, isNegative ? -val : val);
+ }
+
+ MOZ_ASSERT(digit != 0, "zero case should have been handled in toString");
+
+ constexpr size_t maxLength = 1 + (std::numeric_limits<Digit>::digits10 + 1);
+ static_assert(maxLength == 11 || maxLength == 21,
+ "unexpected decimal string length");
+
+ char resultChars[maxLength];
+ size_t writePos = maxLength;
+
+ while (digit != 0) {
+ MOZ_ASSERT(writePos > 0);
+ resultChars[--writePos] = radixDigits[digit % 10];
+ digit /= 10;
+ }
+ MOZ_ASSERT(writePos < maxLength);
+ MOZ_ASSERT(resultChars[writePos] != '0');
+
+ if (isNegative) {
+ MOZ_ASSERT(writePos > 0);
+ resultChars[--writePos] = '-';
+ }
+
+ MOZ_ASSERT(writePos < maxLength);
+ return NewStringCopyN<allowGC>(cx, resultChars + writePos,
+ maxLength - writePos);
+}
+
+static constexpr BigInt::Digit MaxPowerInDigit(uint8_t radix) {
+ BigInt::Digit result = 1;
+ while (result < BigInt::Digit(-1) / radix) {
+ result *= radix;
+ }
+ return result;
+}
+
+static constexpr uint8_t MaxExponentInDigit(uint8_t radix) {
+ uint8_t exp = 0;
+ BigInt::Digit result = 1;
+ while (result < BigInt::Digit(-1) / radix) {
+ result *= radix;
+ exp += 1;
+ }
+ return exp;
+}
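+
+// (For example, with 64-bit digits and radix 10, MaxPowerInDigit(10) ==
+// 10**19 and MaxExponentInDigit(10) == 19: 10**19 is the largest power of
+// ten that fits in a Digit.)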
+
+struct RadixInfo {
+ BigInt::Digit maxPowerInDigit;
+ uint8_t maxExponentInDigit;
+
+ constexpr RadixInfo(BigInt::Digit maxPower, uint8_t maxExponent)
+ : maxPowerInDigit(maxPower), maxExponentInDigit(maxExponent) {}
+
+ explicit constexpr RadixInfo(uint8_t radix)
+ : RadixInfo(MaxPowerInDigit(radix), MaxExponentInDigit(radix)) {}
+};
+
+static constexpr const RadixInfo toStringInfo[37] = {
+ {0, 0}, {0, 0}, RadixInfo(2), RadixInfo(3), RadixInfo(4),
+ RadixInfo(5), RadixInfo(6), RadixInfo(7), RadixInfo(8), RadixInfo(9),
+ RadixInfo(10), RadixInfo(11), RadixInfo(12), RadixInfo(13), RadixInfo(14),
+ RadixInfo(15), RadixInfo(16), RadixInfo(17), RadixInfo(18), RadixInfo(19),
+ RadixInfo(20), RadixInfo(21), RadixInfo(22), RadixInfo(23), RadixInfo(24),
+ RadixInfo(25), RadixInfo(26), RadixInfo(27), RadixInfo(28), RadixInfo(29),
+ RadixInfo(30), RadixInfo(31), RadixInfo(32), RadixInfo(33), RadixInfo(34),
+ RadixInfo(35), RadixInfo(36),
+};
+
+JSLinearString* BigInt::toStringGeneric(JSContext* cx, HandleBigInt x,
+ unsigned radix) {
+ MOZ_ASSERT(radix >= 2 && radix <= 36);
+ MOZ_ASSERT(!x->isZero());
+
+ size_t maximumCharactersRequired =
+ calculateMaximumCharactersRequired(x, radix);
+ if (maximumCharactersRequired > JSString::MAX_LENGTH) {
+ ReportAllocationOverflow(cx);
+ return nullptr;
+ }
+
+ UniqueChars resultString(js_pod_malloc<char>(maximumCharactersRequired));
+ if (!resultString) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ size_t writePos = maximumCharactersRequired;
+ unsigned length = x->digitLength();
+ Digit lastDigit;
+ if (length == 1) {
+ lastDigit = x->digit(0);
+ } else {
+ unsigned chunkChars = toStringInfo[radix].maxExponentInDigit;
+ Digit chunkDivisor = toStringInfo[radix].maxPowerInDigit;
+
+ unsigned nonZeroDigit = length - 1;
+ MOZ_ASSERT(x->digit(nonZeroDigit) != 0);
+
+ // `rest` holds the part of the BigInt that we haven't looked at yet.
+ // Not to be confused with "remainder"!
+ RootedBigInt rest(cx);
+
+ // In the first round, divide the input, allocating a new BigInt for
+ // the result == rest; from then on divide the rest in-place.
+ //
+ // FIXME: absoluteDivWithDigitDivisor doesn't
+ // destructivelyTrimHighZeroDigits for in-place divisions, leading to
+ // worse constant factors. See
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1510213.
+ RootedBigInt dividend(cx, x);
+ do {
+ Digit chunk;
+ if (!absoluteDivWithDigitDivisor(cx, dividend, chunkDivisor, Some(&rest),
+ &chunk, dividend->isNegative())) {
+ return nullptr;
+ }
+
+ dividend = rest;
+ for (unsigned i = 0; i < chunkChars; i++) {
+ MOZ_ASSERT(writePos > 0);
+ resultString[--writePos] = radixDigits[chunk % radix];
+ chunk /= radix;
+ }
+ MOZ_ASSERT(!chunk);
+
+ if (!rest->digit(nonZeroDigit)) {
+ nonZeroDigit--;
+ }
+
+ MOZ_ASSERT(rest->digit(nonZeroDigit) != 0,
+ "division by a single digit can't remove more than one "
+ "digit from a number");
+ } while (nonZeroDigit > 0);
+
+ lastDigit = rest->digit(0);
+ }
+
+ do {
+ MOZ_ASSERT(writePos > 0);
+ resultString[--writePos] = radixDigits[lastDigit % radix];
+ lastDigit /= radix;
+ } while (lastDigit > 0);
+ MOZ_ASSERT(writePos < maximumCharactersRequired);
+ MOZ_ASSERT(maximumCharactersRequired - writePos <=
+ static_cast<size_t>(maximumCharactersRequired));
+
+ // Remove leading zeroes.
+ while (writePos + 1 < maximumCharactersRequired &&
+ resultString[writePos] == '0') {
+ writePos++;
+ }
+
+ if (x->isNegative()) {
+ MOZ_ASSERT(writePos > 0);
+ resultString[--writePos] = '-';
+ }
+
+ MOZ_ASSERT(writePos < maximumCharactersRequired);
+ // Would be better to somehow adopt resultString directly.
+ return NewStringCopyN<CanGC>(cx, resultString.get() + writePos,
+ maximumCharactersRequired - writePos);
+}
+
+static void FreeDigits(JSContext* cx, BigInt* bi, BigInt::Digit* digits,
+ size_t nbytes) {
+ MOZ_ASSERT(cx->isMainThreadContext());
+
+ if (bi->isTenured()) {
+ MOZ_ASSERT(!cx->nursery().isInside(digits));
+ js_free(digits);
+ } else {
+ cx->nursery().freeBuffer(digits, nbytes);
+ }
+}
+
+BigInt* BigInt::destructivelyTrimHighZeroDigits(JSContext* cx, BigInt* x) {
+ if (x->isZero()) {
+ MOZ_ASSERT(!x->isNegative());
+ return x;
+ }
+ MOZ_ASSERT(x->digitLength());
+
+ int nonZeroIndex = x->digitLength() - 1;
+ while (nonZeroIndex >= 0 && x->digit(nonZeroIndex) == 0) {
+ nonZeroIndex--;
+ }
+
+ if (nonZeroIndex < 0) {
+ return zero(cx);
+ }
+
+ if (nonZeroIndex == static_cast<int>(x->digitLength() - 1)) {
+ return x;
+ }
+
+ unsigned newLength = nonZeroIndex + 1;
+
+ if (newLength > InlineDigitsLength) {
+ MOZ_ASSERT(x->hasHeapDigits());
+
+ size_t oldLength = x->digitLength();
+ Digit* newdigits =
+ js::ReallocateBigIntDigits(cx, x, x->heapDigits_, oldLength, newLength);
+ if (!newdigits) {
+ return nullptr;
+ }
+ x->heapDigits_ = newdigits;
+
+ RemoveCellMemory(x, oldLength * sizeof(Digit), js::MemoryUse::BigIntDigits);
+ AddCellMemory(x, newLength * sizeof(Digit), js::MemoryUse::BigIntDigits);
+ } else {
+ if (x->hasHeapDigits()) {
+ Digit digits[InlineDigitsLength];
+ std::copy_n(x->heapDigits_, InlineDigitsLength, digits);
+
+ size_t nbytes = x->digitLength() * sizeof(Digit);
+ FreeDigits(cx, x, x->heapDigits_, nbytes);
+ RemoveCellMemory(x, nbytes, js::MemoryUse::BigIntDigits);
+
+ std::copy_n(digits, InlineDigitsLength, x->inlineDigits_);
+ }
+ }
+
+ x->setLengthAndFlags(newLength, x->isNegative() ? SignBit : 0);
+
+ return x;
+}
+
+// The maximum value `radix**charcount - 1` must be representable in `N`
+// digits, whose maximum value is `2**(N * DigitBits) - 1`, so
+//
+// 2**(N * DigitBits) - 1 ≥ radix**charcount - 1
+// 2**(N * DigitBits) ≥ radix**charcount
+// N * DigitBits ≥ log2(radix**charcount)
+// N * DigitBits ≥ charcount * log2(radix)
+// N ≥ ⌈charcount * log2(radix) / DigitBits⌉ (conservatively)
+//
+// or in the code's terms (all numbers promoted to exact mathematical values),
+//
+// N ≥ ⌈charcount * bitsPerChar / (DigitBits * bitsPerCharTableMultiplier)⌉
+//
+// Note that `N` is computed even more conservatively here because `bitsPerChar`
+// is rounded up.
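+//
+// Worked example (assuming 64-bit digits): radix == 10 and charcount == 20
+// give N == CeilDiv(20 * 107, 64 * 32) == CeilDiv(2140, 2048) == 2, and
+// indeed any 20-digit decimal number (below 10**20 < 2**67) fits in two
+// 64-bit digits.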
+bool BigInt::calculateMaximumDigitsRequired(JSContext* cx, uint8_t radix,
+ size_t charcount, size_t* result) {
+ MOZ_ASSERT(2 <= radix && radix <= 36);
+
+ uint8_t bitsPerChar = maxBitsPerCharTable[radix];
+
+ MOZ_ASSERT(charcount > 0);
+ MOZ_ASSERT(charcount <= std::numeric_limits<uint64_t>::max() / bitsPerChar);
+ static_assert(
+ MaxDigitLength < std::numeric_limits<size_t>::max(),
+ "can't safely cast calculateMaximumDigitsRequired result to size_t");
+
+ uint64_t n = CeilDiv(static_cast<uint64_t>(charcount) * bitsPerChar,
+ DigitBits * bitsPerCharTableMultiplier);
+ if (n > MaxDigitLength) {
+ ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE);
+ return false;
+ }
+
+ *result = n;
+ return true;
+}
+
+template <typename CharT>
+BigInt* BigInt::parseLiteralDigits(JSContext* cx,
+ const Range<const CharT> chars,
+ unsigned radix, bool isNegative,
+ bool* haveParseError, gc::Heap heap) {
+ static_assert(
+ std::is_same_v<CharT, JS::Latin1Char> || std::is_same_v<CharT, char16_t>,
+ "only the bare minimum character types are supported, to avoid "
+ "excessively instantiating this template");
+
+ MOZ_ASSERT(chars.length());
+
+ RangedPtr<const CharT> start = chars.begin();
+ RangedPtr<const CharT> end = chars.end();
+
+  // Skip leading zeroes.
+ while (start[0] == '0') {
+ start++;
+ if (start == end) {
+ return zero(cx, heap);
+ }
+ }
+
+ unsigned limit0 = '0' + std::min(radix, 10u);
+ unsigned limita = 'a' + (radix - 10);
+ unsigned limitA = 'A' + (radix - 10);
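+  // (Note: when radix < 10, the unsigned wraparound in `radix - 10` makes
+  // `limita` and `limitA` wrap below 'a' and 'A', so the letter branches
+  // below never match.)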
+
+ size_t length;
+ if (!calculateMaximumDigitsRequired(cx, radix, end - start, &length)) {
+ return nullptr;
+ }
+ BigInt* result = createUninitialized(cx, length, isNegative, heap);
+ if (!result) {
+ return nullptr;
+ }
+
+ result->initializeDigitsToZero();
+
+ for (; start < end; start++) {
+ uint32_t digit;
+ CharT c = *start;
+ if (c >= '0' && c < limit0) {
+ digit = c - '0';
+ } else if (c >= 'a' && c < limita) {
+ digit = c - 'a' + 10;
+ } else if (c >= 'A' && c < limitA) {
+ digit = c - 'A' + 10;
+ } else {
+ *haveParseError = true;
+ return nullptr;
+ }
+
+ result->inplaceMultiplyAdd(static_cast<Digit>(radix),
+ static_cast<Digit>(digit));
+ }
+
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+// BigInt proposal section 7.2
+template <typename CharT>
+BigInt* BigInt::parseLiteral(JSContext* cx, const Range<const CharT> chars,
+ bool* haveParseError, js::gc::Heap heap) {
+ RangedPtr<const CharT> start = chars.begin();
+ const RangedPtr<const CharT> end = chars.end();
+ bool isNegative = false;
+
+ MOZ_ASSERT(chars.length());
+
+ if (end - start > 2 && start[0] == '0') {
+ if (start[1] == 'b' || start[1] == 'B') {
+ // StringNumericLiteral ::: BinaryIntegerLiteral
+ return parseLiteralDigits(cx, Range<const CharT>(start + 2, end), 2,
+ isNegative, haveParseError, heap);
+ }
+ if (start[1] == 'x' || start[1] == 'X') {
+ // StringNumericLiteral ::: HexIntegerLiteral
+ return parseLiteralDigits(cx, Range<const CharT>(start + 2, end), 16,
+ isNegative, haveParseError, heap);
+ }
+ if (start[1] == 'o' || start[1] == 'O') {
+ // StringNumericLiteral ::: OctalIntegerLiteral
+ return parseLiteralDigits(cx, Range<const CharT>(start + 2, end), 8,
+ isNegative, haveParseError, heap);
+ }
+ }
+
+ return parseLiteralDigits(cx, Range<const CharT>(start, end), 10, isNegative,
+ haveParseError, heap);
+}
+
+// Determine whether a literal is zero, skipping over any radix-selection
+// prefix and leading zeroes.
+template <typename CharT>
+bool BigInt::literalIsZero(const Range<const CharT> chars) {
+ RangedPtr<const CharT> start = chars.begin();
+ const RangedPtr<const CharT> end = chars.end();
+
+ MOZ_ASSERT(chars.length());
+
+ // Skip over radix selector.
+ if (end - start > 2 && start[0] == '0') {
+ if (start[1] == 'b' || start[1] == 'B' || start[1] == 'x' ||
+ start[1] == 'X' || start[1] == 'o' || start[1] == 'O') {
+ start += 2;
+ }
+ }
+
+  // Skip leading zeroes.
+ while (start[0] == '0') {
+ start++;
+ if (start == end) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+template bool BigInt::literalIsZero(const Range<const char16_t> chars);
+
+BigInt* BigInt::createFromDouble(JSContext* cx, double d) {
+ MOZ_ASSERT(IsInteger(d), "Only integer-valued doubles can convert to BigInt");
+
+ if (d == 0) {
+ return zero(cx);
+ }
+
+ int exponent = mozilla::ExponentComponent(d);
+ MOZ_ASSERT(exponent >= 0);
+ int length = exponent / DigitBits + 1;
+ BigInt* result = createUninitialized(cx, length, d < 0);
+ if (!result) {
+ return nullptr;
+ }
+
+ // We construct a BigInt from the double `d` by shifting its mantissa
+ // according to its exponent and mapping the bit pattern onto digits.
+ //
+ // <----------- bitlength = exponent + 1 ----------->
+ // <----- 52 ------> <------ trailing zeroes ------>
+ // mantissa: 1yyyyyyyyyyyyyyyyy 0000000000000000000000000000000
+ // digits: 0001xxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+ // <--> <------>
+ // msdTopBits DigitBits
+ //
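+  // Illustrative example (64-bit digits): d == 10.0 has exponent 3, so
+  // length == 1 and msdTopBit == 3 < mantissaTopBit == 52; the single digit
+  // is mantissa >> (52 - 3) == 10, and the shifted-out mantissa is zero.
+  //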
+ using Double = mozilla::FloatingPoint<double>;
+ uint64_t mantissa =
+ mozilla::BitwiseCast<uint64_t>(d) & Double::kSignificandBits;
+ // Add implicit high bit.
+ mantissa |= 1ull << Double::kSignificandWidth;
+
+ const int mantissaTopBit = Double::kSignificandWidth; // 0-indexed.
+
+ // 0-indexed position of `d`'s most significant bit within the `msd`.
+ int msdTopBit = exponent % DigitBits;
+
+ // Next digit under construction.
+ Digit digit;
+
+ // First, build the MSD by shifting the mantissa appropriately.
+ if (msdTopBit < mantissaTopBit) {
+ int remainingMantissaBits = mantissaTopBit - msdTopBit;
+ digit = mantissa >> remainingMantissaBits;
+ mantissa = mantissa << (64 - remainingMantissaBits);
+ } else {
+ MOZ_ASSERT(msdTopBit >= mantissaTopBit);
+ digit = mantissa << (msdTopBit - mantissaTopBit);
+ mantissa = 0;
+ }
+ MOZ_ASSERT(digit != 0, "most significant digit should not be zero");
+ result->setDigit(--length, digit);
+
+ // Fill in digits containing mantissa contributions.
+ while (mantissa) {
+ MOZ_ASSERT(length > 0,
+ "double bits were all non-fractional, so there must be "
+ "digits present to hold them");
+
+ if (DigitBits == 64) {
+ result->setDigit(--length, mantissa);
+ break;
+ }
+
+ MOZ_ASSERT(DigitBits == 32);
+ Digit current = mantissa >> 32;
+ mantissa = mantissa << 32;
+ result->setDigit(--length, current);
+ }
+
+ // Fill in low-order zeroes.
+ for (int i = length - 1; i >= 0; i--) {
+ result->setDigit(i, 0);
+ }
+
+ return result;
+}
+
+BigInt* BigInt::createFromUint64(JSContext* cx, uint64_t n) {
+ if (n == 0) {
+ return zero(cx);
+ }
+
+ const bool isNegative = false;
+
+ if (DigitBits == 32) {
+ Digit low = n;
+ Digit high = n >> 32;
+ size_t length = high ? 2 : 1;
+
+ BigInt* res = createUninitialized(cx, length, isNegative);
+ if (!res) {
+ return nullptr;
+ }
+ res->setDigit(0, low);
+ if (high) {
+ res->setDigit(1, high);
+ }
+ return res;
+ }
+
+ return createFromDigit(cx, n, isNegative);
+}
+
+BigInt* BigInt::createFromInt64(JSContext* cx, int64_t n) {
+ BigInt* res = createFromUint64(cx, Abs(n));
+ if (!res) {
+ return nullptr;
+ }
+
+ if (n < 0) {
+ res->setHeaderFlagBit(SignBit);
+ }
+ MOZ_ASSERT(res->isNegative() == (n < 0));
+
+ return res;
+}
+
+// BigInt proposal section 5.1.2
+BigInt* js::NumberToBigInt(JSContext* cx, double d) {
+ // Step 1 is an assertion checked by the caller.
+ // Step 2.
+ if (!IsInteger(d)) {
+ ToCStringBuf cbuf;
+ const char* str = NumberToCString(&cbuf, d);
+ MOZ_ASSERT(str);
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_NONINTEGER_NUMBER_TO_BIGINT, str);
+ return nullptr;
+ }
+
+ // Step 3.
+ return BigInt::createFromDouble(cx, d);
+}
+
+BigInt* BigInt::copy(JSContext* cx, HandleBigInt x, gc::Heap heap) {
+ if (x->isZero()) {
+ return zero(cx, heap);
+ }
+
+ BigInt* result =
+ createUninitialized(cx, x->digitLength(), x->isNegative(), heap);
+ if (!result) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < x->digitLength(); i++) {
+ result->setDigit(i, x->digit(i));
+ }
+ return result;
+}
+
+// BigInt proposal section 1.1.7
+BigInt* BigInt::add(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ bool xNegative = x->isNegative();
+
+ // x + y == x + y
+ // -x + -y == -(x + y)
+ if (xNegative == y->isNegative()) {
+ return absoluteAdd(cx, x, y, xNegative);
+ }
+
+ // x + -y == x - y == -(y - x)
+ // -x + y == y - x == -(x - y)
+ int8_t compare = absoluteCompare(x, y);
+ if (compare == 0) {
+ return zero(cx);
+ }
+
+ if (compare > 0) {
+ return absoluteSub(cx, x, y, xNegative);
+ }
+
+ return absoluteSub(cx, y, x, !xNegative);
+}
+
+// BigInt proposal section 1.1.8
+BigInt* BigInt::sub(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ bool xNegative = x->isNegative();
+ if (xNegative != y->isNegative()) {
+ // x - (-y) == x + y
+ // (-x) - y == -(x + y)
+ return absoluteAdd(cx, x, y, xNegative);
+ }
+
+ // x - y == -(y - x)
+ // (-x) - (-y) == y - x == -(x - y)
+ int8_t compare = absoluteCompare(x, y);
+ if (compare == 0) {
+ return zero(cx);
+ }
+
+ if (compare > 0) {
+ return absoluteSub(cx, x, y, xNegative);
+ }
+
+ return absoluteSub(cx, y, x, !xNegative);
+}
+
+// BigInt proposal section 1.1.4
+BigInt* BigInt::mul(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ if (x->isZero()) {
+ return x;
+ }
+ if (y->isZero()) {
+ return y;
+ }
+
+ bool resultNegative = x->isNegative() != y->isNegative();
+
+ // Fast path for the likely-common case of up to a uint64_t of magnitude.
+ if (x->absFitsInUint64() && y->absFitsInUint64()) {
+ uint64_t lhs = x->uint64FromAbsNonZero();
+ uint64_t rhs = y->uint64FromAbsNonZero();
+
+ uint64_t res;
+ if (js::SafeMul(lhs, rhs, &res)) {
+ MOZ_ASSERT(res != 0);
+ return createFromNonZeroRawUint64(cx, res, resultNegative);
+ }
+ }
+
+ unsigned resultLength = x->digitLength() + y->digitLength();
+ BigInt* result = createUninitialized(cx, resultLength, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+ result->initializeDigitsToZero();
+
+ for (size_t i = 0; i < x->digitLength(); i++) {
+ multiplyAccumulate(y, x->digit(i), result, i);
+ }
+
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+// BigInt proposal section 1.1.5
+BigInt* BigInt::div(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ // 1. If y is 0n, throw a RangeError exception.
+ if (y->isZero()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_DIVISION_BY_ZERO);
+ return nullptr;
+ }
+
+ // 2. Let quotient be the mathematical value of x divided by y.
+ // 3. Return a BigInt representing quotient rounded towards 0 to the next
+ // integral value.
+ if (x->isZero()) {
+ return x;
+ }
+
+ if (absoluteCompare(x, y) < 0) {
+ return zero(cx);
+ }
+
+ RootedBigInt quotient(cx);
+ bool resultNegative = x->isNegative() != y->isNegative();
+ if (y->digitLength() == 1) {
+ Digit divisor = y->digit(0);
+ if (divisor == 1) {
+ return resultNegative == x->isNegative() ? x : neg(cx, x);
+ }
+
+ Digit remainder;
+ if (!absoluteDivWithDigitDivisor(cx, x, divisor, Some(&quotient),
+ &remainder, resultNegative)) {
+ return nullptr;
+ }
+ } else {
+ if (!absoluteDivWithBigIntDivisor(cx, x, y, Some(&quotient), Nothing(),
+ resultNegative)) {
+ return nullptr;
+ }
+ }
+
+ return destructivelyTrimHighZeroDigits(cx, quotient);
+}
+
+// BigInt proposal section 1.1.6
+BigInt* BigInt::mod(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ // 1. If y is 0n, throw a RangeError exception.
+ if (y->isZero()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_DIVISION_BY_ZERO);
+ return nullptr;
+ }
+
+ // 2. If x is 0n, return x.
+ if (x->isZero()) {
+ return x;
+ }
+ // 3. Let r be the BigInt defined by the mathematical relation r = x - (y ×
+ // q) where q is a BigInt that is negative only if x/y is negative and
+ // positive only if x/y is positive, and whose magnitude is as large as
+ // possible without exceeding the magnitude of the true mathematical
+ // quotient of x and y.
+ if (absoluteCompare(x, y) < 0) {
+ return x;
+ }
+
+ if (y->digitLength() == 1) {
+ Digit divisor = y->digit(0);
+ if (divisor == 1) {
+ return zero(cx);
+ }
+
+ Digit remainderDigit;
+ bool unusedQuotientNegative = false;
+ if (!absoluteDivWithDigitDivisor(cx, x, divisor, Nothing(), &remainderDigit,
+ unusedQuotientNegative)) {
+ MOZ_CRASH("BigInt div by digit failed unexpectedly");
+ }
+
+ if (!remainderDigit) {
+ return zero(cx);
+ }
+
+ return createFromDigit(cx, remainderDigit, x->isNegative());
+ } else {
+ RootedBigInt remainder(cx);
+ if (!absoluteDivWithBigIntDivisor(cx, x, y, Nothing(), Some(&remainder),
+ x->isNegative())) {
+ return nullptr;
+ }
+ MOZ_ASSERT(remainder);
+ return destructivelyTrimHighZeroDigits(cx, remainder);
+ }
+}
+
+// BigInt proposal section 1.1.3
+BigInt* BigInt::pow(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ // 1. If exponent is < 0, throw a RangeError exception.
+ if (y->isNegative()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_NEGATIVE_EXPONENT);
+ return nullptr;
+ }
+
+ // 2. If base is 0n and exponent is 0n, return 1n.
+ if (y->isZero()) {
+ return one(cx);
+ }
+
+ if (x->isZero()) {
+ return x;
+ }
+
+ // 3. Return a BigInt representing the mathematical value of base raised
+ // to the power exponent.
+ if (x->digitLength() == 1 && x->digit(0) == 1) {
+ // (-1) ** even_number == 1.
+ if (x->isNegative() && (y->digit(0) & 1) == 0) {
+ return neg(cx, x);
+ }
+ // (-1) ** odd_number == -1; 1 ** anything == 1.
+ return x;
+ }
+
+ // For all bases >= 2, very large exponents would lead to unrepresentable
+ // results.
+ static_assert(MaxBitLength < std::numeric_limits<Digit>::max(),
+ "unexpectedly large MaxBitLength");
+ if (y->digitLength() > 1) {
+ ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE);
+ return nullptr;
+ }
+ Digit exponent = y->digit(0);
+ if (exponent == 1) {
+ return x;
+ }
+ if (exponent >= MaxBitLength) {
+ ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE);
+ return nullptr;
+ }
+
+ static_assert(MaxBitLength <= std::numeric_limits<int>::max(),
+ "unexpectedly large MaxBitLength");
+ int n = static_cast<int>(exponent);
+ bool isOddPower = n & 1;
+
+ if (x->digitLength() == 1 && mozilla::IsPowerOfTwo(x->digit(0))) {
+ // Fast path for (2^m)^n.
+
+ // Result is negative for odd powers.
+ bool resultNegative = x->isNegative() && isOddPower;
+
+ unsigned m = mozilla::FloorLog2(x->digit(0));
+ MOZ_ASSERT(m < DigitBits);
+
+ static_assert(MaxBitLength * DigitBits > MaxBitLength,
+ "n * m can't overflow");
+ n *= int(m);
+
+ int length = 1 + (n / DigitBits);
+ BigInt* result = createUninitialized(cx, length, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+ result->initializeDigitsToZero();
+ result->setDigit(length - 1, static_cast<Digit>(1) << (n % DigitBits));
+ return result;
+ }
+
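+  // What follows is binary exponentiation ("square-and-multiply"): square
+  // the base once per remaining exponent bit and multiply the square into
+  // the result for each set bit. Illustrative trace for n == 5: result = x
+  // (low bit set) and n becomes 2; square to x**2 (bit clear, n becomes 1);
+  // square to x**4 and, the bit being set, result = x * x**4 == x**5.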
+ RootedBigInt runningSquare(cx, x);
+ RootedBigInt result(cx, isOddPower ? x : nullptr);
+ n /= 2;
+
+ // Fast path for the likely-common case of up to a uint64_t of magnitude.
+ if (x->absFitsInUint64()) {
+ bool resultNegative = x->isNegative() && isOddPower;
+
+ uint64_t runningSquareInt = x->uint64FromAbsNonZero();
+ uint64_t resultInt = isOddPower ? runningSquareInt : 1;
+ while (true) {
+ uint64_t runningSquareStart = runningSquareInt;
+ uint64_t r;
+ if (!js::SafeMul(runningSquareInt, runningSquareInt, &r)) {
+ break;
+ }
+ runningSquareInt = r;
+
+ if (n & 1) {
+ if (!js::SafeMul(resultInt, runningSquareInt, &r)) {
+ // Recover |runningSquare| before we restart the loop.
+ runningSquareInt = runningSquareStart;
+ break;
+ }
+ resultInt = r;
+ }
+
+ n /= 2;
+ if (n == 0) {
+ return createFromNonZeroRawUint64(cx, resultInt, resultNegative);
+ }
+ }
+
+ runningSquare = createFromNonZeroRawUint64(cx, runningSquareInt, false);
+ if (!runningSquare) {
+ return nullptr;
+ }
+
+ result = createFromNonZeroRawUint64(cx, resultInt, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+ }
+
+ // This implicitly sets the result's sign correctly.
+ while (true) {
+ runningSquare = mul(cx, runningSquare, runningSquare);
+ if (!runningSquare) {
+ return nullptr;
+ }
+
+ if (n & 1) {
+ if (!result) {
+ result = runningSquare;
+ } else {
+ result = mul(cx, result, runningSquare);
+ if (!result) {
+ return nullptr;
+ }
+ }
+ }
+
+ n /= 2;
+ if (n == 0) {
+ return result;
+ }
+ }
+}
+
+BigInt* BigInt::lshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ if (x->isZero() || y->isZero()) {
+ return x;
+ }
+
+ if (y->digitLength() > 1 || y->digit(0) > MaxBitLength) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_TOO_LARGE);
+ if (js::SupportDifferentialTesting()) {
+ fprintf(stderr, "ReportOutOfMemory called\n");
+ }
+ return nullptr;
+ }
+ Digit shift = y->digit(0);
+ int digitShift = static_cast<int>(shift / DigitBits);
+ int bitsShift = static_cast<int>(shift % DigitBits);
+ int length = x->digitLength();
+ bool grow = bitsShift && (x->digit(length - 1) >> (DigitBits - bitsShift));
+ int resultLength = length + digitShift + grow;
+ BigInt* result = createUninitialized(cx, resultLength, x->isNegative());
+ if (!result) {
+ return nullptr;
+ }
+
+ int i = 0;
+ for (; i < digitShift; i++) {
+ result->setDigit(i, 0);
+ }
+
+ if (bitsShift == 0) {
+ for (int j = 0; i < resultLength; i++, j++) {
+ result->setDigit(i, x->digit(j));
+ }
+ } else {
+ Digit carry = 0;
+ for (int j = 0; j < length; i++, j++) {
+ Digit d = x->digit(j);
+ result->setDigit(i, (d << bitsShift) | carry);
+ carry = d >> (DigitBits - bitsShift);
+ }
+ if (grow) {
+ result->setDigit(i, carry);
+ } else {
+ MOZ_ASSERT(!carry);
+ }
+ }
+ return result;
+}
+
+BigInt* BigInt::rshByMaximum(JSContext* cx, bool isNegative) {
+ return isNegative ? negativeOne(cx) : zero(cx);
+}
+
+BigInt* BigInt::rshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ if (x->isZero() || y->isZero()) {
+ return x;
+ }
+
+ if (y->digitLength() > 1 || y->digit(0) >= MaxBitLength) {
+ return rshByMaximum(cx, x->isNegative());
+ }
+ Digit shift = y->digit(0);
+ int length = x->digitLength();
+ int digitShift = static_cast<int>(shift / DigitBits);
+ int bitsShift = static_cast<int>(shift % DigitBits);
+ int resultLength = length - digitShift;
+ if (resultLength <= 0) {
+ return rshByMaximum(cx, x->isNegative());
+ }
+ // For negative numbers, round down if any bit was shifted out (so that e.g.
+ // -5n >> 1n == -3n and not -2n). Check now whether this will happen and
+ // whether it can cause overflow into a new digit. If we allocate the result
+ // large enough up front, it avoids having to do a second allocation later.
+ bool mustRoundDown = false;
+ if (x->isNegative()) {
+ const Digit mask = (static_cast<Digit>(1) << bitsShift) - 1;
+ if ((x->digit(digitShift) & mask)) {
+ mustRoundDown = true;
+ } else {
+ for (int i = 0; i < digitShift; i++) {
+ if (x->digit(i)) {
+ mustRoundDown = true;
+ break;
+ }
+ }
+ }
+ }
+  // If bitsShift is non-zero, it frees up bits, preventing overflow.
+ if (mustRoundDown && bitsShift == 0) {
+ // Overflow cannot happen if the most significant digit has unset bits.
+ Digit msd = x->digit(length - 1);
+ bool roundingCanOverflow = msd == std::numeric_limits<Digit>::max();
+ if (roundingCanOverflow) {
+ resultLength++;
+ }
+ }
+
+ MOZ_ASSERT(resultLength <= length);
+ RootedBigInt result(cx,
+ createUninitialized(cx, resultLength, x->isNegative()));
+ if (!result) {
+ return nullptr;
+ }
+ if (!bitsShift) {
+ // If roundingCanOverflow, manually initialize the overflow digit.
+ result->setDigit(resultLength - 1, 0);
+ for (int i = digitShift; i < length; i++) {
+ result->setDigit(i - digitShift, x->digit(i));
+ }
+ } else {
+ Digit carry = x->digit(digitShift) >> bitsShift;
+ int last = length - digitShift - 1;
+ for (int i = 0; i < last; i++) {
+ Digit d = x->digit(i + digitShift + 1);
+ result->setDigit(i, (d << (DigitBits - bitsShift)) | carry);
+ carry = d >> bitsShift;
+ }
+ result->setDigit(last, carry);
+ }
+
+ if (mustRoundDown) {
+ MOZ_ASSERT(x->isNegative());
+ // Since the result is negative, rounding down means adding one to
+ // its absolute value. This cannot overflow. TODO: modify the result in
+ // place.
+ return absoluteAddOne(cx, result, x->isNegative());
+ }
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+// BigInt proposal section 1.1.9. BigInt::leftShift ( x, y )
+BigInt* BigInt::lsh(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ if (y->isNegative()) {
+ return rshByAbsolute(cx, x, y);
+ }
+ return lshByAbsolute(cx, x, y);
+}
+
+// BigInt proposal section 1.1.10. BigInt::signedRightShift ( x, y )
+BigInt* BigInt::rsh(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ if (y->isNegative()) {
+ return lshByAbsolute(cx, x, y);
+ }
+ return rshByAbsolute(cx, x, y);
+}
+
+// BigInt proposal section 1.1.17. BigInt::bitwiseAND ( x, y )
+BigInt* BigInt::bitAnd(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ if (x->isZero()) {
+ return x;
+ }
+
+ if (y->isZero()) {
+ return y;
+ }
+
+ if (!x->isNegative() && !y->isNegative()) {
+ return absoluteAnd(cx, x, y);
+ }
+
+ if (x->isNegative() && y->isNegative()) {
+ // (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1))
+ // == -(((x-1) | (y-1)) + 1)
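+    // For example, (-2n) & (-3n) == -((1n | 2n) + 1n) == -4n.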
+ RootedBigInt x1(cx, absoluteSubOne(cx, x));
+ if (!x1) {
+ return nullptr;
+ }
+ RootedBigInt y1(cx, absoluteSubOne(cx, y));
+ if (!y1) {
+ return nullptr;
+ }
+ RootedBigInt result(cx, absoluteOr(cx, x1, y1));
+ if (!result) {
+ return nullptr;
+ }
+ bool resultNegative = true;
+ return absoluteAddOne(cx, result, resultNegative);
+ }
+
+ MOZ_ASSERT(x->isNegative() != y->isNegative());
+ HandleBigInt& pos = x->isNegative() ? y : x;
+ HandleBigInt& neg = x->isNegative() ? x : y;
+
+ RootedBigInt neg1(cx, absoluteSubOne(cx, neg));
+ if (!neg1) {
+ return nullptr;
+ }
+
+  // x & (-y) == x & ~(y-1) == x &~ (y-1)
+ return absoluteAndNot(cx, pos, neg1);
+}
+
+// BigInt proposal section 1.1.18. BigInt::bitwiseXOR ( x, y )
+BigInt* BigInt::bitXor(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ if (x->isZero()) {
+ return y;
+ }
+
+ if (y->isZero()) {
+ return x;
+ }
+
+ if (!x->isNegative() && !y->isNegative()) {
+ return absoluteXor(cx, x, y);
+ }
+
+ if (x->isNegative() && y->isNegative()) {
+ // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1)
+ RootedBigInt x1(cx, absoluteSubOne(cx, x));
+ if (!x1) {
+ return nullptr;
+ }
+ RootedBigInt y1(cx, absoluteSubOne(cx, y));
+ if (!y1) {
+ return nullptr;
+ }
+ return absoluteXor(cx, x1, y1);
+ }
+ MOZ_ASSERT(x->isNegative() != y->isNegative());
+
+ HandleBigInt& pos = x->isNegative() ? y : x;
+ HandleBigInt& neg = x->isNegative() ? x : y;
+
+ // x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1)
+ RootedBigInt result(cx, absoluteSubOne(cx, neg));
+ if (!result) {
+ return nullptr;
+ }
+ result = absoluteXor(cx, result, pos);
+ if (!result) {
+ return nullptr;
+ }
+ bool resultNegative = true;
+ return absoluteAddOne(cx, result, resultNegative);
+}
+
+// BigInt proposal section 1.1.19. BigInt::bitwiseOR ( x, y )
+BigInt* BigInt::bitOr(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+ if (x->isZero()) {
+ return y;
+ }
+
+ if (y->isZero()) {
+ return x;
+ }
+
+ bool resultNegative = x->isNegative() || y->isNegative();
+
+ if (!resultNegative) {
+ return absoluteOr(cx, x, y);
+ }
+
+ if (x->isNegative() && y->isNegative()) {
+ // (-x) | (-y) == ~(x-1) | ~(y-1) == ~((x-1) & (y-1))
+ // == -(((x-1) & (y-1)) + 1)
+ RootedBigInt result(cx, absoluteSubOne(cx, x));
+ if (!result) {
+ return nullptr;
+ }
+ RootedBigInt y1(cx, absoluteSubOne(cx, y));
+ if (!y1) {
+ return nullptr;
+ }
+ result = absoluteAnd(cx, result, y1);
+ if (!result) {
+ return nullptr;
+ }
+ return absoluteAddOne(cx, result, resultNegative);
+ }
+
+ MOZ_ASSERT(x->isNegative() != y->isNegative());
+ HandleBigInt& pos = x->isNegative() ? y : x;
+ HandleBigInt& neg = x->isNegative() ? x : y;
+
+ // x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1)
+ RootedBigInt result(cx, absoluteSubOne(cx, neg));
+ if (!result) {
+ return nullptr;
+ }
+ result = absoluteAndNot(cx, result, pos);
+ if (!result) {
+ return nullptr;
+ }
+ return absoluteAddOne(cx, result, resultNegative);
+}
+
+// BigInt proposal section 1.1.2. BigInt::bitwiseNOT ( x )
+BigInt* BigInt::bitNot(JSContext* cx, HandleBigInt x) {
+ if (x->isNegative()) {
+ // ~(-x) == ~(~(x-1)) == x-1
+ return absoluteSubOne(cx, x);
+ } else {
+ // ~x == -x-1 == -(x+1)
+ bool resultNegative = true;
+ return absoluteAddOne(cx, x, resultNegative);
+ }
+}
+
+int64_t BigInt::toInt64(const BigInt* x) { return WrapToSigned(toUint64(x)); }
+
+uint64_t BigInt::toUint64(const BigInt* x) {
+ if (x->isZero()) {
+ return 0;
+ }
+
+ uint64_t digit = x->uint64FromAbsNonZero();
+
+ // Return the two's complement if x is negative.
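+  // (For example, x == -1n has digit == 1, so we return ~0 == UINT64_MAX.)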
+ if (x->isNegative()) {
+ return ~(digit - 1);
+ }
+
+ return digit;
+}
+
+bool BigInt::isInt64(BigInt* x, int64_t* result) {
+ MOZ_MAKE_MEM_UNDEFINED(result, sizeof(*result));
+
+ if (!x->absFitsInUint64()) {
+ return false;
+ }
+
+ if (x->isZero()) {
+ *result = 0;
+ return true;
+ }
+
+ uint64_t magnitude = x->uint64FromAbsNonZero();
+
+ if (x->isNegative()) {
+ constexpr uint64_t Int64MinMagnitude = uint64_t(1) << 63;
+ if (magnitude <= Int64MinMagnitude) {
+ *result = magnitude == Int64MinMagnitude
+ ? std::numeric_limits<int64_t>::min()
+ : -AssertedCast<int64_t>(magnitude);
+ return true;
+ }
+ } else {
+ if (magnitude <=
+ static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
+ *result = AssertedCast<int64_t>(magnitude);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool BigInt::isUint64(BigInt* x, uint64_t* result) {
+ MOZ_MAKE_MEM_UNDEFINED(result, sizeof(*result));
+
+ if (!x->absFitsInUint64() || x->isNegative()) {
+ return false;
+ }
+
+ if (x->isZero()) {
+ *result = 0;
+ return true;
+ }
+
+ *result = x->uint64FromAbsNonZero();
+ return true;
+}
+
+bool BigInt::isNumber(BigInt* x, double* result) {
+ MOZ_MAKE_MEM_UNDEFINED(result, sizeof(*result));
+
+ if (!x->absFitsInUint64()) {
+ return false;
+ }
+
+ if (x->isZero()) {
+ *result = 0;
+ return true;
+ }
+
+ uint64_t magnitude = x->uint64FromAbsNonZero();
+ if (magnitude < uint64_t(DOUBLE_INTEGRAL_PRECISION_LIMIT)) {
+ *result = x->isNegative() ? -double(magnitude) : double(magnitude);
+ return true;
+ }
+
+ return false;
+}
+
+// Compute `2**bits - (x & (2**bits - 1))`. Used when treating BigInt values as
+// arbitrary-precision two's complement signed integers.
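+//
+// For example, with |x| == 3 and bits == 8 this computes 2**8 - 3 == 253,
+// which is how BigInt.asUintN(8, -3n) produces 253n.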
+BigInt* BigInt::truncateAndSubFromPowerOfTwo(JSContext* cx, HandleBigInt x,
+ uint64_t bits,
+ bool resultNegative) {
+ MOZ_ASSERT(bits != 0);
+ MOZ_ASSERT(!x->isZero());
+
+ if (bits > MaxBitLength) {
+ ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE);
+ return nullptr;
+ }
+
+ size_t resultLength = CeilDiv(bits, DigitBits);
+ BigInt* result = createUninitialized(cx, resultLength, resultNegative);
+ if (!result) {
+ return nullptr;
+ }
+
+ // Process all digits except the MSD.
+ size_t xLength = x->digitLength();
+ Digit borrow = 0;
+ // Take digits from `x` until its length is exhausted.
+ for (size_t i = 0; i < std::min(resultLength - 1, xLength); i++) {
+ Digit newBorrow = 0;
+ Digit difference = digitSub(0, x->digit(i), &newBorrow);
+ difference = digitSub(difference, borrow, &newBorrow);
+ result->setDigit(i, difference);
+ borrow = newBorrow;
+ }
+ // Then simulate leading zeroes in `x` as needed.
+ for (size_t i = xLength; i < resultLength - 1; i++) {
+ Digit newBorrow = 0;
+ Digit difference = digitSub(0, borrow, &newBorrow);
+ result->setDigit(i, difference);
+ borrow = newBorrow;
+ }
+
+ // The MSD might contain extra bits that we don't want.
+ Digit xMSD = resultLength <= xLength ? x->digit(resultLength - 1) : 0;
+ Digit resultMSD;
+ if (bits % DigitBits == 0) {
+ Digit newBorrow = 0;
+ resultMSD = digitSub(0, xMSD, &newBorrow);
+ resultMSD = digitSub(resultMSD, borrow, &newBorrow);
+ } else {
+ size_t drop = DigitBits - (bits % DigitBits);
+ xMSD = (xMSD << drop) >> drop;
+ Digit minuendMSD = Digit(1) << (DigitBits - drop);
+ Digit newBorrow = 0;
+ resultMSD = digitSub(minuendMSD, xMSD, &newBorrow);
+ resultMSD = digitSub(resultMSD, borrow, &newBorrow);
+ MOZ_ASSERT(newBorrow == 0, "result < 2^bits");
+ // If all subtracted bits were zero, we have to get rid of the
+ // materialized minuendMSD again.
+ resultMSD &= (minuendMSD - 1);
+ }
+ result->setDigit(resultLength - 1, resultMSD);
+
+ return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+BigInt* BigInt::asUintN(JSContext* cx, HandleBigInt x, uint64_t bits) {
+ if (x->isZero()) {
+ return x;
+ }
+
+ if (bits == 0) {
+ return zero(cx);
+ }
+
+ // When truncating a negative number, simulate two's complement.
+ if (x->isNegative()) {
+ bool resultNegative = false;
+ return truncateAndSubFromPowerOfTwo(cx, x, bits, resultNegative);
+ }
+
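+  // Fast path: for bits <= 64 the truncation is a single mask. (For example,
+  // asUintN(4, 0xabn) masks with 0xf and yields 0xbn.)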
+ if (bits <= 64) {
+ uint64_t u64 = toUint64(x);
+ uint64_t mask = uint64_t(-1) >> (64 - bits);
+ uint64_t n = u64 & mask;
+ if (u64 == n && x->absFitsInUint64()) {
+ return x;
+ }
+ return createFromUint64(cx, n);
+ }
+
+ if (bits >= MaxBitLength) {
+ return x;
+ }
+
+ Digit msd = x->digit(x->digitLength() - 1);
+ size_t msdBits = DigitBits - DigitLeadingZeroes(msd);
+ size_t bitLength = msdBits + (x->digitLength() - 1) * DigitBits;
+
+ if (bits >= bitLength) {
+ return x;
+ }
+
+ size_t length = CeilDiv(bits, DigitBits);
+ MOZ_ASSERT(length >= 2, "single-digit cases should be handled above");
+ MOZ_ASSERT(length <= x->digitLength());
+
+ // Eagerly trim high zero digits.
+ const size_t highDigitBits = ((bits - 1) % DigitBits) + 1;
+ const Digit highDigitMask = Digit(-1) >> (DigitBits - highDigitBits);
+ Digit mask = highDigitMask;
+ while (length > 0) {
+ if (x->digit(length - 1) & mask) {
+ break;
+ }
+
+ mask = Digit(-1);
+ length--;
+ }
+
+ const bool isNegative = false;
+ BigInt* res = createUninitialized(cx, length, isNegative);
+ if (res == nullptr) {
+ return nullptr;
+ }
+
+ while (length-- > 0) {
+ res->setDigit(length, x->digit(length) & mask);
+ mask = Digit(-1);
+ }
+ MOZ_ASSERT_IF(length == 0, res->isZero());
+
+ return res;
+}
+
+BigInt* BigInt::asIntN(JSContext* cx, HandleBigInt x, uint64_t bits) {
+ if (x->isZero()) {
+ return x;
+ }
+
+ if (bits == 0) {
+ return zero(cx);
+ }
+
+ if (bits == 64) {
+ int64_t n = toInt64(x);
+ if (((n < 0) == x->isNegative()) && x->absFitsInUint64()) {
+ return x;
+ }
+ return createFromInt64(cx, n);
+ }
+
+ if (bits > MaxBitLength) {
+ return x;
+ }
+
+ Digit msd = x->digit(x->digitLength() - 1);
+ size_t msdBits = DigitBits - DigitLeadingZeroes(msd);
+ size_t bitLength = msdBits + (x->digitLength() - 1) * DigitBits;
+
+ if (bits > bitLength) {
+ return x;
+ }
+
+ Digit signBit = Digit(1) << ((bits - 1) % DigitBits);
+ if (bits == bitLength && msd < signBit) {
+ return x;
+ }
+
+ // All the cases above were the trivial cases: truncating zero, or to zero
+ // bits, or to more bits than are in `x` (so we return `x` directly), or we
+ // already have the 64-bit fast path. If we get here, follow the textbook
+ // algorithm from the specification.
+
+ // BigInt.asIntN step 3: Let `mod` be `x` modulo `2**bits`.
+ RootedBigInt mod(cx, asUintN(cx, x, bits));
+ if (!mod) {
+ return nullptr;
+ }
+
+ // Step 4: If `mod >= 2**(bits - 1)`, return `mod - 2**bits`; otherwise,
+ // return `mod`.
+ if (mod->digitLength() == CeilDiv(bits, DigitBits)) {
+ MOZ_ASSERT(!mod->isZero(),
+ "nonzero bits implies nonzero digit length which implies "
+ "nonzero overall");
+
+ if ((mod->digit(mod->digitLength() - 1) & signBit) != 0) {
+ bool resultNegative = true;
+ return truncateAndSubFromPowerOfTwo(cx, mod, bits, resultNegative);
+ }
+ }
+
+ return mod;
+}
+
+static bool ValidBigIntOperands(JSContext* cx, HandleValue lhs,
+ HandleValue rhs) {
+ MOZ_ASSERT(lhs.isBigInt() || rhs.isBigInt());
+
+ if (!lhs.isBigInt() || !rhs.isBigInt()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_TO_NUMBER);
+ return false;
+ }
+
+ return true;
+}
+
+bool BigInt::addValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::add(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::subValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::sub(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::mulValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::mul(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::divValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::div(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::modValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::mod(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::powValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::pow(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::negValue(JSContext* cx, HandleValue operand,
+ MutableHandleValue res) {
+ MOZ_ASSERT(operand.isBigInt());
+
+ RootedBigInt operandBigInt(cx, operand.toBigInt());
+ BigInt* resBigInt = BigInt::neg(cx, operandBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::incValue(JSContext* cx, HandleValue operand,
+ MutableHandleValue res) {
+ MOZ_ASSERT(operand.isBigInt());
+
+ RootedBigInt operandBigInt(cx, operand.toBigInt());
+ BigInt* resBigInt = BigInt::inc(cx, operandBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::decValue(JSContext* cx, HandleValue operand,
+ MutableHandleValue res) {
+ MOZ_ASSERT(operand.isBigInt());
+
+ RootedBigInt operandBigInt(cx, operand.toBigInt());
+ BigInt* resBigInt = BigInt::dec(cx, operandBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::lshValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::lsh(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::rshValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::rsh(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::bitAndValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::bitAnd(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::bitXorValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::bitXor(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::bitOrValue(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ MutableHandleValue res) {
+ if (!ValidBigIntOperands(cx, lhs, rhs)) {
+ return false;
+ }
+
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ BigInt* resBigInt = BigInt::bitOr(cx, lhsBigInt, rhsBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+bool BigInt::bitNotValue(JSContext* cx, HandleValue operand,
+ MutableHandleValue res) {
+ MOZ_ASSERT(operand.isBigInt());
+
+ RootedBigInt operandBigInt(cx, operand.toBigInt());
+ BigInt* resBigInt = BigInt::bitNot(cx, operandBigInt);
+ if (!resBigInt) {
+ return false;
+ }
+ res.setBigInt(resBigInt);
+ return true;
+}
+
+// BigInt proposal section 7.3
+BigInt* js::ToBigInt(JSContext* cx, HandleValue val) {
+ RootedValue v(cx, val);
+
+ // Step 1.
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, &v)) {
+ return nullptr;
+ }
+
+ // Step 2.
+ if (v.isBigInt()) {
+ return v.toBigInt();
+ }
+
+ if (v.isBoolean()) {
+ return v.toBoolean() ? BigInt::one(cx) : BigInt::zero(cx);
+ }
+
+ if (v.isString()) {
+ RootedString str(cx, v.toString());
+ BigInt* bi;
+ JS_TRY_VAR_OR_RETURN_NULL(cx, bi, StringToBigInt(cx, str));
+ if (!bi) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_INVALID_SYNTAX);
+ return nullptr;
+ }
+ return bi;
+ }
+
+ ReportValueError(cx, JSMSG_CANT_CONVERT_TO, JSDVG_IGNORE_STACK, v, nullptr,
+ "BigInt");
+ return nullptr;
+}
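+
+// Concretely: ToBigInt(true) yields 1n and ToBigInt("42") yields 42n, while
+// a plain Number such as 42 falls through to the ReportValueError call above
+// and throws a TypeError, as the proposal requires.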
+
+JS::Result<int64_t> js::ToBigInt64(JSContext* cx, HandleValue v) {
+ BigInt* bi = js::ToBigInt(cx, v);
+ if (!bi) {
+ return cx->alreadyReportedError();
+ }
+ return BigInt::toInt64(bi);
+}
+
+JS::Result<uint64_t> js::ToBigUint64(JSContext* cx, HandleValue v) {
+ BigInt* bi = js::ToBigInt(cx, v);
+ if (!bi) {
+ return cx->alreadyReportedError();
+ }
+ return BigInt::toUint64(bi);
+}
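+
+// These conversions truncate to the low 64 bits (wrapping modulo 2**64),
+// e.g. ToBigInt64 maps 2n**63n to INT64_MIN; this is the wrapping behavior
+// that BigInt64Array element stores rely on.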
+
+double BigInt::numberValue(BigInt* x) {
+ if (x->isZero()) {
+ return 0.0;
+ }
+
+ using Double = mozilla::FloatingPoint<double>;
+ constexpr uint8_t ExponentShift = Double::kExponentShift;
+ constexpr uint8_t SignificandWidth = Double::kSignificandWidth;
+ constexpr unsigned ExponentBias = Double::kExponentBias;
+ constexpr uint8_t SignShift = Double::kExponentWidth + SignificandWidth;
+
+ MOZ_ASSERT(x->digitLength() > 0);
+
+ // Fast path for the likely-common case of up to a uint64_t of magnitude not
+ // exceeding integral precision in IEEE-754. (Note that we *depend* on this
+ // optimization being performed further down.)
+ if (x->absFitsInUint64()) {
+ uint64_t magnitude = x->uint64FromAbsNonZero();
+ const uint64_t MaxIntegralPrecisionDouble = uint64_t(1)
+ << (SignificandWidth + 1);
+ if (magnitude <= MaxIntegralPrecisionDouble) {
+ return x->isNegative() ? -double(magnitude) : +double(magnitude);
+ }
+ }
+
+ size_t length = x->digitLength();
+ Digit msd = x->digit(length - 1);
+ uint8_t msdLeadingZeroes = DigitLeadingZeroes(msd);
+
+ // `2**ExponentBias` is the largest power of two in a finite IEEE-754
+ // double. If this bigint has a greater power of two, it'll round to
+ // infinity.
+ uint64_t exponent = length * DigitBits - msdLeadingZeroes - 1;
+ if (exponent > ExponentBias) {
+ return x->isNegative() ? mozilla::NegativeInfinity<double>()
+ : mozilla::PositiveInfinity<double>();
+ }
+
+ // Otherwise munge the most significant bits of the number into proper
+ // position in an IEEE-754 double and go to town.
+
+ // Omit the most significant bit: the IEEE-754 format includes this bit
+ // implicitly for all double-precision integers.
+ const uint8_t msdIgnoredBits = msdLeadingZeroes + 1;
+ const uint8_t msdIncludedBits = DigitBits - msdIgnoredBits;
+
+ // We compute the final mantissa of the result, shifted upward to the top of
+ // the `uint64_t` space -- plus an extra bit to detect potential rounding.
+ constexpr uint8_t BitsNeededForShiftedMantissa = SignificandWidth + 1;
+
+ // Shift `msd`'s contributed bits upward to remove high-order zeroes and the
+ // highest set bit (which is implicit in IEEE-754 integral values so must be
+ // removed) and to add low-order zeroes. (Lower-order garbage bits are
+ // discarded when `shiftedMantissa` is converted to a real mantissa.)
+ uint64_t shiftedMantissa =
+ msdIncludedBits == 0 ? 0 : uint64_t(msd) << (64 - msdIncludedBits);
+
+ // If the extra bit is set, correctly rounding the result may require
+ // examining all lower-order bits. Also compute 1) the index of the Digit
+ // storing the extra bit, and 2) whether bits beneath the extra bit in that
+ // Digit are nonzero so we can round if needed.
+ size_t digitContainingExtraBit;
+ Digit bitsBeneathExtraBitInDigitContainingExtraBit;
+
+ // Add shifted bits to `shiftedMantissa` until we have a complete mantissa and
+ // an extra bit.
+ if (msdIncludedBits >= BitsNeededForShiftedMantissa) {
+ // DigitBits=64 (necessarily for msdIncludedBits ≥ SignificandWidth+1;
+ // | C++ compiler range analysis ought to eliminate this
+ // | check on 32-bit)
+ // _________|__________
+ // / |
+ // msdIncludedBits
+ // ________|________
+ // / |
+ // [001···················|
+ // \_/\_____________/\__|
+ // | | |
+ // msdIgnoredBits | bits below the extra bit (may be no bits)
+ // BitsNeededForShiftedMantissa=SignificandWidth+1
+ digitContainingExtraBit = length - 1;
+
+ const uint8_t countOfBitsInDigitBelowExtraBit =
+ DigitBits - BitsNeededForShiftedMantissa - msdIgnoredBits;
+ bitsBeneathExtraBitInDigitContainingExtraBit =
+ msd & ((Digit(1) << countOfBitsInDigitBelowExtraBit) - 1);
+ } else {
+ MOZ_ASSERT(length >= 2,
+ "single-Digit numbers with this few bits should have been "
+ "handled by the fast-path above");
+
+ Digit second = x->digit(length - 2);
+ if (DigitBits == 64) {
+ shiftedMantissa |= second >> msdIncludedBits;
+
+ digitContainingExtraBit = length - 2;
+
+ // msdIncludedBits + DigitBits
+ // ________|_________
+ // / |
+ // DigitBits=64
+ // msdIncludedBits |
+ // __|___ _____|___
+ // / \ / |
+ // [001········|···········|
+ // \_/\_____________/\___|
+ // | | |
+ // msdIgnoredBits | bits below the extra bit (always more than one)
+ // |
+ // BitsNeededForShiftedMantissa=SignificandWidth+1
+ const uint8_t countOfBitsInSecondDigitBelowExtraBit =
+ (msdIncludedBits + DigitBits) - BitsNeededForShiftedMantissa;
+
+ bitsBeneathExtraBitInDigitContainingExtraBit =
+ second << (DigitBits - countOfBitsInSecondDigitBelowExtraBit);
+ } else {
+ shiftedMantissa |= uint64_t(second) << msdIgnoredBits;
+
+ if (msdIncludedBits + DigitBits >= BitsNeededForShiftedMantissa) {
+ digitContainingExtraBit = length - 2;
+
+ // msdIncludedBits + DigitBits
+ // ______|________
+ // / |
+ // DigitBits=32
+ // msdIncludedBits |
+ // _|_ _____|___
+ // / \ / |
+ // [001·····|···········|
+ // \___________/\__|
+ // | |
+ // | bits below the extra bit (may be no bits)
+ // BitsNeededForShiftedMantissa=SignificandWidth+1
+ const uint8_t countOfBitsInSecondDigitBelowExtraBit =
+ (msdIncludedBits + DigitBits) - BitsNeededForShiftedMantissa;
+
+ bitsBeneathExtraBitInDigitContainingExtraBit =
+ second & ((Digit(1) << countOfBitsInSecondDigitBelowExtraBit) - 1);
+ } else {
+ MOZ_ASSERT(length >= 3,
+ "we must have at least three digits here, because "
+ "`msdIncludedBits + 32 < BitsNeededForShiftedMantissa` "
+ "guarantees `x < 2**53` -- and therefore the "
+ "MaxIntegralPrecisionDouble optimization above will have "
+ "handled two-digit cases");
+
+ Digit third = x->digit(length - 3);
+ shiftedMantissa |= uint64_t(third) >> msdIncludedBits;
+
+ digitContainingExtraBit = length - 3;
+
+ // msdIncludedBits + DigitBits + DigitBits
+ // ____________|______________
+ // / |
+ // DigitBits=32
+ // msdIncludedBits | DigitBits=32
+ // _|_ _____|___ ____|____
+ // / \ / \ / |
+ // [001·····|···········|···········|
+ // \____________________/\_____|
+ // | |
+ // | bits below the extra bit
+ // BitsNeededForShiftedMantissa=SignificandWidth+1
+ static_assert(2 * DigitBits > BitsNeededForShiftedMantissa,
+ "two 32-bit digits should more than fill a mantissa");
+ const uint8_t countOfBitsInThirdDigitBelowExtraBit =
+ msdIncludedBits + 2 * DigitBits - BitsNeededForShiftedMantissa;
+
+ // Shift out the mantissa bits and the extra bit.
+ bitsBeneathExtraBitInDigitContainingExtraBit =
+ third << (DigitBits - countOfBitsInThirdDigitBelowExtraBit);
+ }
+ }
+ }
+
+ constexpr uint64_t LeastSignificantBit = uint64_t(1)
+ << (64 - SignificandWidth);
+ constexpr uint64_t ExtraBit = LeastSignificantBit >> 1;
+
+ // The extra bit must be set for rounding to change the mantissa.
+ if ((shiftedMantissa & ExtraBit) != 0) {
+ bool shouldRoundUp;
+ if (shiftedMantissa & LeastSignificantBit) {
+ // If the lowest mantissa bit is set, it doesn't matter what lower bits
+ // are: nearest-even rounds up regardless.
+ shouldRoundUp = true;
+ } else {
+ // If the lowest mantissa bit is unset, *all* lower bits are relevant.
+ // All-zero bits below the extra bit situates `x` halfway between two
+ // values, and the nearest *even* value lies downward. But if any bit
+ // below the extra bit is set, `x` is closer to the rounded-up value.
+ shouldRoundUp = bitsBeneathExtraBitInDigitContainingExtraBit != 0;
+ if (!shouldRoundUp) {
+ while (digitContainingExtraBit-- > 0) {
+ if (x->digit(digitContainingExtraBit) != 0) {
+ shouldRoundUp = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (shouldRoundUp) {
+ // Add one to the significand bits. If they overflow, the exponent must
+ // also be increased. If *that* overflows, return the correct infinity.
+ uint64_t before = shiftedMantissa;
+ shiftedMantissa += ExtraBit;
+ if (shiftedMantissa < before) {
+ exponent++;
+ if (exponent > ExponentBias) {
+ return x->isNegative() ? NegativeInfinity<double>()
+ : PositiveInfinity<double>();
+ }
+ }
+ }
+ }
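+
+ // A worked example of the nearest-even rule: 2**53 + 1 lies exactly
+ // between the representable doubles 2**53 and 2**53 + 2, so the tie goes
+ // to the even mantissa and 2n**53n + 1n converts to 9007199254740992,
+ // while 2n**53n + 3n rounds up to 9007199254740996.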
+
+ uint64_t significandBits = shiftedMantissa >> (64 - SignificandWidth);
+ uint64_t signBit = uint64_t(x->isNegative() ? 1 : 0) << SignShift;
+ uint64_t exponentBits = (exponent + ExponentBias) << ExponentShift;
+ return mozilla::BitwiseCast<double>(signBit | exponentBits | significandBits);
+}
+
+int8_t BigInt::compare(BigInt* x, BigInt* y) {
+ // Sanity checks to catch negative zeroes escaping to the wild.
+ MOZ_ASSERT(!x->isNegative() || !x->isZero());
+ MOZ_ASSERT(!y->isNegative() || !y->isZero());
+
+ bool xSign = x->isNegative();
+
+ if (xSign != y->isNegative()) {
+ return xSign ? -1 : 1;
+ }
+
+ if (xSign) {
+ std::swap(x, y);
+ }
+
+ return absoluteCompare(x, y);
+}
+
+bool BigInt::equal(BigInt* lhs, BigInt* rhs) {
+ if (lhs == rhs) {
+ return true;
+ }
+ if (lhs->digitLength() != rhs->digitLength()) {
+ return false;
+ }
+ if (lhs->isNegative() != rhs->isNegative()) {
+ return false;
+ }
+ for (size_t i = 0; i < lhs->digitLength(); i++) {
+ if (lhs->digit(i) != rhs->digit(i)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+int8_t BigInt::compare(BigInt* x, double y) {
+ MOZ_ASSERT(!std::isnan(y));
+
+ constexpr int LessThan = -1, Equal = 0, GreaterThan = 1;
+
+ // ±Infinity lies outside the range of any finite bigint.
+ if (!std::isfinite(y)) {
+ return y > 0 ? LessThan : GreaterThan;
+ }
+
+ // Handle `x === 0n` and `y == 0` special cases.
+ if (x->isZero()) {
+ if (y == 0) {
+ // -0 and +0 are treated identically.
+ return Equal;
+ }
+
+ return y > 0 ? LessThan : GreaterThan;
+ }
+
+ const bool xNegative = x->isNegative();
+ if (y == 0) {
+ return xNegative ? LessThan : GreaterThan;
+ }
+
+ // Nonzero `x` and `y` with different signs are trivially compared.
+ const bool yNegative = y < 0;
+ if (xNegative != yNegative) {
+ return xNegative ? LessThan : GreaterThan;
+ }
+
+ // `x` and `y` are same-signed. Determine which has greater magnitude,
+ // then combine that with the signedness just computed to reach a result.
+ const int exponent = mozilla::ExponentComponent(y);
+ if (exponent < 0) {
+ // `y` is a nonzero fraction of magnitude less than 1.
+ return xNegative ? LessThan : GreaterThan;
+ }
+
+ size_t xLength = x->digitLength();
+ MOZ_ASSERT(xLength > 0);
+
+ Digit xMSD = x->digit(xLength - 1);
+ const int shift = DigitLeadingZeroes(xMSD);
+ int xBitLength = xLength * DigitBits - shift;
+
+ // Differing bit-length makes for a simple comparison.
+ int yBitLength = exponent + 1;
+ if (xBitLength < yBitLength) {
+ return xNegative ? GreaterThan : LessThan;
+ }
+ if (xBitLength > yBitLength) {
+ return xNegative ? LessThan : GreaterThan;
+ }
+
+ // Compare the high 64 bits of both numbers. (Lower-order bits not present
+ // in either number are zeroed.) Either those bits distinguish `x` and `y`,
+ // or they are equal, in which case `x` has larger magnitude exactly when
+ // some lower-order bit of `x` is nonzero.
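+ //
+ // For instance, 2n**64n compares equal to the double 18446744073709551616
+ // (2**64 is exactly representable as a double), while 2n**64n + 1n
+ // compares greater: the high 64 bits match, but a leftover low-order bit
+ // of `x` is nonzero.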
+
+ using Double = mozilla::FloatingPoint<double>;
+ constexpr uint8_t SignificandWidth = Double::kSignificandWidth;
+ constexpr uint64_t SignificandBits = Double::kSignificandBits;
+
+ const uint64_t doubleBits = mozilla::BitwiseCast<uint64_t>(y);
+ const uint64_t significandBits = doubleBits & SignificandBits;
+
+ // Re-add the implicit-one bit when constructing `y`'s high 64 bits.
+ const uint64_t yHigh64Bits =
+ ((uint64_t(1) << SignificandWidth) | significandBits)
+ << (64 - SignificandWidth - 1);
+
+ // Cons up `x`'s high 64 bits, backfilling with zeroes at the low end if
+ // `x` has fewer than 64 bits.
+ uint8_t xBitsFilled = DigitBits - shift;
+ uint64_t xHigh64Bits = uint64_t(xMSD) << (64 - xBitsFilled);
+
+ // At this point we no longer need to look at the most significant digit.
+ xLength--;
+
+ // The high 64 bits from `x` will probably not align to a digit boundary.
+ // `xHasNonZeroLeftoverBits` is set to true if the digit supplying the
+ // least-significant bit of `xHigh64Bits` has any nonzero bits below the
+ // bits it contributed.
+ bool xHasNonZeroLeftoverBits = false;
+
+ if (xBitsFilled < std::min(xBitLength, 64)) {
+ MOZ_ASSERT(xLength >= 1,
+ "If there are more bits to fill, there should be "
+ "more digits to fill them from");
+
+ Digit second = x->digit(--xLength);
+ if (DigitBits == 32) {
+ xBitsFilled += 32;
+ xHigh64Bits |= uint64_t(second) << (64 - xBitsFilled);
+ if (xBitsFilled < 64 && xLength >= 1) {
+ Digit third = x->digit(--xLength);
+ const uint8_t neededBits = 64 - xBitsFilled;
+ xHigh64Bits |= uint64_t(third) >> (DigitBits - neededBits);
+ xHasNonZeroLeftoverBits = (third << neededBits) != 0;
+ }
+ } else {
+ const uint8_t neededBits = 64 - xBitsFilled;
+ xHigh64Bits |= uint64_t(second) >> (DigitBits - neededBits);
+ xHasNonZeroLeftoverBits = (second << neededBits) != 0;
+ }
+ }
+
+ // If high bits are unequal, the larger one has greater magnitude.
+ if (yHigh64Bits > xHigh64Bits) {
+ return xNegative ? GreaterThan : LessThan;
+ }
+ if (xHigh64Bits > yHigh64Bits) {
+ return xNegative ? LessThan : GreaterThan;
+ }
+
+ // Otherwise the top 64 bits of both are equal. If the values differ, a
+ // lower-order bit in `x` is nonzero and `x` has greater magnitude than
+ // `y`; otherwise `x == y`.
+ if (xHasNonZeroLeftoverBits) {
+ return xNegative ? LessThan : GreaterThan;
+ }
+ while (xLength != 0) {
+ if (x->digit(--xLength) != 0) {
+ return xNegative ? LessThan : GreaterThan;
+ }
+ }
+
+ return Equal;
+}
+
+bool BigInt::equal(BigInt* lhs, double rhs) {
+ if (std::isnan(rhs)) {
+ return false;
+ }
+ return compare(lhs, rhs) == 0;
+}
+
+JS::Result<bool> BigInt::equal(JSContext* cx, Handle<BigInt*> lhs,
+ HandleString rhs) {
+ BigInt* rhsBigInt;
+ MOZ_TRY_VAR(rhsBigInt, StringToBigInt(cx, rhs));
+ if (!rhsBigInt) {
+ return false;
+ }
+ return equal(lhs, rhsBigInt);
+}
+
+// BigInt proposal section 3.2.5
+JS::Result<bool> BigInt::looselyEqual(JSContext* cx, HandleBigInt lhs,
+ HandleValue rhs) {
+ // Step 1.
+ if (rhs.isBigInt()) {
+ return equal(lhs, rhs.toBigInt());
+ }
+
+ // Steps 2-5 (not applicable).
+
+ // Steps 6-7.
+ if (rhs.isString()) {
+ RootedString rhsString(cx, rhs.toString());
+ return equal(cx, lhs, rhsString);
+ }
+
+ // Steps 8-9 (not applicable).
+
+ // Steps 10-11.
+ if (rhs.isObject()) {
+ RootedValue rhsPrimitive(cx, rhs);
+ if (!ToPrimitive(cx, &rhsPrimitive)) {
+ return cx->alreadyReportedError();
+ }
+ return looselyEqual(cx, lhs, rhsPrimitive);
+ }
+
+ // Step 12.
+ if (rhs.isNumber()) {
+ return equal(lhs, rhs.toNumber());
+ }
+
+ // Step 13.
+ return false;
+}
+
+// BigInt proposal section 1.1.12. BigInt::lessThan ( x, y )
+bool BigInt::lessThan(BigInt* x, BigInt* y) { return compare(x, y) < 0; }
+
+Maybe<bool> BigInt::lessThan(BigInt* lhs, double rhs) {
+ if (std::isnan(rhs)) {
+ return Maybe<bool>(Nothing());
+ }
+ return Some(compare(lhs, rhs) < 0);
+}
+
+Maybe<bool> BigInt::lessThan(double lhs, BigInt* rhs) {
+ if (std::isnan(lhs)) {
+ return Maybe<bool>(Nothing());
+ }
+ return Some(-compare(rhs, lhs) < 0);
+}
+
+bool BigInt::lessThan(JSContext* cx, HandleBigInt lhs, HandleString rhs,
+ Maybe<bool>& res) {
+ BigInt* rhsBigInt;
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, rhsBigInt, StringToBigInt(cx, rhs));
+ if (!rhsBigInt) {
+ res = Nothing();
+ return true;
+ }
+ res = Some(lessThan(lhs, rhsBigInt));
+ return true;
+}
+
+bool BigInt::lessThan(JSContext* cx, HandleString lhs, HandleBigInt rhs,
+ Maybe<bool>& res) {
+ BigInt* lhsBigInt;
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, lhsBigInt, StringToBigInt(cx, lhs));
+ if (!lhsBigInt) {
+ res = Nothing();
+ return true;
+ }
+ res = Some(lessThan(lhsBigInt, rhs));
+ return true;
+}
+
+bool BigInt::lessThan(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ Maybe<bool>& res) {
+ if (lhs.isBigInt()) {
+ if (rhs.isString()) {
+ RootedBigInt lhsBigInt(cx, lhs.toBigInt());
+ RootedString rhsString(cx, rhs.toString());
+ return lessThan(cx, lhsBigInt, rhsString, res);
+ }
+
+ if (rhs.isNumber()) {
+ res = lessThan(lhs.toBigInt(), rhs.toNumber());
+ return true;
+ }
+
+ MOZ_ASSERT(rhs.isBigInt());
+ res = Some(lessThan(lhs.toBigInt(), rhs.toBigInt()));
+ return true;
+ }
+
+ MOZ_ASSERT(rhs.isBigInt());
+ if (lhs.isString()) {
+ RootedString lhsString(cx, lhs.toString());
+ RootedBigInt rhsBigInt(cx, rhs.toBigInt());
+ return lessThan(cx, lhsString, rhsBigInt, res);
+ }
+
+ MOZ_ASSERT(lhs.isNumber());
+ res = lessThan(lhs.toNumber(), rhs.toBigInt());
+ return true;
+}
+
+template <js::AllowGC allowGC>
+JSLinearString* BigInt::toString(JSContext* cx, HandleBigInt x, uint8_t radix) {
+ MOZ_ASSERT(2 <= radix && radix <= 36);
+
+ if (x->isZero()) {
+ return cx->staticStrings().getInt(0);
+ }
+
+ if (mozilla::IsPowerOfTwo(radix)) {
+ return toStringBasePowerOfTwo<allowGC>(cx, x, radix);
+ }
+
+ if (radix == 10 && x->digitLength() == 1) {
+ return toStringSingleDigitBaseTen<allowGC>(cx, x->digit(0),
+ x->isNegative());
+ }
+
+ // Punt on doing generic toString without GC.
+ if (!allowGC) {
+ return nullptr;
+ }
+
+ return toStringGeneric(cx, x, radix);
+}
+
+template JSLinearString* BigInt::toString<js::CanGC>(JSContext* cx,
+ HandleBigInt x,
+ uint8_t radix);
+template JSLinearString* BigInt::toString<js::NoGC>(JSContext* cx,
+ HandleBigInt x,
+ uint8_t radix);
+
+template <typename CharT>
+static inline BigInt* ParseStringBigIntLiteral(JSContext* cx,
+ Range<const CharT> range,
+ bool* haveParseError) {
+ auto start = range.begin();
+ auto end = range.end();
+
+ while (start < end && unicode::IsSpace(start[0])) {
+ start++;
+ }
+
+ while (start < end && unicode::IsSpace(end[-1])) {
+ end--;
+ }
+
+ if (start == end) {
+ return BigInt::zero(cx);
+ }
+
+ // StringNumericLiteral ::: StrDecimalLiteral, but without Infinity, decimal
+ // points, or exponents. Note that the raw '+' or '-' cases fall through
+ // because the string is too short, and eventually signal a parse error.
+ if (end - start > 1) {
+ if (start[0] == '+') {
+ bool isNegative = false;
+ start++;
+ return BigInt::parseLiteralDigits(cx, Range<const CharT>(start, end), 10,
+ isNegative, haveParseError);
+ }
+ if (start[0] == '-') {
+ bool isNegative = true;
+ start++;
+ return BigInt::parseLiteralDigits(cx, Range<const CharT>(start, end), 10,
+ isNegative, haveParseError);
+ }
+ }
+
+ return BigInt::parseLiteral(cx, Range<const CharT>(start, end),
+ haveParseError);
+}
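+
+// Per StringToBigInt's grammar, surrounding whitespace is ignored and an
+// empty or all-whitespace string parses as 0n (so BigInt("  ") is 0n),
+// while a bare "+" or "-" is a syntax error.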
+
+// Called from BigInt constructor.
+JS::Result<BigInt*> js::StringToBigInt(JSContext* cx, HandleString str) {
+ JSLinearString* linear = str->ensureLinear(cx);
+ if (!linear) {
+ return cx->alreadyReportedOOM();
+ }
+
+ AutoStableStringChars chars(cx);
+ if (!chars.init(cx, str)) {
+ return cx->alreadyReportedOOM();
+ }
+
+ BigInt* res;
+ bool parseError = false;
+ if (chars.isLatin1()) {
+ res = ParseStringBigIntLiteral(cx, chars.latin1Range(), &parseError);
+ } else {
+ res = ParseStringBigIntLiteral(cx, chars.twoByteRange(), &parseError);
+ }
+
+ // A nullptr result can indicate either a parse error or a generic error.
+ if (!res && !parseError) {
+ return cx->alreadyReportedError();
+ }
+
+ return res;
+}
+
+// Called from the parser with an already trimmed and validated token.
+BigInt* js::ParseBigIntLiteral(JSContext* cx,
+ const Range<const char16_t>& chars) {
+ // This function is only called from the frontend when parsing BigInts. Parsed
+ // BigInts are stored in the script's data vector and therefore need to be
+ // allocated in the tenured heap.
+ constexpr gc::Heap heap = gc::Heap::Tenured;
+
+ bool parseError = false;
+ BigInt* res = BigInt::parseLiteral(cx, chars, &parseError, heap);
+ if (!res) {
+ return nullptr;
+ }
+ MOZ_ASSERT(res->isTenured());
+ MOZ_RELEASE_ASSERT(!parseError);
+ return res;
+}
+
+// Check an already-validated numeric literal for a non-zero value. Used by
+// the parser's node folder in deferred mode.
+bool js::BigIntLiteralIsZero(const mozilla::Range<const char16_t>& chars) {
+ return BigInt::literalIsZero(chars);
+}
+
+template <js::AllowGC allowGC>
+JSAtom* js::BigIntToAtom(JSContext* cx, HandleBigInt bi) {
+ JSString* str = BigInt::toString<allowGC>(cx, bi, 10);
+ if (!str) {
+ return nullptr;
+ }
+ JSAtom* atom = AtomizeString(cx, str);
+ if (!atom) {
+ if constexpr (!allowGC) {
+ // NOTE: AtomizeString can also fail via ReportAllocationOverflow rather
+ // than ReportOutOfMemory, but an allocation overflow cannot happen here
+ // because the length is guarded by BigInt::toString.
+ cx->recoverFromOutOfMemory();
+ }
+ return nullptr;
+ }
+ return atom;
+}
+
+template JSAtom* js::BigIntToAtom<js::CanGC>(JSContext* cx, HandleBigInt bi);
+template JSAtom* js::BigIntToAtom<js::NoGC>(JSContext* cx, HandleBigInt bi);
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+void BigInt::dump() const {
+ js::Fprinter out(stderr);
+ dump(out);
+}
+
+void BigInt::dump(js::GenericPrinter& out) const {
+ if (isNegative()) {
+ out.putChar('-');
+ }
+
+ if (digitLength() == 0) {
+ out.put("0");
+ } else if (digitLength() == 1) {
+ uint64_t d = digit(0);
+ out.printf("%" PRIu64, d);
+ } else {
+ out.put("0x");
+ for (size_t i = 0; i < digitLength(); i++) {
+ uint64_t d = digit(digitLength() - i - 1);
+ if (sizeof(Digit) == 4) {
+ out.printf("%.8" PRIX32, uint32_t(d));
+ } else {
+ out.printf("%.16" PRIX64, d);
+ }
+ }
+ }
+
+ out.putChar('n');
+}
+#endif
+
+JS::ubi::Node::Size JS::ubi::Concrete<BigInt>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ BigInt& bi = get();
+ size_t size = sizeof(JS::BigInt);
+ if (IsInsideNursery(&bi)) {
+ size += Nursery::nurseryCellHeaderSize();
+ size += bi.sizeOfExcludingThisInNursery(mallocSizeOf);
+ } else {
+ size += bi.sizeOfExcludingThis(mallocSizeOf);
+ }
+ return size;
+}
+
+// Public API
+
+BigInt* JS::NumberToBigInt(JSContext* cx, double num) {
+ return js::NumberToBigInt(cx, num);
+}
+
+template <typename CharT>
+static inline BigInt* StringToBigIntHelper(JSContext* cx,
+ Range<const CharT>& chars) {
+ bool parseError = false;
+ BigInt* bi = ParseStringBigIntLiteral(cx, chars, &parseError);
+ if (!bi) {
+ if (parseError) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_INVALID_SYNTAX);
+ }
+ return nullptr;
+ }
+ MOZ_RELEASE_ASSERT(!parseError);
+ return bi;
+}
+
+BigInt* JS::StringToBigInt(JSContext* cx, Range<const Latin1Char> chars) {
+ return StringToBigIntHelper(cx, chars);
+}
+
+BigInt* JS::StringToBigInt(JSContext* cx, Range<const char16_t> chars) {
+ return StringToBigIntHelper(cx, chars);
+}
+
+static inline BigInt* SimpleStringToBigIntHelper(
+ JSContext* cx, mozilla::Span<const Latin1Char> chars, uint8_t radix,
+ bool* haveParseError) {
+ if (chars.Length() > 1) {
+ if (chars[0] == '+') {
+ return BigInt::parseLiteralDigits(
+ cx, Range<const Latin1Char>{chars.From(1)}, radix,
+ /* isNegative = */ false, haveParseError);
+ }
+ if (chars[0] == '-') {
+ return BigInt::parseLiteralDigits(
+ cx, Range<const Latin1Char>{chars.From(1)}, radix,
+ /* isNegative = */ true, haveParseError);
+ }
+ }
+
+ return BigInt::parseLiteralDigits(cx, Range<const Latin1Char>{chars}, radix,
+ /* isNegative = */ false, haveParseError);
+}
+
+BigInt* JS::SimpleStringToBigInt(JSContext* cx, mozilla::Span<const char> chars,
+ uint8_t radix) {
+ if (chars.empty()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_INVALID_SYNTAX);
+ return nullptr;
+ }
+ if (radix < 2 || radix > 36) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_RADIX);
+ return nullptr;
+ }
+
+ mozilla::Span<const Latin1Char> latin1{
+ reinterpret_cast<const Latin1Char*>(chars.data()), chars.size()};
+ bool haveParseError = false;
+ BigInt* bi = SimpleStringToBigIntHelper(cx, latin1, radix, &haveParseError);
+ if (!bi) {
+ if (haveParseError) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_INVALID_SYNTAX);
+ }
+ return nullptr;
+ }
+ MOZ_RELEASE_ASSERT(!haveParseError);
+ return bi;
+}
+
+BigInt* JS::ToBigInt(JSContext* cx, HandleValue val) {
+ return js::ToBigInt(cx, val);
+}
+
+int64_t JS::ToBigInt64(JS::BigInt* bi) { return BigInt::toInt64(bi); }
+
+uint64_t JS::ToBigUint64(JS::BigInt* bi) { return BigInt::toUint64(bi); }
+
+double JS::BigIntToNumber(JS::BigInt* bi) { return BigInt::numberValue(bi); }
+
+bool JS::BigIntIsNegative(BigInt* bi) {
+ return !bi->isZero() && bi->isNegative();
+}
+
+bool JS::BigIntFitsNumber(BigInt* bi, double* out) {
+ return bi->isNumber(bi, out);
+}
+
+JSString* JS::BigIntToString(JSContext* cx, Handle<BigInt*> bi, uint8_t radix) {
+ if (radix < 2 || radix > 36) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_RADIX);
+ return nullptr;
+ }
+ return BigInt::toString<CanGC>(cx, bi, radix);
+}
+
+// Semi-public template details
+
+BigInt* JS::detail::BigIntFromInt64(JSContext* cx, int64_t num) {
+ return BigInt::createFromInt64(cx, num);
+}
+
+BigInt* JS::detail::BigIntFromUint64(JSContext* cx, uint64_t num) {
+ return BigInt::createFromUint64(cx, num);
+}
+
+BigInt* JS::detail::BigIntFromBool(JSContext* cx, bool b) {
+ return b ? BigInt::one(cx) : BigInt::zero(cx);
+}
+
+bool JS::detail::BigIntIsInt64(BigInt* bi, int64_t* result) {
+ return BigInt::isInt64(bi, result);
+}
+
+bool JS::detail::BigIntIsUint64(BigInt* bi, uint64_t* result) {
+ return BigInt::isUint64(bi, result);
+}
diff --git a/js/src/vm/BigIntType.h b/js/src/vm/BigIntType.h
new file mode 100644
index 0000000000..c8e264b20b
--- /dev/null
+++ b/js/src/vm/BigIntType.h
@@ -0,0 +1,481 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BigIntType_h
+#define vm_BigIntType_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/OperatorNewExtensions.h"
+#include "mozilla/Range.h"
+#include "mozilla/Span.h"
+
+#include "jstypes.h"
+
+#include "gc/Allocator.h"
+#include "gc/Cell.h"
+#include "gc/StoreBuffer.h"
+#include "js/Result.h"
+#include "js/RootingAPI.h"
+#include "js/TraceKind.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+
+namespace gc {
+class TenuringTracer;
+} // namespace gc
+
+namespace jit {
+class MacroAssembler;
+} // namespace jit
+
+} // namespace js
+
+namespace JS {
+
+class JS_PUBLIC_API BigInt;
+
+class BigInt final : public js::gc::CellWithLengthAndFlags {
+ friend class js::gc::CellAllocator;
+
+ BigInt() = default;
+
+ public:
+ using Digit = uintptr_t;
+
+ private:
+ // The low CellFlagBitsReservedForGC flag bits are reserved.
+ static constexpr uintptr_t SignBit =
+ js::Bit(js::gc::CellFlagBitsReservedForGC);
+
+ static constexpr size_t InlineDigitsLength =
+ (js::gc::MinCellSize - sizeof(CellWithLengthAndFlags)) / sizeof(Digit);
+
+ public:
+ // The number of digits and the flags are stored in the cell header.
+ size_t digitLength() const { return headerLengthField(); }
+
+ private:
+ // The digit storage starts with the least significant digit (little-endian
+ // digit order). Byte order within a digit is of course native endian.
+ union {
+ Digit* heapDigits_;
+ Digit inlineDigits_[InlineDigitsLength];
+ };
+
+ void setLengthAndFlags(uint32_t len, uint32_t flags) {
+ setHeaderLengthAndFlags(len, flags);
+ }
+
+ public:
+ static const JS::TraceKind TraceKind = JS::TraceKind::BigInt;
+
+ void fixupAfterMovingGC() {}
+
+ js::gc::AllocKind getAllocKind() const { return js::gc::AllocKind::BIGINT; }
+
+ // Offset for direct access from JIT code.
+ static constexpr size_t offsetOfDigitLength() {
+ return offsetOfHeaderLength();
+ }
+
+ bool hasInlineDigits() const { return digitLength() <= InlineDigitsLength; }
+ bool hasHeapDigits() const { return !hasInlineDigits(); }
+
+ using Digits = mozilla::Span<Digit>;
+ Digits digits() {
+ return Digits(hasInlineDigits() ? inlineDigits_ : heapDigits_,
+ digitLength());
+ }
+ using ConstDigits = mozilla::Span<const Digit>;
+ ConstDigits digits() const {
+ return ConstDigits(hasInlineDigits() ? inlineDigits_ : heapDigits_,
+ digitLength());
+ }
+ Digit digit(size_t idx) const { return digits()[idx]; }
+ void setDigit(size_t idx, Digit digit) { digits()[idx] = digit; }
+
+ bool isZero() const { return digitLength() == 0; }
+ bool isNegative() const { return headerFlagsField() & SignBit; }
+
+ void initializeDigitsToZero();
+
+ void traceChildren(JSTracer* trc);
+
+ static MOZ_ALWAYS_INLINE void postWriteBarrier(void* cellp, BigInt* prev,
+ BigInt* next) {
+ js::gc::PostWriteBarrierImpl<BigInt>(cellp, prev, next);
+ }
+
+ void finalize(JS::GCContext* gcx);
+ js::HashNumber hash() const;
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t sizeOfExcludingThisInNursery(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ static BigInt* createUninitialized(JSContext* cx, size_t digitLength,
+ bool isNegative,
+ js::gc::Heap heap = js::gc::Heap::Default);
+ static BigInt* createFromDouble(JSContext* cx, double d);
+ static BigInt* createFromUint64(JSContext* cx, uint64_t n);
+ static BigInt* createFromInt64(JSContext* cx, int64_t n);
+ static BigInt* createFromDigit(JSContext* cx, Digit d, bool isNegative);
+ static BigInt* createFromNonZeroRawUint64(JSContext* cx, uint64_t n,
+ bool isNegative);
+ // FIXME: Cache these values.
+ static BigInt* zero(JSContext* cx, js::gc::Heap heap = js::gc::Heap::Default);
+ static BigInt* one(JSContext* cx);
+ static BigInt* negativeOne(JSContext* cx);
+
+ static BigInt* copy(JSContext* cx, Handle<BigInt*> x,
+ js::gc::Heap heap = js::gc::Heap::Default);
+ static BigInt* add(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* sub(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* mul(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* div(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* mod(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* pow(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* neg(JSContext* cx, Handle<BigInt*> x);
+ static BigInt* inc(JSContext* cx, Handle<BigInt*> x);
+ static BigInt* dec(JSContext* cx, Handle<BigInt*> x);
+ static BigInt* lsh(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* rsh(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* bitAnd(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* bitXor(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* bitOr(JSContext* cx, Handle<BigInt*> x, Handle<BigInt*> y);
+ static BigInt* bitNot(JSContext* cx, Handle<BigInt*> x);
+
+ static int64_t toInt64(const BigInt* x);
+ static uint64_t toUint64(const BigInt* x);
+
+ // Return true if the BigInt is representable as an int64 without loss of
+ // precision, and store the int64 value in the output. Otherwise return
+ // false and leave the value of the output parameter unspecified.
+ static bool isInt64(BigInt* x, int64_t* result);
+
+ // Return true if the BigInt is representable as a uint64 without loss of
+ // precision, and store the uint64 value in the output. Otherwise return
+ // false and leave the value of the output parameter unspecified.
+ static bool isUint64(BigInt* x, uint64_t* result);
+
+ // Return true if the BigInt is representable as a JS Number (double)
+ // without loss of precision, and store the double value in the output.
+ // Otherwise return false and leave the value of the output parameter
+ // unspecified.
+ static bool isNumber(BigInt* x, double* result);
+
+ static BigInt* asIntN(JSContext* cx, Handle<BigInt*> x, uint64_t bits);
+ static BigInt* asUintN(JSContext* cx, Handle<BigInt*> x, uint64_t bits);
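+
+ // For example, asUintN(3, x) reduces x modulo 2**3, so BigInt.asUintN(3,
+ // 25n) is 1n; asIntN additionally sign-extends, so BigInt.asIntN(3, 25n)
+ // is 1n but BigInt.asIntN(3, 7n) is -1n.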
+
+ // Type-checking versions of arithmetic operations. These methods
+ // must be called with at least one BigInt operand. Binary
+ // operations will throw a TypeError if one of the operands is not a
+ // BigInt value.
+ static bool addValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool subValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool mulValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool divValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool modValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool powValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool negValue(JSContext* cx, Handle<Value> operand,
+ MutableHandle<Value> res);
+ static bool incValue(JSContext* cx, Handle<Value> operand,
+ MutableHandle<Value> res);
+ static bool decValue(JSContext* cx, Handle<Value> operand,
+ MutableHandle<Value> res);
+ static bool lshValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool rshValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool bitAndValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool bitXorValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool bitOrValue(JSContext* cx, Handle<Value> lhs, Handle<Value> rhs,
+ MutableHandle<Value> res);
+ static bool bitNotValue(JSContext* cx, Handle<Value> operand,
+ MutableHandle<Value> res);
+
+ static double numberValue(BigInt* x);
+
+ template <js::AllowGC allowGC>
+ static JSLinearString* toString(JSContext* cx, Handle<BigInt*> x,
+ uint8_t radix);
+ template <typename CharT>
+ static BigInt* parseLiteral(JSContext* cx,
+ const mozilla::Range<const CharT> chars,
+ bool* haveParseError,
+ js::gc::Heap heap = js::gc::Heap::Default);
+ template <typename CharT>
+ static BigInt* parseLiteralDigits(JSContext* cx,
+ const mozilla::Range<const CharT> chars,
+ unsigned radix, bool isNegative,
+ bool* haveParseError,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+ template <typename CharT>
+ static bool literalIsZero(const mozilla::Range<const CharT> chars);
+
+ static int8_t compare(BigInt* lhs, BigInt* rhs);
+ static bool equal(BigInt* lhs, BigInt* rhs);
+ static bool equal(BigInt* lhs, double rhs);
+ static JS::Result<bool> equal(JSContext* cx, Handle<BigInt*> lhs,
+ HandleString rhs);
+ static JS::Result<bool> looselyEqual(JSContext* cx, Handle<BigInt*> lhs,
+ HandleValue rhs);
+
+ static bool lessThan(BigInt* x, BigInt* y);
+ // These methods return Nothing when the non-BigInt operand is NaN
+ // or a string that can't be interpreted as a BigInt.
+ static mozilla::Maybe<bool> lessThan(BigInt* lhs, double rhs);
+ static mozilla::Maybe<bool> lessThan(double lhs, BigInt* rhs);
+ static bool lessThan(JSContext* cx, Handle<BigInt*> lhs, HandleString rhs,
+ mozilla::Maybe<bool>& res);
+ static bool lessThan(JSContext* cx, HandleString lhs, Handle<BigInt*> rhs,
+ mozilla::Maybe<bool>& res);
+ static bool lessThan(JSContext* cx, HandleValue lhs, HandleValue rhs,
+ mozilla::Maybe<bool>& res);
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ void dump() const; // Debugger-friendly stderr dump.
+ void dump(js::GenericPrinter& out) const;
+#endif
+
+ public:
+ static constexpr size_t DigitBits = sizeof(Digit) * CHAR_BIT;
+
+ private:
+ static constexpr size_t HalfDigitBits = DigitBits / 2;
+ static constexpr Digit HalfDigitMask = (1ull << HalfDigitBits) - 1;
+
+ static_assert(DigitBits == 32 || DigitBits == 64,
+ "Unexpected BigInt Digit size");
+
+ // Limit the size of bigint values to 1 million bits, to prevent excessive
+ // memory usage. This limit may be raised in the future if needed. Note
+ // however that many parts of the implementation rely on being able to
+ // count and index bits using 32-bit signed ints, so until those sites are
+ // fixed, the practical limit is 0x7fffffff bits.
+ static constexpr size_t MaxBitLength = 1024 * 1024;
+ static constexpr size_t MaxDigitLength = MaxBitLength / DigitBits;
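+
+ // At this limit a value occupies at most MaxBitLength / DigitBits digits:
+ // 16384 64-bit digits or 32768 32-bit digits, 128 KiB of digit storage
+ // either way.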
+
+ // BigInts can be serialized to strings of radix between 2 and 36. For a
+ // given bigint, radix 2 will take the most characters (one per bit).
+ // Ensure that the max bigint size is small enough so that we can fit the
+ // corresponding character count into a size_t, with space for a possible
+ // sign prefix.
+ static_assert(MaxBitLength <= std::numeric_limits<size_t>::max() - 1,
+ "BigInt max length must be small enough to be serialized as a "
+ "binary string");
+
+ static size_t calculateMaximumCharactersRequired(HandleBigInt x,
+ unsigned radix);
+ [[nodiscard]] static bool calculateMaximumDigitsRequired(JSContext* cx,
+ uint8_t radix,
+ size_t charCount,
+ size_t* result);
+
+ static bool absoluteDivWithDigitDivisor(
+ JSContext* cx, Handle<BigInt*> x, Digit divisor,
+ const mozilla::Maybe<MutableHandle<BigInt*>>& quotient, Digit* remainder,
+ bool quotientNegative);
+ static void internalMultiplyAdd(BigInt* source, Digit factor, Digit summand,
+ unsigned, BigInt* result);
+ static void multiplyAccumulate(BigInt* multiplicand, Digit multiplier,
+ BigInt* accumulator,
+ unsigned accumulatorIndex);
+ static bool absoluteDivWithBigIntDivisor(
+ JSContext* cx, Handle<BigInt*> dividend, Handle<BigInt*> divisor,
+ const mozilla::Maybe<MutableHandle<BigInt*>>& quotient,
+ const mozilla::Maybe<MutableHandle<BigInt*>>& remainder,
+ bool quotientNegative);
+
+ enum class LeftShiftMode { SameSizeResult, AlwaysAddOneDigit };
+
+ static BigInt* absoluteLeftShiftAlwaysCopy(JSContext* cx, Handle<BigInt*> x,
+ unsigned shift, LeftShiftMode);
+ static bool productGreaterThan(Digit factor1, Digit factor2, Digit high,
+ Digit low);
+ static BigInt* lshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y);
+ static BigInt* rshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y);
+ static BigInt* rshByMaximum(JSContext* cx, bool isNegative);
+ static BigInt* truncateAndSubFromPowerOfTwo(JSContext* cx, HandleBigInt x,
+ uint64_t bits,
+ bool resultNegative);
+
+ Digit absoluteInplaceAdd(BigInt* summand, unsigned startIndex);
+ Digit absoluteInplaceSub(BigInt* subtrahend, unsigned startIndex);
+ void inplaceRightShiftLowZeroBits(unsigned shift);
+ void inplaceMultiplyAdd(Digit multiplier, Digit part);
+
+ // The result of a SymmetricTrim bitwise op has as many digits as the
+ // smaller operand. A SymmetricFill bitwise op result has as many digits as
+ // the larger operand, with high digits (if any) copied from the larger
+ // operand. AsymmetricFill is like SymmetricFill, except the result has as
+ // many digits as the first operand; this kind is used for the and-not
+ // operation.
+ enum class BitwiseOpKind { SymmetricTrim, SymmetricFill, AsymmetricFill };
+
+ template <BitwiseOpKind kind, typename BitwiseOp>
+ static BigInt* absoluteBitwiseOp(JSContext* cx, Handle<BigInt*> x,
+ Handle<BigInt*> y, BitwiseOp&& op);
+
+ // Return `|x| & |y|`.
+ static BigInt* absoluteAnd(JSContext* cx, Handle<BigInt*> x,
+ Handle<BigInt*> y);
+
+ // Return `|x| | |y|`.
+ static BigInt* absoluteOr(JSContext* cx, Handle<BigInt*> x,
+ Handle<BigInt*> y);
+
+ // Return `|x| & ~|y|`.
+ static BigInt* absoluteAndNot(JSContext* cx, Handle<BigInt*> x,
+ Handle<BigInt*> y);
+
+ // Return `|x| ^ |y|`.
+ static BigInt* absoluteXor(JSContext* cx, Handle<BigInt*> x,
+ Handle<BigInt*> y);
+
+ // Return `(|x| + 1) * (resultNegative ? -1 : +1)`.
+ static BigInt* absoluteAddOne(JSContext* cx, Handle<BigInt*> x,
+ bool resultNegative);
+
+ // Return `(|x| - 1) * (resultNegative ? -1 : +1)`, with the precondition that
+ // |x| != 0.
+ static BigInt* absoluteSubOne(JSContext* cx, Handle<BigInt*> x,
+ bool resultNegative = false);
+
+ // Return `a + b`, incrementing `*carry` if the addition overflows.
+ static inline Digit digitAdd(Digit a, Digit b, Digit* carry) {
+ Digit result = a + b;
+ *carry += static_cast<Digit>(result < a);
+ return result;
+ }
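+
+ // For example, with 64-bit digits, digitAdd(UINT64_MAX, 1, &carry) returns
+ // 0 and bumps `*carry`, which multi-digit addition feeds into the next
+ // digit position.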
+
+ // Return `left - right`, incrementing `*borrow` if the subtraction
+ // underflows.
+ static inline Digit digitSub(Digit left, Digit right, Digit* borrow) {
+ Digit result = left - right;
+ *borrow += static_cast<Digit>(result > left);
+ return result;
+ }
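+
+ // Likewise, with 64-bit digits, digitSub(0, 1, &borrow) returns the
+ // wrapped difference UINT64_MAX and bumps `*borrow` for the next digit
+ // position.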
+
+ // Compute `a * b`, returning the low half of the result and putting the
+ // high half in `*high`.
+ static Digit digitMul(Digit a, Digit b, Digit* high);
+
+ // Divide `(high << DigitBits) + low` by `divisor`, returning the quotient
+ // and storing the remainder in `*remainder`, with the precondition that
+ // `high < divisor` so that the result fits in a Digit.
+ static Digit digitDiv(Digit high, Digit low, Digit divisor, Digit* remainder);
+
+ // Return `(|x| + |y|) * (resultNegative ? -1 : +1)`.
+ static BigInt* absoluteAdd(JSContext* cx, Handle<BigInt*> x,
+ Handle<BigInt*> y, bool resultNegative);
+
+ // Return `(|x| - |y|) * (resultNegative ? -1 : +1)`, with the precondition
+ // that |x| >= |y|.
+ static BigInt* absoluteSub(JSContext* cx, Handle<BigInt*> x,
+ Handle<BigInt*> y, bool resultNegative);
+
+ // If `|x| < |y|` return -1; if `|x| == |y|` return 0; otherwise return 1.
+ static int8_t absoluteCompare(BigInt* lhs, BigInt* rhs);
+
+ static int8_t compare(BigInt* lhs, double rhs);
+
+ template <js::AllowGC allowGC>
+ static JSLinearString* toStringBasePowerOfTwo(JSContext* cx, Handle<BigInt*>,
+ unsigned radix);
+ template <js::AllowGC allowGC>
+ static JSLinearString* toStringSingleDigitBaseTen(JSContext* cx, Digit digit,
+ bool isNegative);
+ static JSLinearString* toStringGeneric(JSContext* cx, Handle<BigInt*>,
+ unsigned radix);
+
+ static BigInt* destructivelyTrimHighZeroDigits(JSContext* cx, BigInt* x);
+
+ bool absFitsInUint64() const { return digitLength() <= 64 / DigitBits; }
+
+ uint64_t uint64FromAbsNonZero() const {
+ MOZ_ASSERT(!isZero());
+
+ uint64_t val = digit(0);
+ if (DigitBits == 32 && digitLength() > 1) {
+ val |= static_cast<uint64_t>(digit(1)) << 32;
+ }
+ return val;
+ }
+
+ friend struct ::JSStructuredCloneReader;
+ friend struct ::JSStructuredCloneWriter;
+
+ BigInt(const BigInt& other) = delete;
+ void operator=(const BigInt& other) = delete;
+
+ public:
+ static constexpr size_t offsetOfFlags() { return offsetOfHeaderFlags(); }
+ static constexpr size_t offsetOfLength() { return offsetOfHeaderLength(); }
+
+ static constexpr size_t signBitMask() { return SignBit; }
+
+ private:
+ // To help avoid writing Spectre-unsafe code, we only allow MacroAssembler to
+ // call the methods below.
+ friend class js::jit::MacroAssembler;
+
+ static size_t offsetOfInlineDigits() {
+ return offsetof(BigInt, inlineDigits_);
+ }
+
+ static size_t offsetOfHeapDigits() { return offsetof(BigInt, heapDigits_); }
+
+ static constexpr size_t inlineDigitsLength() { return InlineDigitsLength; }
+
+ private:
+ friend class js::gc::TenuringTracer;
+};
+
+static_assert(
+ sizeof(BigInt) >= js::gc::MinCellSize,
+ "sizeof(BigInt) must be greater than the minimum allocation size");
+
+static_assert(
+ sizeof(BigInt) == js::gc::MinCellSize,
+ "sizeof(BigInt) intended to be the same as the minimum allocation size");
+
+} // namespace JS
+
+namespace js {
+
+template <AllowGC allowGC>
+extern JSAtom* BigIntToAtom(JSContext* cx, JS::HandleBigInt bi);
+
+extern JS::BigInt* NumberToBigInt(JSContext* cx, double d);
+
+// Parse a BigInt from a string, using the method specified for StringToBigInt.
+// Used by the BigInt constructor among other places.
+extern JS::Result<JS::BigInt*> StringToBigInt(JSContext* cx,
+ JS::Handle<JSString*> str);
+
+// Parse a BigInt from an already-validated numeric literal. Used by the
+// parser. Can only fail in out-of-memory situations.
+extern JS::BigInt* ParseBigIntLiteral(
+ JSContext* cx, const mozilla::Range<const char16_t>& chars);
+
+// Check an already-validated numeric literal for a non-zero value. Used by
+// the parser's node folder in deferred mode.
+extern bool BigIntLiteralIsZero(const mozilla::Range<const char16_t>& chars);
+
+extern JS::BigInt* ToBigInt(JSContext* cx, JS::Handle<JS::Value> v);
+extern JS::Result<int64_t> ToBigInt64(JSContext* cx, JS::Handle<JS::Value> v);
+extern JS::Result<uint64_t> ToBigUint64(JSContext* cx, JS::Handle<JS::Value> v);
+
+} // namespace js
+
+#endif
diff --git a/js/src/vm/BindingKind.h b/js/src/vm/BindingKind.h
new file mode 100644
index 0000000000..793aa7e82f
--- /dev/null
+++ b/js/src/vm/BindingKind.h
@@ -0,0 +1,111 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BindingKind_h
+#define vm_BindingKind_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF
+#include "mozilla/Casting.h" // mozilla::AssertedCast
+
+#include <stdint.h> // uint16_t, uint32_t
+
+#include "vm/BytecodeUtil.h" // LOCALNO_LIMIT, ENVCOORD_SLOT_LIMIT
+
+namespace js {
+
+enum class BindingKind : uint8_t {
+ Import,
+ FormalParameter,
+ Var,
+ Let,
+ Const,
+
+ // So you think named lambda callee names are consts? Nope! They don't
+ // throw when being assigned to in sloppy mode.
+ NamedLambdaCallee,
+
+ // ClassBodyScope bindings that aren't bindings in the spec, but are put into
+ // a scope as an implementation detail: `.privateBrand`,
+ // `.staticInitializers`, private names, and private accessor functions.
+ Synthetic,
+
+ // ClassBodyScope binding that stores the function object for a non-static
+ // private method.
+ PrivateMethod,
+};
+
+static inline bool BindingKindIsLexical(BindingKind kind) {
+ return kind == BindingKind::Let || kind == BindingKind::Const;
+}
+
+class BindingLocation {
+ public:
+ enum class Kind {
+ Global,
+ Argument,
+ Frame,
+ Environment,
+ Import,
+ NamedLambdaCallee
+ };
+
+ private:
+ Kind kind_;
+ uint32_t slot_;
+
+ BindingLocation(Kind kind, uint32_t slot) : kind_(kind), slot_(slot) {}
+
+ public:
+ static BindingLocation Global() {
+ return BindingLocation(Kind::Global, UINT32_MAX);
+ }
+
+ static BindingLocation Argument(uint16_t slot) {
+ return BindingLocation(Kind::Argument, slot);
+ }
+
+ static BindingLocation Frame(uint32_t slot) {
+ MOZ_ASSERT(slot < LOCALNO_LIMIT);
+ return BindingLocation(Kind::Frame, slot);
+ }
+
+ static BindingLocation Environment(uint32_t slot) {
+ MOZ_ASSERT(slot < ENVCOORD_SLOT_LIMIT);
+ return BindingLocation(Kind::Environment, slot);
+ }
+
+ static BindingLocation Import() {
+ return BindingLocation(Kind::Import, UINT32_MAX);
+ }
+
+ static BindingLocation NamedLambdaCallee() {
+ return BindingLocation(Kind::NamedLambdaCallee, UINT32_MAX);
+ }
+
+ bool operator==(const BindingLocation& other) const {
+ return kind_ == other.kind_ && slot_ == other.slot_;
+ }
+
+ bool operator!=(const BindingLocation& other) const {
+ return !operator==(other);
+ }
+
+ Kind kind() const { return kind_; }
+
+ uint32_t slot() const {
+ MOZ_ASSERT(kind_ == Kind::Frame || kind_ == Kind::Environment);
+ return slot_;
+ }
+
+ uint16_t argumentSlot() const {
+ MOZ_ASSERT(kind_ == Kind::Argument);
+ return mozilla::AssertedCast<uint16_t>(slot_);
+ }
+};
+
+} // namespace js
+
+#endif // vm_BindingKind_h
diff --git a/js/src/vm/BooleanObject-inl.h b/js/src/vm/BooleanObject-inl.h
new file mode 100644
index 0000000000..a8d9376403
--- /dev/null
+++ b/js/src/vm/BooleanObject-inl.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BooleanObject_inl_h
+#define vm_BooleanObject_inl_h
+
+#include "vm/BooleanObject.h"
+
+#include "vm/JSObject-inl.h"
+
+namespace js {
+
+inline BooleanObject* BooleanObject::create(
+ JSContext* cx, bool b, HandleObject proto /* = nullptr */) {
+ BooleanObject* obj = NewObjectWithClassProto<BooleanObject>(cx, proto);
+ if (!obj) {
+ return nullptr;
+ }
+ obj->setPrimitiveValue(b);
+ return obj;
+}
+
+} // namespace js
+
+#endif /* vm_BooleanObject_inl_h */
diff --git a/js/src/vm/BooleanObject.h b/js/src/vm/BooleanObject.h
new file mode 100644
index 0000000000..123f255d6a
--- /dev/null
+++ b/js/src/vm/BooleanObject.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BooleanObject_h
+#define vm_BooleanObject_h
+
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class BooleanObject : public NativeObject {
+ /* Stores this Boolean object's [[PrimitiveValue]]. */
+ static const unsigned PRIMITIVE_VALUE_SLOT = 0;
+
+ static const ClassSpec classSpec_;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+
+ static const JSClass class_;
+
+ /*
+ * Creates a new Boolean object boxing the given primitive bool.
+ * If proto is nullptr, the [[Prototype]] will default to Boolean.prototype.
+ */
+ static inline BooleanObject* create(JSContext* cx, bool b,
+ HandleObject proto = nullptr);
+
+ bool unbox() const { return getFixedSlot(PRIMITIVE_VALUE_SLOT).toBoolean(); }
+
+ private:
+ static JSObject* createPrototype(JSContext* cx, JSProtoKey key);
+
+ inline void setPrimitiveValue(bool b) {
+ setFixedSlot(PRIMITIVE_VALUE_SLOT, BooleanValue(b));
+ }
+};
+
+} // namespace js
+
+#endif /* vm_BooleanObject_h */
diff --git a/js/src/vm/BoundFunctionObject.cpp b/js/src/vm/BoundFunctionObject.cpp
new file mode 100644
index 0000000000..6d11611f90
--- /dev/null
+++ b/js/src/vm/BoundFunctionObject.cpp
@@ -0,0 +1,534 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/BoundFunctionObject.h"
+
+#include <string_view>
+
+#include "util/StringBuffer.h"
+#include "vm/Interpreter.h"
+#include "vm/Shape.h"
+#include "vm/Stack.h"
+
+#include "gc/ObjectKind-inl.h"
+#include "vm/JSFunction-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/Shape-inl.h"
+
+using namespace js;
+
+// Helper function to initialize `args` with all bound arguments + the arguments
+// supplied in `callArgs`.
+template <typename Args>
+static MOZ_ALWAYS_INLINE void FillArguments(Args& args,
+ BoundFunctionObject* bound,
+ size_t numBoundArgs,
+ const CallArgs& callArgs) {
+ MOZ_ASSERT(args.length() == numBoundArgs + callArgs.length());
+
+ if (numBoundArgs <= BoundFunctionObject::MaxInlineBoundArgs) {
+ for (size_t i = 0; i < numBoundArgs; i++) {
+ args[i].set(bound->getInlineBoundArg(i));
+ }
+ } else {
+ ArrayObject* boundArgs = bound->getBoundArgsArray();
+ for (size_t i = 0; i < numBoundArgs; i++) {
+ args[i].set(boundArgs->getDenseElement(i));
+ }
+ }
+
+ for (size_t i = 0; i < callArgs.length(); i++) {
+ args[numBoundArgs + i].set(callArgs[i]);
+ }
+}
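+
+// For example, given `const g = f.bind(obj, 1, 2)`, calling `g(3)` makes
+// FillArguments produce the argument list [1, 2, 3] that the [[Call]] and
+// [[Construct]] hooks below pass on to `f`.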
+
+// ES2023 10.4.1.1 [[Call]]
+// https://tc39.es/ecma262/#sec-bound-function-exotic-objects-call-thisargument-argumentslist
+// static
+bool BoundFunctionObject::call(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ Rooted<BoundFunctionObject*> bound(cx,
+ &args.callee().as<BoundFunctionObject>());
+
+ // Step 1.
+ Rooted<Value> target(cx, bound->getTargetVal());
+
+ // Step 2.
+ Rooted<Value> boundThis(cx, bound->getBoundThis());
+
+ // Steps 3-4.
+ size_t numBoundArgs = bound->numBoundArgs();
+ InvokeArgs args2(cx);
+ if (!args2.init(cx, uint64_t(numBoundArgs) + args.length())) {
+ return false;
+ }
+ FillArguments(args2, bound, numBoundArgs, args);
+
+ // Step 5.
+ return Call(cx, target, boundThis, args2, args.rval());
+}
+
+// ES2023 10.4.1.2 [[Construct]]
+// https://tc39.es/ecma262/#sec-bound-function-exotic-objects-construct-argumentslist-newtarget
+// static
+bool BoundFunctionObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ Rooted<BoundFunctionObject*> bound(cx,
+ &args.callee().as<BoundFunctionObject>());
+
+ MOZ_ASSERT(bound->isConstructor(),
+ "shouldn't have called this hook if not a constructor");
+
+ // Step 1.
+ Rooted<Value> target(cx, bound->getTargetVal());
+
+ // Step 2.
+ MOZ_ASSERT(IsConstructor(target));
+
+ // Steps 3-4.
+ size_t numBoundArgs = bound->numBoundArgs();
+ ConstructArgs args2(cx);
+ if (!args2.init(cx, uint64_t(numBoundArgs) + args.length())) {
+ return false;
+ }
+ FillArguments(args2, bound, numBoundArgs, args);
+
+ // Step 5.
+ Rooted<Value> newTarget(cx, args.newTarget());
+ if (newTarget == ObjectValue(*bound)) {
+ newTarget = target;
+ }
+
+ // Step 6.
+ Rooted<JSObject*> res(cx);
+ if (!Construct(cx, target, args2, newTarget, &res)) {
+ return false;
+ }
+ args.rval().setObject(*res);
+ return true;
+}
+
+// static
+JSString* BoundFunctionObject::funToString(JSContext* cx, Handle<JSObject*> obj,
+ bool isToSource) {
+ // Implementation of the funToString hook used by Function.prototype.toString.
+
+ // For the non-standard toSource extension, we include "bound" to indicate
+ // it's a bound function.
+ if (isToSource) {
+ static constexpr std::string_view nativeCodeBound =
+ "function bound() {\n [native code]\n}";
+ return NewStringCopy<CanGC>(cx, nativeCodeBound);
+ }
+
+ static constexpr std::string_view nativeCode =
+ "function() {\n [native code]\n}";
+ return NewStringCopy<CanGC>(cx, nativeCode);
+}
+
+// static
+SharedShape* BoundFunctionObject::assignInitialShape(
+ JSContext* cx, Handle<BoundFunctionObject*> obj) {
+ MOZ_ASSERT(obj->empty());
+
+ constexpr PropertyFlags propFlags = {PropertyFlag::Configurable};
+ if (!NativeObject::addPropertyInReservedSlot(cx, obj, cx->names().length,
+ LengthSlot, propFlags)) {
+ return nullptr;
+ }
+ if (!NativeObject::addPropertyInReservedSlot(cx, obj, cx->names().name,
+ NameSlot, propFlags)) {
+ return nullptr;
+ }
+
+ SharedShape* shape = obj->sharedShape();
+ if (shape->proto() == TaggedProto(&cx->global()->getFunctionPrototype())) {
+ cx->global()->setBoundFunctionShapeWithDefaultProto(shape);
+ }
+ return shape;
+}
+
+static MOZ_ALWAYS_INLINE bool ComputeLengthValue(
+ JSContext* cx, Handle<BoundFunctionObject*> bound, Handle<JSObject*> target,
+ size_t numBoundArgs, double* length) {
+ *length = 0.0;
+
+ // Try to avoid invoking the JSFunction resolve hook.
+ if (target->is<JSFunction>() &&
+ !target->as<JSFunction>().hasResolvedLength()) {
+ uint16_t targetLength;
+ if (!JSFunction::getUnresolvedLength(cx, target.as<JSFunction>(),
+ &targetLength)) {
+ return false;
+ }
+
+ if (size_t(targetLength) > numBoundArgs) {
+ *length = size_t(targetLength) - numBoundArgs;
+ }
+ return true;
+ }
+
+ // Use a fast path for getting the .length value if the target is a bound
+ // function with its initial shape.
+ Value targetLength;
+ if (target->is<BoundFunctionObject>() && target->shape() == bound->shape()) {
+ BoundFunctionObject* targetFn = &target->as<BoundFunctionObject>();
+ targetLength = targetFn->getLengthForInitialShape();
+ } else {
+ bool hasLength;
+ Rooted<PropertyKey> key(cx, NameToId(cx->names().length));
+ if (!HasOwnProperty(cx, target, key, &hasLength)) {
+ return false;
+ }
+
+ if (!hasLength) {
+ return true;
+ }
+
+ Rooted<Value> targetLengthRoot(cx);
+ if (!GetProperty(cx, target, target, key, &targetLengthRoot)) {
+ return false;
+ }
+ targetLength = targetLengthRoot;
+ }
+
+ if (targetLength.isNumber()) {
+ *length = std::max(
+ 0.0, JS::ToInteger(targetLength.toNumber()) - double(numBoundArgs));
+ }
+ return true;
+}
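+
+// Illustrative example: a target with `length` 3 and two bound arguments
+// yields max(0, 3 - 2) = 1; if the target's `length` is missing or not a
+// number, the bound function's `length` stays 0.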
+
+static MOZ_ALWAYS_INLINE JSAtom* AppendBoundFunctionPrefix(JSContext* cx,
+ JSString* str) {
+ auto& cache = cx->zone()->boundPrefixCache();
+
+ JSAtom* strAtom = str->isAtom() ? &str->asAtom() : nullptr;
+ if (strAtom) {
+ if (auto p = cache.lookup(strAtom)) {
+ return p->value();
+ }
+ }
+
+ StringBuffer sb(cx);
+ if (!sb.append("bound ") || !sb.append(str)) {
+ return nullptr;
+ }
+ JSAtom* atom = sb.finishAtom();
+ if (!atom) {
+ return nullptr;
+ }
+
+ if (strAtom) {
+ (void)cache.putNew(strAtom, atom);
+ }
+ return atom;
+}
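+
+// Illustrative example: a target named "add" produces the bound name
+// "bound add"; binding the result again produces "bound bound add". The
+// cache above avoids rebuilding the prefixed atom for repeatedly bound
+// names.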
+
+static MOZ_ALWAYS_INLINE JSAtom* ComputeNameValue(
+ JSContext* cx, Handle<BoundFunctionObject*> bound,
+ Handle<JSObject*> target) {
+ // Try to avoid invoking the JSFunction resolve hook.
+ JSString* name = nullptr;
+ if (target->is<JSFunction>() && !target->as<JSFunction>().hasResolvedName()) {
+ JSFunction* targetFn = &target->as<JSFunction>();
+ name = targetFn->infallibleGetUnresolvedName(cx);
+ } else {
+ // Use a fast path for getting the .name value if the target is a bound
+ // function with its initial shape.
+ Value targetName;
+ if (target->is<BoundFunctionObject>() &&
+ target->shape() == bound->shape()) {
+ BoundFunctionObject* targetFn = &target->as<BoundFunctionObject>();
+ targetName = targetFn->getNameForInitialShape();
+ } else {
+ Rooted<Value> targetNameRoot(cx);
+ if (!GetProperty(cx, target, target, cx->names().name, &targetNameRoot)) {
+ return nullptr;
+ }
+ targetName = targetNameRoot;
+ }
+ if (!targetName.isString()) {
+ return cx->names().boundWithSpace;
+ }
+ name = targetName.toString();
+ }
+
+ return AppendBoundFunctionPrefix(cx, name);
+}
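+
+// Illustrative example: if the target's `name` value is not a string (e.g.
+// it was redefined as a number), the bound function's name falls back to
+// the bare "bound " prefix via `boundWithSpace` above.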
+
+// ES2023 20.2.3.2 Function.prototype.bind
+// https://tc39.es/ecma262/#sec-function.prototype.bind
+// static
+bool BoundFunctionObject::functionBind(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Steps 1-2.
+ if (!IsCallable(args.thisv())) {
+ ReportIncompatibleMethod(cx, args, &FunctionClass);
+ return false;
+ }
+
+ if (MOZ_UNLIKELY(args.length() > ARGS_LENGTH_MAX)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TOO_MANY_ARGUMENTS);
+ return false;
+ }
+
+ Rooted<JSObject*> target(cx, &args.thisv().toObject());
+
+ BoundFunctionObject* bound =
+ functionBindImpl(cx, target, args.array(), args.length(), nullptr);
+ if (!bound) {
+ return false;
+ }
+
+ // Step 11.
+ args.rval().setObject(*bound);
+ return true;
+}
+
+// ES2023 20.2.3.2 Function.prototype.bind
+// https://tc39.es/ecma262/#sec-function.prototype.bind
+//
+// ES2023 10.4.1.3 BoundFunctionCreate
+// https://tc39.es/ecma262/#sec-boundfunctioncreate
+//
+// BoundFunctionCreate has been inlined in Function.prototype.bind for
+// performance reasons.
+//
+// static
+BoundFunctionObject* BoundFunctionObject::functionBindImpl(
+ JSContext* cx, Handle<JSObject*> target, Value* args, uint32_t argc,
+ Handle<BoundFunctionObject*> maybeBound) {
+ MOZ_ASSERT(target->isCallable());
+
+ // Make sure the arguments on the stack are rooted when we're called directly
+ // from JIT code.
+ RootedExternalValueArray argsRoot(cx, argc, args);
+
+ size_t numBoundArgs = argc > 0 ? argc - 1 : 0;
+ MOZ_ASSERT(numBoundArgs <= ARGS_LENGTH_MAX, "ensured by callers");
+
+ // If this assertion fails, make sure we use the correct AllocKind and that we
+ // use all of its slots (consider increasing MaxInlineBoundArgs).
+ static_assert(gc::GetGCKindSlots(allocKind) == SlotCount);
+
+ // ES2023 10.4.1.3 BoundFunctionCreate
+ // Steps 1-5.
+ Rooted<BoundFunctionObject*> bound(cx);
+ if (maybeBound) {
+ // We allocated a bound function in JIT code. In the uncommon case of the
+ // target not having Function.prototype as proto, we have to set the right
+ // proto here.
+ bound = maybeBound;
+ if (MOZ_UNLIKELY(bound->staticPrototype() != target->staticPrototype())) {
+ Rooted<JSObject*> proto(cx, target->staticPrototype());
+ if (!SetPrototype(cx, bound, proto)) {
+ return nullptr;
+ }
+ }
+ } else {
+ // Step 1.
+ Rooted<JSObject*> proto(cx);
+ if (!GetPrototype(cx, target, &proto)) {
+ return nullptr;
+ }
+
+ // Steps 2-5.
+ if (proto == &cx->global()->getFunctionPrototype() &&
+ cx->global()->maybeBoundFunctionShapeWithDefaultProto()) {
+ Rooted<SharedShape*> shape(
+ cx, cx->global()->maybeBoundFunctionShapeWithDefaultProto());
+ JSObject* obj =
+ NativeObject::create(cx, allocKind, gc::Heap::Default, shape);
+ if (!obj) {
+ return nullptr;
+ }
+ bound = &obj->as<BoundFunctionObject>();
+ } else {
+ bound = NewObjectWithGivenProto<BoundFunctionObject>(cx, proto);
+ if (!bound) {
+ return nullptr;
+ }
+ if (!SharedShape::ensureInitialCustomShape<BoundFunctionObject>(cx,
+ bound)) {
+ return nullptr;
+ }
+ }
+ }
+
+ MOZ_ASSERT(bound->lookupPure(cx->names().length)->slot() == LengthSlot);
+ MOZ_ASSERT(bound->lookupPure(cx->names().name)->slot() == NameSlot);
+
+ // Steps 6 and 9.
+ bound->initFlags(numBoundArgs, target->isConstructor());
+
+ // Step 7.
+ bound->initReservedSlot(TargetSlot, ObjectValue(*target));
+
+ // Step 8.
+ if (argc > 0) {
+ bound->initReservedSlot(BoundThisSlot, args[0]);
+ }
+
+ if (numBoundArgs <= MaxInlineBoundArgs) {
+ for (size_t i = 0; i < numBoundArgs; i++) {
+ bound->initReservedSlot(BoundArg0Slot + i, args[i + 1]);
+ }
+ } else {
+ ArrayObject* arr = NewDenseCopiedArray(cx, numBoundArgs, args + 1);
+ if (!arr) {
+ return nullptr;
+ }
+ bound->initReservedSlot(BoundArg0Slot, ObjectValue(*arr));
+ }
+
+ // ES2023 20.2.3.2 Function.prototype.bind
+ // Step 4.
+ double length = 0.0;
+
+ // Steps 5-6.
+ if (!ComputeLengthValue(cx, bound, target, numBoundArgs, &length)) {
+ return nullptr;
+ }
+
+ // Step 7.
+ bound->initLength(length);
+
+ // Steps 8-9.
+ JSAtom* name = ComputeNameValue(cx, bound, target);
+ if (!name) {
+ return nullptr;
+ }
+
+ // Step 10.
+ bound->initName(name);
+
+ // Step 11.
+ return bound;
+}
+
+// static
+BoundFunctionObject* BoundFunctionObject::createWithTemplate(
+ JSContext* cx, Handle<BoundFunctionObject*> templateObj) {
+ Rooted<SharedShape*> shape(cx, templateObj->sharedShape());
+ JSObject* obj = NativeObject::create(cx, allocKind, gc::Heap::Default, shape);
+ if (!obj) {
+ return nullptr;
+ }
+ BoundFunctionObject* bound = &obj->as<BoundFunctionObject>();
+ bound->initFlags(templateObj->numBoundArgs(), templateObj->isConstructor());
+ bound->initLength(templateObj->getLengthForInitialShape().toInt32());
+ bound->initName(&templateObj->getNameForInitialShape().toString()->asAtom());
+ return bound;
+}
+
+// static
+BoundFunctionObject* BoundFunctionObject::functionBindSpecializedBaseline(
+ JSContext* cx, Handle<JSObject*> target, Value* args, uint32_t argc,
+ Handle<BoundFunctionObject*> templateObj) {
+ // Root the Values on the stack.
+ RootedExternalValueArray argsRoot(cx, argc, args);
+
+ MOZ_ASSERT(target->is<JSFunction>() || target->is<BoundFunctionObject>());
+ MOZ_ASSERT(target->isCallable());
+ MOZ_ASSERT(target->isConstructor() == templateObj->isConstructor());
+ MOZ_ASSERT(target->staticPrototype() == templateObj->staticPrototype());
+
+ size_t numBoundArgs = argc > 0 ? argc - 1 : 0;
+ MOZ_ASSERT(numBoundArgs <= MaxInlineBoundArgs);
+
+ BoundFunctionObject* bound = createWithTemplate(cx, templateObj);
+ if (!bound) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(bound->lookupPure(cx->names().length)->slot() == LengthSlot);
+ MOZ_ASSERT(bound->lookupPure(cx->names().name)->slot() == NameSlot);
+
+ bound->initReservedSlot(TargetSlot, ObjectValue(*target));
+ if (argc > 0) {
+ bound->initReservedSlot(BoundThisSlot, args[0]);
+ }
+ for (size_t i = 0; i < numBoundArgs; i++) {
+ bound->initReservedSlot(BoundArg0Slot + i, args[i + 1]);
+ }
+ return bound;
+}
+
+// static
+BoundFunctionObject* BoundFunctionObject::createTemplateObject(JSContext* cx) {
+ Rooted<JSObject*> proto(cx, &cx->global()->getFunctionPrototype());
+ Rooted<BoundFunctionObject*> bound(
+ cx, NewTenuredObjectWithGivenProto<BoundFunctionObject>(cx, proto));
+ if (!bound) {
+ return nullptr;
+ }
+ if (!SharedShape::ensureInitialCustomShape<BoundFunctionObject>(cx, bound)) {
+ return nullptr;
+ }
+ return bound;
+}
+
+bool BoundFunctionObject::initTemplateSlotsForSpecializedBind(
+ JSContext* cx, uint32_t numBoundArgs, bool targetIsConstructor,
+ uint32_t targetLength, JSAtom* targetName) {
+ size_t len = 0;
+ if (targetLength > numBoundArgs) {
+ len = targetLength - numBoundArgs;
+ }
+
+ JSAtom* name = AppendBoundFunctionPrefix(cx, targetName);
+ if (!name) {
+ return false;
+ }
+
+ initFlags(numBoundArgs, targetIsConstructor);
+ initLength(len);
+ initName(name);
+ return true;
+}
+
+static const JSClassOps classOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ nullptr, // finalize
+ BoundFunctionObject::call, // call
+ BoundFunctionObject::construct, // construct
+ nullptr, // trace
+};
+
+static const ObjectOps objOps = {
+ nullptr, // lookupProperty
+ nullptr, // defineProperty
+ nullptr, // hasProperty
+ nullptr, // getProperty
+ nullptr, // setProperty
+ nullptr, // getOwnPropertyDescriptor
+ nullptr, // deleteProperty
+ nullptr, // getElements
+ BoundFunctionObject::funToString, // funToString
+};
+
+const JSClass BoundFunctionObject::class_ = {
+ "BoundFunctionObject",
+ // Note: bound functions don't have their own constructor or prototype (they
+ // use the prototype of the target object), but we give them a JSProtoKey
+ // because that's what Xray wrappers use to identify builtin objects.
+ JSCLASS_HAS_CACHED_PROTO(JSProto_BoundFunction) |
+ JSCLASS_HAS_RESERVED_SLOTS(BoundFunctionObject::SlotCount),
+ &classOps,
+ JS_NULL_CLASS_SPEC,
+ JS_NULL_CLASS_EXT,
+ &objOps,
+};
diff --git a/js/src/vm/BoundFunctionObject.h b/js/src/vm/BoundFunctionObject.h
new file mode 100644
index 0000000000..566bdc0bed
--- /dev/null
+++ b/js/src/vm/BoundFunctionObject.h
@@ -0,0 +1,174 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BoundFunctionObject_h
+#define vm_BoundFunctionObject_h
+
+#include "jstypes.h"
+
+#include "gc/Policy.h"
+#include "vm/ArrayObject.h"
+#include "vm/JSAtom.h"
+#include "vm/JSObject.h"
+
+namespace js {
+
+// Implementation of Bound Function Exotic Objects.
+// ES2023 10.4.1
+// https://tc39.es/ecma262/#sec-bound-function-exotic-objects
+class BoundFunctionObject : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ // FlagsSlot uses the low bit for the is-constructor flag and the other bits
+ // for the number of arguments.
+ static constexpr size_t IsConstructorFlag = 0b1;
+ static constexpr size_t NumBoundArgsShift = 1;
+
+ // The maximum number of bound arguments that can be stored inline in
+ // BoundArg*Slot.
+ static constexpr size_t MaxInlineBoundArgs = 3;
+
+ private:
+ enum {
+ // The [[BoundTargetFunction]] (a callable object).
+ TargetSlot,
+
+ // The number of arguments + the is-constructor flag, stored as Int32Value.
+ FlagsSlot,
+
+ // The [[BoundThis]] Value.
+ BoundThisSlot,
+
+ // The [[BoundArguments]]. If numBoundArgs exceeds MaxInlineBoundArgs,
+ // BoundArg0Slot will contain an array object that stores the values and the
+ // other two slots will be unused.
+ BoundArg0Slot,
+ BoundArg1Slot,
+ BoundArg2Slot,
+
+ // Initial slots for the `length` and `name` own data properties. Note that
+ // these properties are configurable, so these slots can be mutated when the
+ // object is exposed to JS.
+ LengthSlot,
+ NameSlot,
+
+ SlotCount
+ };
+
+ // The AllocKind should match SlotCount. See assertion in functionBindImpl.
+ static constexpr gc::AllocKind allocKind = gc::AllocKind::OBJECT8_BACKGROUND;
+
+ void initFlags(size_t numBoundArgs, bool isConstructor) {
+ int32_t val = (numBoundArgs << NumBoundArgsShift) | isConstructor;
+ initReservedSlot(FlagsSlot, Int32Value(val));
+ }
+
+ public:
+ size_t numBoundArgs() const {
+ int32_t v = getReservedSlot(FlagsSlot).toInt32();
+ MOZ_ASSERT(v >= 0);
+ return v >> NumBoundArgsShift;
+ }
+ bool isConstructor() const {
+ int32_t v = getReservedSlot(FlagsSlot).toInt32();
+ return v & IsConstructorFlag;
+ }
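+
+ // Illustrative example: two bound arguments on a constructor target encode
+ // as (2 << NumBoundArgsShift) | 1 == 0b101, so numBoundArgs() reads back 2
+ // and isConstructor() reads back true.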
+
+ Value getTargetVal() const { return getReservedSlot(TargetSlot); }
+ JSObject* getTarget() const { return &getTargetVal().toObject(); }
+
+ Value getBoundThis() const { return getReservedSlot(BoundThisSlot); }
+
+ Value getInlineBoundArg(size_t i) const {
+ MOZ_ASSERT(i < numBoundArgs());
+ MOZ_ASSERT(numBoundArgs() <= MaxInlineBoundArgs);
+ return getReservedSlot(BoundArg0Slot + i);
+ }
+ ArrayObject* getBoundArgsArray() const {
+ MOZ_ASSERT(numBoundArgs() > MaxInlineBoundArgs);
+ return &getReservedSlot(BoundArg0Slot).toObject().as<ArrayObject>();
+ }
+ Value getBoundArg(size_t i) const {
+ MOZ_ASSERT(i < numBoundArgs());
+ if (numBoundArgs() <= MaxInlineBoundArgs) {
+ return getInlineBoundArg(i);
+ }
+ return getBoundArgsArray()->getDenseElement(i);
+ }
+
+ void initLength(double len) {
+ MOZ_ASSERT(getReservedSlot(LengthSlot).isUndefined());
+ initReservedSlot(LengthSlot, NumberValue(len));
+ }
+ void initName(JSAtom* name) {
+ MOZ_ASSERT(getReservedSlot(NameSlot).isUndefined());
+ initReservedSlot(NameSlot, StringValue(name));
+ }
+
+ // Get the `length` and `name` property values when the object has the
+ // original shape. See comment for LengthSlot and NameSlot.
+ Value getLengthForInitialShape() const { return getReservedSlot(LengthSlot); }
+ Value getNameForInitialShape() const { return getReservedSlot(NameSlot); }
+
+ // The [[Call]] and [[Construct]] hooks.
+ static bool call(JSContext* cx, unsigned argc, Value* vp);
+ static bool construct(JSContext* cx, unsigned argc, Value* vp);
+
+ // The JSFunToStringOp implementation for Function.prototype.toString.
+ static JSString* funToString(JSContext* cx, Handle<JSObject*> obj,
+ bool isToSource);
+
+ // Implementation of Function.prototype.bind.
+ static bool functionBind(JSContext* cx, unsigned argc, Value* vp);
+
+ static SharedShape* assignInitialShape(JSContext* cx,
+ Handle<BoundFunctionObject*> obj);
+
+ static BoundFunctionObject* functionBindImpl(
+ JSContext* cx, Handle<JSObject*> target, Value* args, uint32_t argc,
+ Handle<BoundFunctionObject*> maybeBound);
+
+ static BoundFunctionObject* createWithTemplate(
+ JSContext* cx, Handle<BoundFunctionObject*> templateObj);
+ static BoundFunctionObject* functionBindSpecializedBaseline(
+ JSContext* cx, Handle<JSObject*> target, Value* args, uint32_t argc,
+ Handle<BoundFunctionObject*> templateObj);
+
+ static BoundFunctionObject* createTemplateObject(JSContext* cx);
+
+ bool initTemplateSlotsForSpecializedBind(JSContext* cx, uint32_t numBoundArgs,
+ bool targetIsConstructor,
+ uint32_t targetLength,
+ JSAtom* targetName);
+
+ static constexpr size_t offsetOfTargetSlot() {
+ return getFixedSlotOffset(TargetSlot);
+ }
+ static constexpr size_t offsetOfFlagsSlot() {
+ return getFixedSlotOffset(FlagsSlot);
+ }
+ static constexpr size_t offsetOfBoundThisSlot() {
+ return getFixedSlotOffset(BoundThisSlot);
+ }
+ static constexpr size_t offsetOfFirstInlineBoundArg() {
+ return getFixedSlotOffset(BoundArg0Slot);
+ }
+ static constexpr size_t offsetOfLengthSlot() {
+ return getFixedSlotOffset(LengthSlot);
+ }
+ static constexpr size_t offsetOfNameSlot() {
+ return getFixedSlotOffset(NameSlot);
+ }
+
+ static constexpr size_t targetSlot() { return TargetSlot; }
+ static constexpr size_t boundThisSlot() { return BoundThisSlot; }
+ static constexpr size_t firstInlineBoundArgSlot() { return BoundArg0Slot; }
+};
+
+} // namespace js
+
+#endif /* vm_BoundFunctionObject_h */
diff --git a/js/src/vm/BuildId.cpp b/js/src/vm/BuildId.cpp
new file mode 100644
index 0000000000..6183a79014
--- /dev/null
+++ b/js/src/vm/BuildId.cpp
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* SpiderMonkey buildId-related functionality. */
+
+#include "js/BuildId.h" // JS::BuildIdCharVector, JS::BuildIdOp, JS::GetOptimizedEncodingBuildId, JS::SetProcessBuildIdOp
+
+#include "mozilla/Atomics.h" // mozilla::Atomic
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "vm/Runtime.h" // js::GetBuildId
+#include "wasm/WasmModule.h" // js::wasm::GetOptimizedEncodingBuildId
+
+mozilla::Atomic<JS::BuildIdOp> js::GetBuildId;
+
+JS_PUBLIC_API void JS::SetProcessBuildIdOp(JS::BuildIdOp buildIdOp) {
+ js::GetBuildId = buildIdOp;
+}
+
+JS_PUBLIC_API bool JS::GetOptimizedEncodingBuildId(
+ JS::BuildIdCharVector* buildId) {
+ return js::wasm::GetOptimizedEncodingBuildId(buildId);
+}
diff --git a/js/src/vm/BuiltinObjectKind.cpp b/js/src/vm/BuiltinObjectKind.cpp
new file mode 100644
index 0000000000..dbc6a9ccdc
--- /dev/null
+++ b/js/src/vm/BuiltinObjectKind.cpp
@@ -0,0 +1,205 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/BuiltinObjectKind.h"
+
+#include "jspubtd.h"
+
+#include "frontend/ParserAtom.h"
+#include "vm/GlobalObject.h"
+
+using namespace js;
+
+static JSProtoKey ToProtoKey(BuiltinObjectKind kind) {
+ switch (kind) {
+ case BuiltinObjectKind::Array:
+ return JSProto_Array;
+ case BuiltinObjectKind::ArrayBuffer:
+ return JSProto_ArrayBuffer;
+ case BuiltinObjectKind::Int32Array:
+ return JSProto_Int32Array;
+ case BuiltinObjectKind::Iterator:
+ return JSProto_Iterator;
+ case BuiltinObjectKind::Map:
+ return JSProto_Map;
+ case BuiltinObjectKind::Promise:
+ return JSProto_Promise;
+ case BuiltinObjectKind::RegExp:
+ return JSProto_RegExp;
+ case BuiltinObjectKind::Set:
+ return JSProto_Set;
+ case BuiltinObjectKind::SharedArrayBuffer:
+ return JSProto_SharedArrayBuffer;
+ case BuiltinObjectKind::Symbol:
+ return JSProto_Symbol;
+
+ case BuiltinObjectKind::FunctionPrototype:
+ return JSProto_Function;
+ case BuiltinObjectKind::ObjectPrototype:
+ return JSProto_Object;
+ case BuiltinObjectKind::RegExpPrototype:
+ return JSProto_RegExp;
+ case BuiltinObjectKind::StringPrototype:
+ return JSProto_String;
+
+ case BuiltinObjectKind::DateTimeFormatPrototype:
+ return JSProto_DateTimeFormat;
+ case BuiltinObjectKind::NumberFormatPrototype:
+ return JSProto_NumberFormat;
+
+ case BuiltinObjectKind::None:
+ break;
+ }
+ MOZ_CRASH("Unexpected builtin object kind");
+}
+
+static bool IsPrototype(BuiltinObjectKind kind) {
+ switch (kind) {
+ case BuiltinObjectKind::Array:
+ case BuiltinObjectKind::ArrayBuffer:
+ case BuiltinObjectKind::Int32Array:
+ case BuiltinObjectKind::Iterator:
+ case BuiltinObjectKind::Map:
+ case BuiltinObjectKind::Promise:
+ case BuiltinObjectKind::RegExp:
+ case BuiltinObjectKind::Set:
+ case BuiltinObjectKind::SharedArrayBuffer:
+ case BuiltinObjectKind::Symbol:
+ return false;
+
+ case BuiltinObjectKind::FunctionPrototype:
+ case BuiltinObjectKind::ObjectPrototype:
+ case BuiltinObjectKind::RegExpPrototype:
+ case BuiltinObjectKind::StringPrototype:
+ return true;
+
+ case BuiltinObjectKind::DateTimeFormatPrototype:
+ case BuiltinObjectKind::NumberFormatPrototype:
+ return true;
+
+ case BuiltinObjectKind::None:
+ break;
+ }
+ MOZ_CRASH("Unexpected builtin object kind");
+}
+
+BuiltinObjectKind js::BuiltinConstructorForName(
+ frontend::TaggedParserAtomIndex name) {
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Array()) {
+ return BuiltinObjectKind::Array;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::ArrayBuffer()) {
+ return BuiltinObjectKind::ArrayBuffer;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Int32Array()) {
+ return BuiltinObjectKind::Int32Array;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Iterator()) {
+ return BuiltinObjectKind::Iterator;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Map()) {
+ return BuiltinObjectKind::Map;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Promise()) {
+ return BuiltinObjectKind::Promise;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::RegExp()) {
+ return BuiltinObjectKind::RegExp;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Set()) {
+ return BuiltinObjectKind::Set;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::SharedArrayBuffer()) {
+ return BuiltinObjectKind::SharedArrayBuffer;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Symbol()) {
+ return BuiltinObjectKind::Symbol;
+ }
+ return BuiltinObjectKind::None;
+}
+
+BuiltinObjectKind js::BuiltinPrototypeForName(
+ frontend::TaggedParserAtomIndex name) {
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Function()) {
+ return BuiltinObjectKind::FunctionPrototype;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::Object()) {
+ return BuiltinObjectKind::ObjectPrototype;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::RegExp()) {
+ return BuiltinObjectKind::RegExpPrototype;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::String()) {
+ return BuiltinObjectKind::StringPrototype;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::DateTimeFormat()) {
+ return BuiltinObjectKind::DateTimeFormatPrototype;
+ }
+ if (name == frontend::TaggedParserAtomIndex::WellKnown::NumberFormat()) {
+ return BuiltinObjectKind::NumberFormatPrototype;
+ }
+ return BuiltinObjectKind::None;
+}
+
+JSObject* js::MaybeGetBuiltinObject(GlobalObject* global,
+ BuiltinObjectKind kind) {
+ JSProtoKey key = ToProtoKey(kind);
+ if (IsPrototype(kind)) {
+ return global->maybeGetPrototype(key);
+ }
+ return global->maybeGetConstructor(key);
+}
+
+JSObject* js::GetOrCreateBuiltinObject(JSContext* cx, BuiltinObjectKind kind) {
+ JSProtoKey key = ToProtoKey(kind);
+ if (IsPrototype(kind)) {
+ return GlobalObject::getOrCreatePrototype(cx, key);
+ }
+ return GlobalObject::getOrCreateConstructor(cx, key);
+}
+
+const char* js::BuiltinObjectName(BuiltinObjectKind kind) {
+ switch (kind) {
+ case BuiltinObjectKind::Array:
+ return "Array";
+ case BuiltinObjectKind::ArrayBuffer:
+ return "ArrayBuffer";
+ case BuiltinObjectKind::Int32Array:
+ return "Int32Array";
+ case BuiltinObjectKind::Iterator:
+ return "Iterator";
+ case BuiltinObjectKind::Map:
+ return "Map";
+ case BuiltinObjectKind::Promise:
+ return "Promise";
+ case BuiltinObjectKind::RegExp:
+ return "RegExp";
+ case BuiltinObjectKind::SharedArrayBuffer:
+ return "SharedArrayBuffer";
+ case BuiltinObjectKind::Set:
+ return "Set";
+ case BuiltinObjectKind::Symbol:
+ return "Symbol";
+
+ case BuiltinObjectKind::FunctionPrototype:
+ return "Function.prototype";
+ case BuiltinObjectKind::ObjectPrototype:
+ return "Object.prototype";
+ case BuiltinObjectKind::RegExpPrototype:
+ return "RegExp.prototype";
+ case BuiltinObjectKind::StringPrototype:
+ return "String.prototype";
+
+ case BuiltinObjectKind::DateTimeFormatPrototype:
+ return "DateTimeFormat.prototype";
+ case BuiltinObjectKind::NumberFormatPrototype:
+ return "NumberFormat.prototype";
+
+ case BuiltinObjectKind::None:
+ break;
+ }
+ MOZ_CRASH("Unexpected builtin object kind");
+}
diff --git a/js/src/vm/BuiltinObjectKind.h b/js/src/vm/BuiltinObjectKind.h
new file mode 100644
index 0000000000..30808ef977
--- /dev/null
+++ b/js/src/vm/BuiltinObjectKind.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BuiltinObjectKind_h
+#define vm_BuiltinObjectKind_h
+
+#include <stdint.h>
+
+#include "jstypes.h"
+
+class JS_PUBLIC_API JSAtom;
+struct JS_PUBLIC_API JSContext;
+class JS_PUBLIC_API JSObject;
+
+namespace js {
+
+namespace frontend {
+class TaggedParserAtomIndex;
+}
+
+class GlobalObject;
+
+/**
+ * Built-in objects used by the GetBuiltinConstructor and GetBuiltinPrototype
+ * self-hosted intrinsics.
+ */
+enum class BuiltinObjectKind : uint8_t {
+ // Built-in constructors.
+ Array,
+ ArrayBuffer,
+ Int32Array,
+ Iterator,
+ Map,
+ Promise,
+ RegExp,
+ Set,
+ SharedArrayBuffer,
+ Symbol,
+
+ // Built-in prototypes.
+ FunctionPrototype,
+ ObjectPrototype,
+ RegExpPrototype,
+ StringPrototype,
+
+ // Built-in Intl prototypes.
+ DateTimeFormatPrototype,
+ NumberFormatPrototype,
+
+ // Invalid placeholder.
+ None,
+};
+
+/**
+ * Return the BuiltinObjectKind for the given constructor name. Return
+ * BuiltinObjectKind::None if no matching constructor was found.
+ */
+BuiltinObjectKind BuiltinConstructorForName(
+ frontend::TaggedParserAtomIndex name);
+
+/**
+ * Return the BuiltinObjectKind for the given prototype name. Return
+ * BuiltinObjectKind::None if no matching prototype was found.
+ */
+BuiltinObjectKind BuiltinPrototypeForName(frontend::TaggedParserAtomIndex name);
+
+/**
+ * Return the built-in object if already created for the given global. Otherwise
+ * return nullptr.
+ */
+JSObject* MaybeGetBuiltinObject(GlobalObject* global, BuiltinObjectKind kind);
+
+/**
+ * Return the built-in object for the given global.
+ */
+JSObject* GetOrCreateBuiltinObject(JSContext* cx, BuiltinObjectKind kind);
+
+/**
+ * Return the display name for a built-in object.
+ */
+const char* BuiltinObjectName(BuiltinObjectKind kind);
+
+} // namespace js
+
+#endif /* vm_BuiltinObjectKind_h */
diff --git a/js/src/vm/BytecodeFormatFlags.h b/js/src/vm/BytecodeFormatFlags.h
new file mode 100644
index 0000000000..893f0f0823
--- /dev/null
+++ b/js/src/vm/BytecodeFormatFlags.h
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BytecodeFormatFlags_h
+#define vm_BytecodeFormatFlags_h
+
+/*
+ * [SMDOC] Bytecode Format flags (JOF_*)
+ */
+enum {
+ JOF_BYTE = 0, /* single bytecode, no immediates */
+ JOF_UINT8 = 1, /* unspecified uint8_t argument */
+ JOF_UINT16 = 2, /* unspecified uint16_t argument */
+ JOF_UINT24 = 3, /* unspecified uint24_t argument */
+ JOF_UINT32 = 4, /* unspecified uint32_t argument */
+ JOF_INT8 = 5, /* int8_t literal */
+ JOF_INT32 = 6, /* int32_t literal */
+ JOF_JUMP = 7, /* int32_t jump offset */
+ JOF_TABLESWITCH = 8, /* table switch */
+ JOF_ENVCOORD = 9, /* embedded ScopeCoordinate immediate */
+ JOF_ARGC = 10, /* uint16_t argument count */
+ JOF_QARG = 11, /* function argument index */
+ JOF_LOCAL = 12, /* var or block-local variable */
+ JOF_RESUMEINDEX = 13, /* yield or await resume index */
+ JOF_DOUBLE = 14, /* inline DoubleValue */
+ JOF_GCTHING = 15, /* uint32_t generic gc-thing index */
+ JOF_ATOM = 16, /* uint32_t constant index */
+ JOF_OBJECT = 17, /* uint32_t object index */
+ JOF_REGEXP = 18, /* uint32_t regexp index */
+ JOF_SCOPE = 19, /* uint32_t scope index */
+ JOF_BIGINT = 20, /* uint32_t index for BigInt value */
+ JOF_ICINDEX = 21, /* uint32_t IC index */
+ JOF_LOOPHEAD = 22, /* JSOp::LoopHead, combines JOF_ICINDEX and JOF_UINT8 */
+ JOF_TWO_UINT8 = 23, /* A pair of unspecified uint8_t arguments */
+ JOF_DEBUGCOORD = 24, /* An embedded ScopeCoordinate immediate that may
+ traverse DebugEnvironmentProxies */
+ JOF_SHAPE = 25, /* uint32_t shape index */
+ JOF_STRING = 26, /* uint32_t constant index */
+ JOF_TYPEMASK = 0xFF, /* mask for above immediate types */
+
+ JOF_NAME = 1 << 8, /* name operation */
+ JOF_PROP = 2 << 8, /* obj.prop operation */
+ JOF_ELEM = 3 << 8, /* obj[index] operation */
+ JOF_MODEMASK = 0xFF << 8, /* mask for above addressing modes */
+
+ JOF_PROPSET = 1 << 16, /* property/element/name set operation */
+ JOF_PROPINIT = 1 << 17, /* property/element/name init operation */
+ JOF_CHECKSLOPPY = 1 << 18, /* op can only be generated in sloppy mode */
+ JOF_CHECKSTRICT = 1 << 19, /* op can only be generated in strict mode */
+ JOF_INVOKE = 1 << 20, /* any call, construct, or eval instruction */
+ JOF_CONSTRUCT = 1 << 21, /* invoke instruction using [[Construct]] entry */
+ JOF_SPREAD = 1 << 22, /* invoke instruction using spread argument */
+ JOF_GNAME = 1 << 23, /* predicted global name */
+ JOF_IC = 1 << 24, /* baseline may use an IC for this op */
+ JOF_USES_ENV = 1 << 25, /* op uses the frame's environment chain */
+};
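+
+/*
+ * Illustrative example: an op whose format is (JOF_ATOM | JOF_PROP | JOF_IC)
+ * takes a uint32_t atom-index immediate, performs an obj.prop-style access,
+ * and may be covered by a baseline inline cache.
+ */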
+
+#endif /* vm_BytecodeFormatFlags_h */
diff --git a/js/src/vm/BytecodeIterator-inl.h b/js/src/vm/BytecodeIterator-inl.h
new file mode 100644
index 0000000000..37e42fc88d
--- /dev/null
+++ b/js/src/vm/BytecodeIterator-inl.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BytecodeIterator_inl_h
+#define vm_BytecodeIterator_inl_h
+
+#include "vm/BytecodeIterator.h"
+
+#include "vm/JSScript.h"
+
+namespace js {
+
+inline BytecodeIterator::BytecodeIterator(const JSScript* script)
+ : current_(script, script->code()) {}
+
+// AllBytecodesIterable
+
+inline BytecodeIterator AllBytecodesIterable::begin() {
+ return BytecodeIterator(script_);
+}
+
+inline BytecodeIterator AllBytecodesIterable::end() {
+ return BytecodeIterator(BytecodeLocation(script_, script_->codeEnd()));
+}
+
+// BytecodeLocationRange
+
+inline BytecodeIterator BytecodeLocationRange::begin() {
+ return BytecodeIterator(beginLoc_);
+}
+
+inline BytecodeIterator BytecodeLocationRange::end() {
+ return BytecodeIterator(endLoc_);
+}
+
+} // namespace js
+#endif
diff --git a/js/src/vm/BytecodeIterator.h b/js/src/vm/BytecodeIterator.h
new file mode 100644
index 0000000000..afc84e0451
--- /dev/null
+++ b/js/src/vm/BytecodeIterator.h
@@ -0,0 +1,85 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BytecodeIterator_h
+#define vm_BytecodeIterator_h
+
+#include "vm/BytecodeLocation.h"
+
+namespace js {
+
+class BytecodeIterator {
+ BytecodeLocation current_;
+
+ public:
+ inline explicit BytecodeIterator(const JSScript* script);
+
+ explicit BytecodeIterator(BytecodeLocation loc) : current_(loc) {}
+
+ BytecodeIterator& operator=(const BytecodeIterator&) = default;
+
+ bool operator==(const BytecodeIterator& other) const {
+ return other.current_ == current_;
+ }
+
+ bool operator!=(const BytecodeIterator& other) const {
+ return !(other.current_ == current_);
+ }
+
+ const BytecodeLocation& operator*() const { return current_; }
+
+ const BytecodeLocation* operator->() const { return &current_; }
+
+ // Pre-increment
+ BytecodeIterator& operator++() {
+ current_ = current_.next();
+ return *this;
+ }
+
+ // Post-increment
+ BytecodeIterator operator++(int) {
+ BytecodeIterator previous(*this);
+ current_ = current_.next();
+ return previous;
+ }
+};
+
+// Given a JSScript, allow the construction of a range-based for-loop
+// that will visit all bytecode locations in that script.
+class AllBytecodesIterable {
+ const JSScript* script_;
+
+ public:
+ explicit AllBytecodesIterable(const JSScript* script) : script_(script) {}
+
+ BytecodeIterator begin();
+ BytecodeIterator end();
+};
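+
+// Illustrative usage (a sketch, assuming `script` is a valid JSScript*):
+//
+// AllBytecodesIterable iterable(script);
+// for (const BytecodeLocation& loc : iterable) {
+// JSOp op = loc.getOp();
+// ...
+// }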
+
+// Construct a range-based iterator that will visit all bytecode locations
+// between two given bytecode locations.
+// `beginLoc_` is the bytecode location where the iterator will start, and
+// `endLoc_` is the bytecode location where the iterator will end.
+class BytecodeLocationRange {
+ BytecodeLocation beginLoc_;
+ BytecodeLocation endLoc_;
+
+ public:
+ explicit BytecodeLocationRange(BytecodeLocation beginLoc,
+ BytecodeLocation endLoc)
+ : beginLoc_(beginLoc), endLoc_(endLoc) {
+#ifdef DEBUG
+ MOZ_ASSERT(beginLoc.hasSameScript(endLoc));
+#endif
+ }
+
+ BytecodeIterator begin();
+ BytecodeIterator end();
+};
+
+} // namespace js
+
+#endif
diff --git a/js/src/vm/BytecodeLocation-inl.h b/js/src/vm/BytecodeLocation-inl.h
new file mode 100644
index 0000000000..46c945ddad
--- /dev/null
+++ b/js/src/vm/BytecodeLocation-inl.h
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BytecodeLocation_inl_h
+#define vm_BytecodeLocation_inl_h
+
+#include "vm/BytecodeLocation.h"
+
+#include "vm/JSScript.h"
+
+#include "vm/BytecodeUtil-inl.h"
+#include "vm/JSScript-inl.h"
+
+namespace js {
+
+inline uint32_t BytecodeLocation::bytecodeToOffset(
+ const JSScript* script) const {
+ MOZ_ASSERT(this->isInBounds());
+ return script->pcToOffset(this->rawBytecode_);
+}
+
+inline JSAtom* BytecodeLocation::getAtom(const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ return script->getAtom(this->rawBytecode_);
+}
+
+inline JSString* BytecodeLocation::getString(const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ return script->getString(this->rawBytecode_);
+}
+
+inline PropertyName* BytecodeLocation::getPropertyName(
+ const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ return script->getName(this->rawBytecode_);
+}
+
+inline JS::BigInt* BytecodeLocation::getBigInt(const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ MOZ_ASSERT(is(JSOp::BigInt));
+ return script->getBigInt(this->rawBytecode_);
+}
+
+inline JSObject* BytecodeLocation::getObject(const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ MOZ_ASSERT(is(JSOp::CallSiteObj) || is(JSOp::Object));
+ return script->getObject(this->rawBytecode_);
+}
+
+inline JSFunction* BytecodeLocation::getFunction(const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ MOZ_ASSERT(is(JSOp::Lambda) || is(JSOp::FunWithProto));
+ return script->getFunction(this->rawBytecode_);
+}
+
+inline js::RegExpObject* BytecodeLocation::getRegExp(
+ const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ MOZ_ASSERT(is(JSOp::RegExp));
+ return script->getRegExp(this->rawBytecode_);
+}
+
+inline js::Scope* BytecodeLocation::getScope(const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ return script->getScope(this->rawBytecode_);
+}
+
+inline Scope* BytecodeLocation::innermostScope(const JSScript* script) const {
+ MOZ_ASSERT(this->isValid());
+ return script->innermostScope(this->rawBytecode_);
+}
+
+inline uint32_t BytecodeLocation::tableSwitchCaseOffset(
+ const JSScript* script, uint32_t caseIndex) const {
+ return script->tableSwitchCaseOffset(this->rawBytecode_, caseIndex);
+}
+
+inline uint32_t BytecodeLocation::getJumpTargetOffset(
+ const JSScript* script) const {
+ MOZ_ASSERT(this->isJump());
+ return this->bytecodeToOffset(script) + GET_JUMP_OFFSET(this->rawBytecode_);
+}
+
+inline uint32_t BytecodeLocation::getTableSwitchDefaultOffset(
+ const JSScript* script) const {
+ MOZ_ASSERT(this->is(JSOp::TableSwitch));
+ return this->bytecodeToOffset(script) + GET_JUMP_OFFSET(this->rawBytecode_);
+}
+
+inline BytecodeLocation BytecodeLocation::getTableSwitchDefaultTarget() const {
+ MOZ_ASSERT(is(JSOp::TableSwitch));
+ return BytecodeLocation(*this, rawBytecode_ + GET_JUMP_OFFSET(rawBytecode_));
+}
+
+inline BytecodeLocation BytecodeLocation::getTableSwitchCaseTarget(
+ const JSScript* script, uint32_t caseIndex) const {
+ MOZ_ASSERT(is(JSOp::TableSwitch));
+ jsbytecode* casePC = script->tableSwitchCasePC(rawBytecode_, caseIndex);
+ return BytecodeLocation(*this, casePC);
+}
+
+inline uint32_t BytecodeLocation::useCount() const {
+ return GetUseCount(this->rawBytecode_);
+}
+
+inline uint32_t BytecodeLocation::defCount() const {
+ return GetDefCount(this->rawBytecode_);
+}
+
+} // namespace js
+
+#endif
diff --git a/js/src/vm/BytecodeLocation.cpp b/js/src/vm/BytecodeLocation.cpp
new file mode 100644
index 0000000000..fae05a9275
--- /dev/null
+++ b/js/src/vm/BytecodeLocation.cpp
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/BytecodeLocation-inl.h"
+
+#include "vm/JSScript.h"
+
+using namespace js;
+
+#ifdef DEBUG
+bool BytecodeLocation::isValid(const JSScript* script) const {
+ // Note: Don't create a new BytecodeLocation inside this method: it is
+ // called from the constructor's assertion and would recurse forever.
+ return script->contains(*this) || toRawBytecode() == script->codeEnd();
+}
+
+bool BytecodeLocation::isInBounds(const JSScript* script) const {
+ return script->contains(*this);
+}
+
+const JSScript* BytecodeLocation::getDebugOnlyScript() const {
+ return this->debugOnlyScript_;
+}
+
+#endif // DEBUG
diff --git a/js/src/vm/BytecodeLocation.h b/js/src/vm/BytecodeLocation.h
new file mode 100644
index 0000000000..e5876ed9d2
--- /dev/null
+++ b/js/src/vm/BytecodeLocation.h
@@ -0,0 +1,354 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BytecodeLocation_h
+#define vm_BytecodeLocation_h
+
+#include "frontend/NameAnalysisTypes.h"
+#include "js/TypeDecls.h"
+#include "vm/AsyncFunctionResolveKind.h"
+#include "vm/BuiltinObjectKind.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/CheckIsObjectKind.h" // CheckIsObjectKind
+#include "vm/CompletionKind.h" // CompletionKind
+#include "vm/FunctionPrefixKind.h" // FunctionPrefixKind
+#include "vm/GeneratorResumeKind.h"
+
+namespace js {
+
+using RawBytecodeLocationOffset = uint32_t;
+
+class PropertyName;
+class RegExpObject;
+
+class BytecodeLocationOffset {
+ RawBytecodeLocationOffset rawOffset_;
+
+ public:
+ explicit BytecodeLocationOffset(RawBytecodeLocationOffset offset)
+ : rawOffset_(offset) {}
+
+ RawBytecodeLocationOffset rawOffset() const { return rawOffset_; }
+};
+
+using RawBytecode = jsbytecode*;
+
+// An immutable representation of a program location.
+//
+class BytecodeLocation {
+ RawBytecode rawBytecode_;
+#ifdef DEBUG
+ const JSScript* debugOnlyScript_;
+#endif
+
+ // Construct a new BytecodeLocation, while borrowing scriptIdentity
+ // from some other BytecodeLocation.
+ BytecodeLocation(const BytecodeLocation& loc, RawBytecode pc)
+ : rawBytecode_(pc)
+#ifdef DEBUG
+ ,
+ debugOnlyScript_(loc.debugOnlyScript_)
+#endif
+ {
+ MOZ_ASSERT(isValid());
+ }
+
+ public:
+ // Disallow the creation of an uninitialized location.
+ BytecodeLocation() = delete;
+
+ BytecodeLocation(const JSScript* script, RawBytecode pc)
+ : rawBytecode_(pc)
+#ifdef DEBUG
+ ,
+ debugOnlyScript_(script)
+#endif
+ {
+ MOZ_ASSERT(isValid());
+ }
+
+ RawBytecode toRawBytecode() const { return rawBytecode_; }
+
+#ifdef DEBUG
+ // Return true if this bytecode location is valid for the given script.
+ // This includes the location 1-past the end of the bytecode.
+ bool isValid(const JSScript* script) const;
+
+ // Return true if this bytecode location is within the bounds of the
+ // bytecode for a given script.
+ bool isInBounds(const JSScript* script) const;
+
+ const JSScript* getDebugOnlyScript() const;
+#endif
+
+ inline uint32_t bytecodeToOffset(const JSScript* script) const;
+
+ inline uint32_t tableSwitchCaseOffset(const JSScript* script,
+ uint32_t caseIndex) const;
+
+ inline uint32_t getJumpTargetOffset(const JSScript* script) const;
+
+ inline uint32_t getTableSwitchDefaultOffset(const JSScript* script) const;
+
+ inline BytecodeLocation getTableSwitchDefaultTarget() const;
+ inline BytecodeLocation getTableSwitchCaseTarget(const JSScript* script,
+ uint32_t caseIndex) const;
+
+ inline uint32_t useCount() const;
+ inline uint32_t defCount() const;
+
+ int32_t jumpOffset() const { return GET_JUMP_OFFSET(rawBytecode_); }
+
+ inline JSAtom* getAtom(const JSScript* script) const;
+ inline JSString* getString(const JSScript* script) const;
+ inline PropertyName* getPropertyName(const JSScript* script) const;
+ inline JS::BigInt* getBigInt(const JSScript* script) const;
+ inline JSObject* getObject(const JSScript* script) const;
+ inline JSFunction* getFunction(const JSScript* script) const;
+ inline js::RegExpObject* getRegExp(const JSScript* script) const;
+ inline js::Scope* getScope(const JSScript* script) const;
+
+ uint32_t getSymbolIndex() const {
+ MOZ_ASSERT(is(JSOp::Symbol));
+ return GET_UINT8(rawBytecode_);
+ }
+
+ inline Scope* innermostScope(const JSScript* script) const;
+
+#ifdef DEBUG
+ bool hasSameScript(const BytecodeLocation& other) const {
+ return debugOnlyScript_ == other.debugOnlyScript_;
+ }
+#endif
+
+ // Overloaded operators
+
+ bool operator==(const BytecodeLocation& other) const {
+ MOZ_ASSERT(this->debugOnlyScript_ == other.debugOnlyScript_);
+ return rawBytecode_ == other.rawBytecode_;
+ }
+
+ bool operator!=(const BytecodeLocation& other) const {
+ return !(other == *this);
+ }
+
+ bool operator<(const BytecodeLocation& other) const {
+ MOZ_ASSERT(this->debugOnlyScript_ == other.debugOnlyScript_);
+ return rawBytecode_ < other.rawBytecode_;
+ }
+
+ // It is traditional to represent the rest of the relational operators
+ // using operator<, so we don't need to assert for these.
+ bool operator>(const BytecodeLocation& other) const { return other < *this; }
+
+ bool operator<=(const BytecodeLocation& other) const {
+ return !(other < *this);
+ }
+
+ bool operator>=(const BytecodeLocation& other) const {
+ return !(*this < other);
+ }
+
+ // Return the next bytecode
+ BytecodeLocation next() const {
+ return BytecodeLocation(*this,
+ rawBytecode_ + GetBytecodeLength(rawBytecode_));
+ }
+
+ // Add an offset.
+ BytecodeLocation operator+(const BytecodeLocationOffset& offset) const {
+ return BytecodeLocation(*this, rawBytecode_ + offset.rawOffset());
+ }
+
+ // Identity Checks
+ bool is(JSOp op) const {
+ MOZ_ASSERT(isInBounds());
+ return getOp() == op;
+ }
+
+ // Accessors:
+
+ uint32_t length() const { return GetBytecodeLength(rawBytecode_); }
+
+ bool isJumpTarget() const { return BytecodeIsJumpTarget(getOp()); }
+
+ bool isJump() const { return IsJumpOpcode(getOp()); }
+
+ bool isBackedge() const { return IsBackedgePC(rawBytecode_); }
+
+ bool isBackedgeForLoophead(BytecodeLocation loopHead) const {
+ return IsBackedgeForLoopHead(rawBytecode_, loopHead.rawBytecode_);
+ }
+
+ bool opHasIC() const { return BytecodeOpHasIC(getOp()); }
+
+ bool fallsThrough() const { return BytecodeFallsThrough(getOp()); }
+
+ uint32_t icIndex() const { return GET_ICINDEX(rawBytecode_); }
+
+ uint32_t local() const { return GET_LOCALNO(rawBytecode_); }
+
+ uint16_t arg() const { return GET_ARGNO(rawBytecode_); }
+
+ bool isEqualityOp() const { return IsEqualityOp(getOp()); }
+
+ bool isStrictEqualityOp() const { return IsStrictEqualityOp(getOp()); }
+
+ bool isStrictSetOp() const { return IsStrictSetPC(rawBytecode_); }
+
+ bool isNameOp() const { return IsNameOp(getOp()); }
+
+ bool isSpreadOp() const { return IsSpreadOp(getOp()); }
+
+ bool isInvokeOp() const { return IsInvokeOp(getOp()); }
+
+ bool isGetPropOp() const { return IsGetPropOp(getOp()); }
+ bool isGetElemOp() const { return IsGetElemOp(getOp()); }
+
+ bool isSetPropOp() const { return IsSetPropOp(getOp()); }
+ bool isSetElemOp() const { return IsSetElemOp(getOp()); }
+
+ AsyncFunctionResolveKind getAsyncFunctionResolveKind() {
+ return AsyncFunctionResolveKind(GET_UINT8(rawBytecode_));
+ }
+
+ bool resultIsPopped() const {
+ MOZ_ASSERT(StackDefs(getOp()) == 1);
+ return BytecodeIsPopped(rawBytecode_);
+ }
+
+ // Accessors:
+ JSOp getOp() const { return JSOp(*rawBytecode_); }
+
+ BytecodeLocation getJumpTarget() const {
+ MOZ_ASSERT(isJump());
+ return BytecodeLocation(*this,
+ rawBytecode_ + GET_JUMP_OFFSET(rawBytecode_));
+ }
+
+ // Return the 'low' parameter to the tableswitch opcode
+ int32_t getTableSwitchLow() const {
+ MOZ_ASSERT(is(JSOp::TableSwitch));
+ return GET_JUMP_OFFSET(rawBytecode_ + JUMP_OFFSET_LEN);
+ }
+
+ // Return the 'high' parameter to the tableswitch opcode
+ int32_t getTableSwitchHigh() const {
+ MOZ_ASSERT(is(JSOp::TableSwitch));
+ return GET_JUMP_OFFSET(rawBytecode_ + (2 * JUMP_OFFSET_LEN));
+ }
+
+ uint32_t getPopCount() const {
+ MOZ_ASSERT(is(JSOp::PopN));
+ return GET_UINT16(rawBytecode_);
+ }
+
+ uint32_t getDupAtIndex() const {
+ MOZ_ASSERT(is(JSOp::DupAt));
+ return GET_UINT24(rawBytecode_);
+ }
+
+ uint8_t getPickDepth() const {
+ MOZ_ASSERT(is(JSOp::Pick));
+ return GET_UINT8(rawBytecode_);
+ }
+ uint8_t getUnpickDepth() const {
+ MOZ_ASSERT(is(JSOp::Unpick));
+ return GET_UINT8(rawBytecode_);
+ }
+
+ uint32_t getEnvCalleeNumHops() const {
+ MOZ_ASSERT(is(JSOp::EnvCallee));
+ return GET_UINT8(rawBytecode_);
+ }
+
+ EnvironmentCoordinate getEnvironmentCoordinate() const {
+ MOZ_ASSERT(JOF_OPTYPE(getOp()) == JOF_ENVCOORD);
+ return EnvironmentCoordinate(rawBytecode_);
+ }
+
+ uint32_t getCallArgc() const {
+ MOZ_ASSERT(JOF_OPTYPE(getOp()) == JOF_ARGC);
+ return GET_ARGC(rawBytecode_);
+ }
+
+ uint32_t getInitElemArrayIndex() const {
+ MOZ_ASSERT(is(JSOp::InitElemArray));
+ uint32_t index = GET_UINT32(rawBytecode_);
+ MOZ_ASSERT(index <= INT32_MAX,
+ "the bytecode emitter must never generate JSOp::InitElemArray "
+ "with an index exceeding int32_t range");
+ return index;
+ }
+
+ FunctionPrefixKind getFunctionPrefixKind() const {
+ MOZ_ASSERT(is(JSOp::SetFunName));
+ return FunctionPrefixKind(GET_UINT8(rawBytecode_));
+ }
+
+ CheckIsObjectKind getCheckIsObjectKind() const {
+ MOZ_ASSERT(is(JSOp::CheckIsObj));
+ return CheckIsObjectKind(GET_UINT8(rawBytecode_));
+ }
+
+ BuiltinObjectKind getBuiltinObjectKind() const {
+ MOZ_ASSERT(is(JSOp::BuiltinObject));
+ return BuiltinObjectKind(GET_UINT8(rawBytecode_));
+ }
+
+ CompletionKind getCompletionKind() const {
+ MOZ_ASSERT(is(JSOp::CloseIter));
+ return CompletionKind(GET_UINT8(rawBytecode_));
+ }
+
+ uint32_t getNewArrayLength() const {
+ MOZ_ASSERT(is(JSOp::NewArray));
+ return GET_UINT32(rawBytecode_);
+ }
+
+ int8_t getInt8() const {
+ MOZ_ASSERT(is(JSOp::Int8));
+ return GET_INT8(rawBytecode_);
+ }
+ uint16_t getUint16() const {
+ MOZ_ASSERT(is(JSOp::Uint16));
+ return GET_UINT16(rawBytecode_);
+ }
+ uint32_t getUint24() const {
+ MOZ_ASSERT(is(JSOp::Uint24));
+ return GET_UINT24(rawBytecode_);
+ }
+ int32_t getInt32() const {
+ MOZ_ASSERT(is(JSOp::Int32));
+ return GET_INT32(rawBytecode_);
+ }
+ uint32_t getResumeIndex() const {
+ MOZ_ASSERT(is(JSOp::InitialYield) || is(JSOp::Yield) || is(JSOp::Await));
+ return GET_RESUMEINDEX(rawBytecode_);
+ }
+ Value getInlineValue() const {
+ MOZ_ASSERT(is(JSOp::Double));
+ return GET_INLINE_VALUE(rawBytecode_);
+ }
+
+ GeneratorResumeKind resumeKind() { return ResumeKindFromPC(rawBytecode_); }
+
+ ThrowMsgKind throwMsgKind() {
+ MOZ_ASSERT(is(JSOp::ThrowMsg));
+ return static_cast<ThrowMsgKind>(GET_UINT8(rawBytecode_));
+ }
+
+#ifdef DEBUG
+ // To ease writing assertions
+ bool isValid() const { return isValid(debugOnlyScript_); }
+
+ bool isInBounds() const { return isInBounds(debugOnlyScript_); }
+#endif
+};
+
+} // namespace js
+
+#endif
diff --git a/js/src/vm/BytecodeUtil-inl.h b/js/src/vm/BytecodeUtil-inl.h
new file mode 100644
index 0000000000..f7b944b0dd
--- /dev/null
+++ b/js/src/vm/BytecodeUtil-inl.h
@@ -0,0 +1,242 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BytecodeUtil_inl_h
+#define vm_BytecodeUtil_inl_h
+
+#include "vm/BytecodeUtil.h"
+
+#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator
+#include "vm/JSScript.h"
+
+namespace js {
+
+static inline unsigned GetDefCount(jsbytecode* pc) {
+ /*
+ * Add an extra pushed value for Or/And opcodes, so that they are included
+ * in the pushed array of stack values for type inference.
+ */
+ JSOp op = JSOp(*pc);
+ switch (op) {
+ case JSOp::Or:
+ case JSOp::And:
+ case JSOp::Coalesce:
+ return 1;
+ case JSOp::Pick:
+ case JSOp::Unpick:
+ /*
+ * Pick pops and pushes how deep it looks in the stack + 1
+ * items. i.e. if the stack were |a b[2] c[1] d[0]|, pick 2
+ * would pop b, c, and d to rearrange the stack to |a c[2]
+ * d[1] b[0]|.
+ */
+ return pc[1] + 1;
+ default:
+ return StackDefs(op);
+ }
+}
+
+static inline unsigned GetUseCount(jsbytecode* pc) {
+ JSOp op = JSOp(*pc);
+ if (op == JSOp::Pick || op == JSOp::Unpick) {
+ return pc[1] + 1;
+ }
+
+ return StackUses(op, pc);
+}
+
+static inline JSOp ReverseCompareOp(JSOp op) {
+ switch (op) {
+ case JSOp::Gt:
+ return JSOp::Lt;
+ case JSOp::Ge:
+ return JSOp::Le;
+ case JSOp::Lt:
+ return JSOp::Gt;
+ case JSOp::Le:
+ return JSOp::Ge;
+ case JSOp::Eq:
+ case JSOp::Ne:
+ case JSOp::StrictEq:
+ case JSOp::StrictNe:
+ return op;
+ default:
+ MOZ_CRASH("unrecognized op");
+ }
+}
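+
+// Illustrative example: ReverseCompareOp(JSOp::Lt) == JSOp::Gt, reflecting
+// that `a < b` and `b > a` are equivalent once the operands are swapped;
+// the (in)equality ops are symmetric, so they map to themselves.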
+
+static inline JSOp NegateCompareOp(JSOp op) {
+ switch (op) {
+ case JSOp::Gt:
+ return JSOp::Le;
+ case JSOp::Ge:
+ return JSOp::Lt;
+ case JSOp::Lt:
+ return JSOp::Ge;
+ case JSOp::Le:
+ return JSOp::Gt;
+ case JSOp::Eq:
+ return JSOp::Ne;
+ case JSOp::Ne:
+ return JSOp::Eq;
+ case JSOp::StrictNe:
+ return JSOp::StrictEq;
+ case JSOp::StrictEq:
+ return JSOp::StrictNe;
+ default:
+ MOZ_CRASH("unrecognized op");
+ }
+}
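+
+// Illustrative example: NegateCompareOp(JSOp::Lt) == JSOp::Ge. Rewriting
+// `!(a < b)` as `a >= b` with this mapping is only sound when neither
+// operand can be NaN, since a NaN operand makes both comparisons false.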
+
+class BytecodeRange {
+ public:
+ BytecodeRange(JSContext* cx, JSScript* script)
+ : script(cx, script), pc(script->code()), end(pc + script->length()) {}
+ bool empty() const { return pc == end; }
+ jsbytecode* frontPC() const { return pc; }
+ JSOp frontOpcode() const { return JSOp(*pc); }
+ size_t frontOffset() const { return script->pcToOffset(pc); }
+ void popFront() { pc += GetBytecodeLength(pc); }
+
+ private:
+ RootedScript script;
+ jsbytecode* pc;
+ jsbytecode* end;
+};
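+
+// Illustrative usage (a sketch): iterate a script's bytecode front to back.
+//
+// for (BytecodeRange r(cx, script); !r.empty(); r.popFront()) {
+// JSOp op = r.frontOpcode();
+// ...
+// }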
+
+class BytecodeRangeWithPosition : private BytecodeRange {
+ public:
+ using BytecodeRange::empty;
+ using BytecodeRange::frontOffset;
+ using BytecodeRange::frontOpcode;
+ using BytecodeRange::frontPC;
+
+ BytecodeRangeWithPosition(JSContext* cx, JSScript* script)
+ : BytecodeRange(cx, script),
+ initialLine(script->lineno()),
+ lineno(script->lineno()),
+ column(script->column()),
+ sn(script->notes()),
+ snpc(script->code()),
+ isEntryPoint(false),
+ isBreakpoint(false),
+ seenStepSeparator(false),
+ wasArtifactEntryPoint(false) {
+ if (!sn->isTerminator()) {
+ snpc += sn->delta();
+ }
+ updatePosition();
+ while (frontPC() != script->main()) {
+ popFront();
+ }
+
+ if (frontOpcode() != JSOp::JumpTarget) {
+ isEntryPoint = true;
+ } else {
+ wasArtifactEntryPoint = true;
+ }
+ }
+
+ void popFront() {
+ BytecodeRange::popFront();
+ if (empty()) {
+ isEntryPoint = false;
+ } else {
+ updatePosition();
+ }
+
+ // The following conditions handle artifacts introduced by the
+ // bytecode emitter, so that we do not add breakpoints on empty
+ // statements in the user's source code.
+ if (wasArtifactEntryPoint) {
+ wasArtifactEntryPoint = false;
+ isEntryPoint = true;
+ }
+
+ if (isEntryPoint && frontOpcode() == JSOp::JumpTarget) {
+ wasArtifactEntryPoint = isEntryPoint;
+ isEntryPoint = false;
+ }
+ }
+
+ size_t frontLineNumber() const { return lineno; }
+ size_t frontColumnNumber() const { return column; }
+
+ // Entry points are restricted to bytecode offsets that have an
+ // explicit mention in the line table. This restriction avoids a
+ // number of failing cases caused by some instructions not having
+ // sensible (to the user) line numbers, and it is one way to
+ // implement the idea that the bytecode emitter should tell the
+ // debugger exactly which offsets represent "interesting" (to the
+ // user) places to stop.
+ bool frontIsEntryPoint() const { return isEntryPoint; }
+
+ // Breakable points are explicitly marked by the emitter as locations where
+ // the debugger may want to allow users to pause.
+ bool frontIsBreakablePoint() const { return isBreakpoint; }
+
+ // Breakable step points are the first breakable point after a
+ // SrcNote::StepSep note has been encountered.
+ bool frontIsBreakableStepPoint() const {
+ return isBreakpoint && seenStepSeparator;
+ }
+
+ private:
+ void updatePosition() {
+ if (isBreakpoint) {
+ isBreakpoint = false;
+ seenStepSeparator = false;
+ }
+
+ // Determine the current line number by reading all source notes up to
+ // and including the current offset.
+ jsbytecode* lastLinePC = nullptr;
+ SrcNoteIterator iter(sn);
+ for (; !iter.atEnd() && snpc <= frontPC();
+ ++iter, snpc += (*iter)->delta()) {
+ auto sn = *iter;
+
+ SrcNoteType type = sn->type();
+ if (type == SrcNoteType::ColSpan) {
+ ptrdiff_t colspan = SrcNote::ColSpan::getSpan(sn);
+ MOZ_ASSERT(ptrdiff_t(column) + colspan >= 0);
+ column += colspan;
+ lastLinePC = snpc;
+ } else if (type == SrcNoteType::SetLine) {
+ lineno = SrcNote::SetLine::getLine(sn, initialLine);
+ column = 0;
+ lastLinePC = snpc;
+ } else if (type == SrcNoteType::NewLine) {
+ lineno++;
+ column = 0;
+ lastLinePC = snpc;
+ } else if (type == SrcNoteType::Breakpoint) {
+ isBreakpoint = true;
+ lastLinePC = snpc;
+ } else if (type == SrcNoteType::StepSep) {
+ seenStepSeparator = true;
+ lastLinePC = snpc;
+ }
+ }
+
+ sn = *iter;
+ isEntryPoint = lastLinePC == frontPC();
+ }
+
+ size_t initialLine;
+ size_t lineno;
+ size_t column;
+ const SrcNote* sn;
+ jsbytecode* snpc;
+ bool isEntryPoint;
+ bool isBreakpoint;
+ bool seenStepSeparator;
+ bool wasArtifactEntryPoint;
+};
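+
+// Sketch of a debugger-style walk over the breakable locations (same
+// assumptions as above):
+//
+//   for (BytecodeRangeWithPosition r(cx, script); !r.empty(); r.popFront()) {
+//     if (r.frontIsEntryPoint()) {
+//       // r.frontOffset() is a stoppable offset on line r.frontLineNumber().
+//     }
+//   }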
+
+} // namespace js
+
+#endif /* vm_BytecodeUtil_inl_h */
diff --git a/js/src/vm/BytecodeUtil.cpp b/js/src/vm/BytecodeUtil.cpp
new file mode 100644
index 0000000000..2d73cf5340
--- /dev/null
+++ b/js/src/vm/BytecodeUtil.cpp
@@ -0,0 +1,3110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS bytecode descriptors, disassemblers, and (expression) decompilers.
+ */
+
+#include "vm/BytecodeUtil-inl.h"
+
+#define __STDC_FORMAT_MACROS
+
+#include "mozilla/Maybe.h"
+#include "mozilla/ReverseIterator.h"
+#include "mozilla/Sprintf.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "jsapi.h"
+#include "jstypes.h"
+
+#include "frontend/BytecodeCompiler.h"
+#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator
+#include "gc/PublicIterators.h"
+#include "jit/IonScript.h" // IonBlockCounts
+#include "js/CharacterEncoding.h"
+#include "js/experimental/CodeCoverage.h"
+#include "js/experimental/PCCountProfiling.h" // JS::{Start,Stop}PCCountProfiling, JS::PurgePCCounts, JS::GetPCCountScript{Count,Summary,Contents}
+#include "js/friend/DumpFunctions.h" // js::DumpPC, js::DumpScript
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Printer.h"
+#include "js/Printf.h"
+#include "js/Symbol.h"
+#include "util/DifferentialTesting.h"
+#include "util/Memory.h"
+#include "util/Text.h"
+#include "vm/BuiltinObjectKind.h"
+#include "vm/BytecodeIterator.h" // for AllBytecodesIterable
+#include "vm/BytecodeLocation.h"
+#include "vm/CodeCoverage.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/FrameIter.h" // js::{,Script}FrameIter
+#include "vm/JSAtom.h"
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/JSONPrinter.h"
+#include "vm/JSScript.h"
+#include "vm/Opcodes.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+#include "vm/ToSource.h" // js::ValueToSource
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#include "gc/GC-inl.h"
+#include "vm/BytecodeIterator-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+
+using js::frontend::IsIdentifier;
+
+/*
+ * Index limit must stay within 32 bits.
+ */
+static_assert(sizeof(uint32_t) * CHAR_BIT >= INDEX_LIMIT_LOG2 + 1);
+
+const JSCodeSpec js::CodeSpecTable[] = {
+#define MAKE_CODESPEC(op, op_snake, token, length, nuses, ndefs, format) \
+ {length, nuses, ndefs, format},
+ FOR_EACH_OPCODE(MAKE_CODESPEC)
+#undef MAKE_CODESPEC
+};
+
+/*
+ * Each element of the array is either a source literal associated with JS
+ * bytecode or null.
+ */
+static const char* const CodeToken[] = {
+#define TOKEN(op, op_snake, token, ...) token,
+ FOR_EACH_OPCODE(TOKEN)
+#undef TOKEN
+};
+
+/*
+ * Array of JS bytecode names used by PC count JSON, DEBUG-only Disassemble
+ * and JIT debug spew.
+ */
+const char* const js::CodeNameTable[] = {
+#define OPNAME(op, ...) #op,
+ FOR_EACH_OPCODE(OPNAME)
+#undef OPNAME
+};
+
+/************************************************************************/
+
+static bool DecompileArgumentFromStack(JSContext* cx, int formalIndex,
+ UniqueChars* res);
+
+/* static */ const char PCCounts::numExecName[] = "interp";
+
+[[nodiscard]] static bool DumpIonScriptCounts(Sprinter* sp, HandleScript script,
+ jit::IonScriptCounts* ionCounts) {
+ if (!sp->jsprintf("IonScript [%zu blocks]:\n", ionCounts->numBlocks())) {
+ return false;
+ }
+
+ for (size_t i = 0; i < ionCounts->numBlocks(); i++) {
+ const jit::IonBlockCounts& block = ionCounts->block(i);
+ unsigned lineNumber = 0, columnNumber = 0;
+ lineNumber = PCToLineNumber(script, script->offsetToPC(block.offset()),
+ &columnNumber);
+ if (!sp->jsprintf("BB #%" PRIu32 " [%05u,%u,%u]", block.id(),
+ block.offset(), lineNumber, columnNumber)) {
+ return false;
+ }
+ if (block.description()) {
+ if (!sp->jsprintf(" [inlined %s]", block.description())) {
+ return false;
+ }
+ }
+ for (size_t j = 0; j < block.numSuccessors(); j++) {
+ if (!sp->jsprintf(" -> #%" PRIu32, block.successor(j))) {
+ return false;
+ }
+ }
+ if (!sp->jsprintf(" :: %" PRIu64 " hits\n", block.hitCount())) {
+ return false;
+ }
+ if (!sp->jsprintf("%s\n", block.code())) {
+ return false;
+ }
+ }
+
+ return true;
+}
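+
+// A single block line in the dump above might look roughly like this
+// (illustrative values only):
+//
+//   BB #0 [00000,12,4] -> #1 -> #2 :: 42 hits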
+
+[[nodiscard]] static bool DumpPCCounts(JSContext* cx, HandleScript script,
+ Sprinter* sp) {
+ MOZ_ASSERT(script->hasScriptCounts());
+
+ // Ensure the Disassemble1 call below does not discard the script counts.
+ gc::AutoSuppressGC suppress(cx);
+
+#ifdef DEBUG
+ jsbytecode* pc = script->code();
+ while (pc < script->codeEnd()) {
+ jsbytecode* next = GetNextPc(pc);
+
+ if (!Disassemble1(cx, script, pc, script->pcToOffset(pc), true, sp)) {
+ return false;
+ }
+
+ if (!sp->put(" {")) {
+ return false;
+ }
+
+ PCCounts* counts = script->maybeGetPCCounts(pc);
+ if (double val = counts ? counts->numExec() : 0.0) {
+ if (!sp->jsprintf("\"%s\": %.0f", PCCounts::numExecName, val)) {
+ return false;
+ }
+ }
+ if (!sp->put("}\n")) {
+ return false;
+ }
+
+ pc = next;
+ }
+#endif
+
+ jit::IonScriptCounts* ionCounts = script->getIonCounts();
+ while (ionCounts) {
+ if (!DumpIonScriptCounts(sp, script, ionCounts)) {
+ return false;
+ }
+
+ ionCounts = ionCounts->previous();
+ }
+
+ return true;
+}
+
+bool js::DumpRealmPCCounts(JSContext* cx) {
+ Rooted<GCVector<JSScript*>> scripts(cx, GCVector<JSScript*>(cx));
+ for (auto base = cx->zone()->cellIter<BaseScript>(); !base.done();
+ base.next()) {
+ if (base->realm() != cx->realm()) {
+ continue;
+ }
+ MOZ_ASSERT_IF(base->hasScriptCounts(), base->hasBytecode());
+ if (base->hasScriptCounts()) {
+ if (!scripts.append(base->asJSScript())) {
+ return false;
+ }
+ }
+ }
+
+ for (uint32_t i = 0; i < scripts.length(); i++) {
+ HandleScript script = scripts[i];
+ Sprinter sprinter(cx);
+ if (!sprinter.init()) {
+ return false;
+ }
+
+ const char* filename = script->filename();
+ if (!filename) {
+ filename = "(unknown)";
+ }
+ fprintf(stdout, "--- SCRIPT %s:%u ---\n", filename, script->lineno());
+ if (!DumpPCCounts(cx, script, &sprinter)) {
+ return false;
+ }
+ fputs(sprinter.string(), stdout);
+ fprintf(stdout, "--- END SCRIPT %s:%u ---\n", filename, script->lineno());
+ }
+
+ return true;
+}
+
+/////////////////////////////////////////////////////////////////////
+// Bytecode Parser
+/////////////////////////////////////////////////////////////////////
+
+// Stores information about a stack slot: where the value in it comes from.
+// Elements of BytecodeParser::Bytecode.{offsetStack,offsetStackAfter} arrays.
+class OffsetAndDefIndex {
+ // The offset of the PC that pushed the value for this slot.
+ uint32_t offset_;
+
+  // The index among the PC's `ndefs` definitions (0-based).
+ uint8_t defIndex_;
+
+ enum : uint8_t {
+ Normal = 0,
+
+    // Ignore this value in expression decompilation.
+ // Used by JSOp::NopDestructuring. See BytecodeParser::simulateOp.
+ Ignored,
+
+ // The value in this slot comes from 2 or more paths.
+    // offset_ and defIndex_ hold the information for the path that
+    // reaches here first.
+ Merged,
+ } type_;
+
+ public:
+ uint32_t offset() const {
+ MOZ_ASSERT(!isSpecial());
+ return offset_;
+ };
+ uint32_t specialOffset() const {
+ MOZ_ASSERT(isSpecial());
+ return offset_;
+ };
+
+ uint8_t defIndex() const {
+ MOZ_ASSERT(!isSpecial());
+ return defIndex_;
+ }
+ uint8_t specialDefIndex() const {
+ MOZ_ASSERT(isSpecial());
+ return defIndex_;
+ }
+
+ bool isSpecial() const { return type_ != Normal; }
+ bool isMerged() const { return type_ == Merged; }
+ bool isIgnored() const { return type_ == Ignored; }
+
+ void set(uint32_t aOffset, uint8_t aDefIndex) {
+ offset_ = aOffset;
+ defIndex_ = aDefIndex;
+ type_ = Normal;
+ }
+
+ // Keep offset_ and defIndex_ values for stack dump.
+ void setMerged() { type_ = Merged; }
+ void setIgnored() { type_ = Ignored; }
+
+ bool operator==(const OffsetAndDefIndex& rhs) const {
+ return offset_ == rhs.offset_ && defIndex_ == rhs.defIndex_;
+ }
+
+ bool operator!=(const OffsetAndDefIndex& rhs) const {
+ return !(*this == rhs);
+ }
+};
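+
+// Sketch of the merge semantics, with made-up offsets: two control-flow paths
+// that push the same slot from different instructions leave the slot Merged.
+//
+//   OffsetAndDefIndex a, b;
+//   a.set(10, 0);        // pushed by the op at offset 10 on one path
+//   b.set(20, 0);        // pushed by the op at offset 20 on the other path
+//   MOZ_ASSERT(a != b);  // differing origins...
+//   a.setMerged();       // ...so mergeOffsetStack marks the slot Merged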
+
+namespace {
+
+class BytecodeParser {
+ public:
+ enum class JumpKind {
+ Simple,
+ SwitchCase,
+ SwitchDefault,
+ TryCatch,
+ TryFinally
+ };
+
+ private:
+ class Bytecode {
+ public:
+ explicit Bytecode(const LifoAllocPolicy<Fallible>& alloc)
+ : parsed(false),
+ stackDepth(0),
+ offsetStack(nullptr)
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ ,
+ stackDepthAfter(0),
+ offsetStackAfter(nullptr),
+ jumpOrigins(alloc)
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+ {
+ }
+
+ // Whether this instruction has been analyzed to get its output defines
+ // and stack.
+ bool parsed;
+
+ // Stack depth before this opcode.
+ uint32_t stackDepth;
+
+ // Pointer to array of |stackDepth| offsets. An element at position N
+ // in the array is the offset of the opcode that defined the
+ // corresponding stack slot. The top of the stack is at position
+ // |stackDepth - 1|.
+ OffsetAndDefIndex* offsetStack;
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+  // Stack depth after this opcode.
+ uint32_t stackDepthAfter;
+
+ // Pointer to array of |stackDepthAfter| offsets.
+ OffsetAndDefIndex* offsetStackAfter;
+
+ struct JumpInfo {
+ uint32_t from;
+ JumpKind kind;
+
+ JumpInfo(uint32_t from_, JumpKind kind_) : from(from_), kind(kind_) {}
+ };
+
+    // A list of offsets of the bytecodes that jump to this bytecode,
+    // excluding the previous bytecode.
+ Vector<JumpInfo, 0, LifoAllocPolicy<Fallible>> jumpOrigins;
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+
+ bool captureOffsetStack(LifoAlloc& alloc, const OffsetAndDefIndex* stack,
+ uint32_t depth) {
+ stackDepth = depth;
+ if (stackDepth) {
+ offsetStack = alloc.newArray<OffsetAndDefIndex>(stackDepth);
+ if (!offsetStack) {
+ return false;
+ }
+ for (uint32_t n = 0; n < stackDepth; n++) {
+ offsetStack[n] = stack[n];
+ }
+ }
+ return true;
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ bool captureOffsetStackAfter(LifoAlloc& alloc,
+ const OffsetAndDefIndex* stack,
+ uint32_t depth) {
+ stackDepthAfter = depth;
+ if (stackDepthAfter) {
+ offsetStackAfter = alloc.newArray<OffsetAndDefIndex>(stackDepthAfter);
+ if (!offsetStackAfter) {
+ return false;
+ }
+ for (uint32_t n = 0; n < stackDepthAfter; n++) {
+ offsetStackAfter[n] = stack[n];
+ }
+ }
+ return true;
+ }
+
+ bool addJump(uint32_t from, JumpKind kind) {
+ return jumpOrigins.append(JumpInfo(from, kind));
+ }
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+
+ // When control-flow merges, intersect the stacks, marking slots that
+ // are defined by different offsets and/or defIndices merged.
+ // This is sufficient for forward control-flow. It doesn't grok loops
+ // -- for that you would have to iterate to a fixed point -- but there
+ // shouldn't be operands on the stack at a loop back-edge anyway.
+ void mergeOffsetStack(const OffsetAndDefIndex* stack, uint32_t depth) {
+ MOZ_ASSERT(depth == stackDepth);
+ for (uint32_t n = 0; n < stackDepth; n++) {
+ if (stack[n].isIgnored()) {
+ continue;
+ }
+ if (offsetStack[n].isIgnored()) {
+ offsetStack[n] = stack[n];
+ }
+ if (offsetStack[n] != stack[n]) {
+ offsetStack[n].setMerged();
+ }
+ }
+ }
+ };
+
+ JSContext* cx_;
+ LifoAlloc& alloc_;
+ RootedScript script_;
+
+ Bytecode** codeArray_;
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ // Dedicated mode for stack dump.
+ // Capture stack after each opcode, and also enable special handling for
+  // some opcodes to make stack transitions clearer.
+ bool isStackDump;
+#endif
+
+ public:
+ BytecodeParser(JSContext* cx, LifoAlloc& alloc, JSScript* script)
+ : cx_(cx),
+ alloc_(alloc),
+ script_(cx, script),
+ codeArray_(nullptr)
+#ifdef DEBUG
+ ,
+ isStackDump(false)
+#endif
+ {
+ }
+
+ bool parse();
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ bool isReachable(const jsbytecode* pc) const { return maybeCode(pc); }
+#endif
+
+ uint32_t stackDepthAtPC(uint32_t offset) const {
+ // Sometimes the code generator in debug mode asks about the stack depth
+ // of unreachable code (bug 932180 comment 22). Assume that unreachable
+ // code has no operands on the stack.
+ return getCode(offset).stackDepth;
+ }
+ uint32_t stackDepthAtPC(const jsbytecode* pc) const {
+ return stackDepthAtPC(script_->pcToOffset(pc));
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ uint32_t stackDepthAfterPC(uint32_t offset) const {
+ return getCode(offset).stackDepthAfter;
+ }
+ uint32_t stackDepthAfterPC(const jsbytecode* pc) const {
+ return stackDepthAfterPC(script_->pcToOffset(pc));
+ }
+#endif
+
+ const OffsetAndDefIndex& offsetForStackOperand(uint32_t offset,
+ int operand) const {
+ Bytecode& code = getCode(offset);
+ if (operand < 0) {
+ operand += code.stackDepth;
+ MOZ_ASSERT(operand >= 0);
+ }
+ MOZ_ASSERT(uint32_t(operand) < code.stackDepth);
+ return code.offsetStack[operand];
+ }
+ jsbytecode* pcForStackOperand(jsbytecode* pc, int operand,
+ uint8_t* defIndex) const {
+ size_t offset = script_->pcToOffset(pc);
+ const OffsetAndDefIndex& offsetAndDefIndex =
+ offsetForStackOperand(offset, operand);
+ if (offsetAndDefIndex.isSpecial()) {
+ return nullptr;
+ }
+ *defIndex = offsetAndDefIndex.defIndex();
+ return script_->offsetToPC(offsetAndDefIndex.offset());
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ const OffsetAndDefIndex& offsetForStackOperandAfterPC(uint32_t offset,
+ int operand) const {
+ Bytecode& code = getCode(offset);
+ if (operand < 0) {
+ operand += code.stackDepthAfter;
+ MOZ_ASSERT(operand >= 0);
+ }
+ MOZ_ASSERT(uint32_t(operand) < code.stackDepthAfter);
+ return code.offsetStackAfter[operand];
+ }
+
+ template <typename Callback>
+ bool forEachJumpOrigins(jsbytecode* pc, Callback callback) const {
+ Bytecode& code = getCode(script_->pcToOffset(pc));
+
+ for (Bytecode::JumpInfo& info : code.jumpOrigins) {
+ if (!callback(script_->offsetToPC(info.from), info.kind)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void setStackDump() { isStackDump = true; }
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+
+ private:
+ LifoAlloc& alloc() { return alloc_; }
+
+ void reportOOM() { ReportOutOfMemory(cx_); }
+
+ uint32_t maximumStackDepth() const {
+ return script_->nslots() - script_->nfixed();
+ }
+
+ Bytecode& getCode(uint32_t offset) const {
+ MOZ_ASSERT(offset < script_->length());
+ MOZ_ASSERT(codeArray_[offset]);
+ return *codeArray_[offset];
+ }
+
+ Bytecode* maybeCode(uint32_t offset) const {
+ MOZ_ASSERT(offset < script_->length());
+ return codeArray_[offset];
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ Bytecode* maybeCode(const jsbytecode* pc) const {
+ return maybeCode(script_->pcToOffset(pc));
+ }
+#endif
+
+ uint32_t simulateOp(JSOp op, uint32_t offset, OffsetAndDefIndex* offsetStack,
+ uint32_t stackDepth);
+
+ inline bool recordBytecode(uint32_t offset,
+ const OffsetAndDefIndex* offsetStack,
+ uint32_t stackDepth);
+
+ inline bool addJump(uint32_t offset, uint32_t stackDepth,
+ const OffsetAndDefIndex* offsetStack, jsbytecode* pc,
+ JumpKind kind);
+};
+
+} // anonymous namespace
+
+uint32_t BytecodeParser::simulateOp(JSOp op, uint32_t offset,
+ OffsetAndDefIndex* offsetStack,
+ uint32_t stackDepth) {
+ jsbytecode* pc = script_->offsetToPC(offset);
+ uint32_t nuses = GetUseCount(pc);
+ uint32_t ndefs = GetDefCount(pc);
+
+ MOZ_RELEASE_ASSERT(stackDepth >= nuses);
+ stackDepth -= nuses;
+ MOZ_RELEASE_ASSERT(stackDepth + ndefs <= maximumStackDepth());
+
+#ifdef DEBUG
+ if (isStackDump) {
+    // Opcodes that modify the object but keep it on the stack during
+    // initialization should be listed here instead of in the switch below.
+    // For error messages, they shouldn't be shown as the original object
+    // after adding properties.
+    // For stack dumps, keeping the input is better.
+ switch (op) {
+ case JSOp::InitHiddenProp:
+ case JSOp::InitHiddenPropGetter:
+ case JSOp::InitHiddenPropSetter:
+ case JSOp::InitLockedProp:
+ case JSOp::InitProp:
+ case JSOp::InitPropGetter:
+ case JSOp::InitPropSetter:
+ case JSOp::SetFunName:
+ // Keep the second value.
+ MOZ_ASSERT(nuses == 2);
+ MOZ_ASSERT(ndefs == 1);
+ goto end;
+
+ case JSOp::InitElem:
+ case JSOp::InitElemGetter:
+ case JSOp::InitElemSetter:
+ case JSOp::InitHiddenElem:
+ case JSOp::InitHiddenElemGetter:
+ case JSOp::InitHiddenElemSetter:
+ case JSOp::InitLockedElem:
+ // Keep the third value.
+ MOZ_ASSERT(nuses == 3);
+ MOZ_ASSERT(ndefs == 1);
+ goto end;
+
+ default:
+ break;
+ }
+ }
+#endif /* DEBUG */
+
+ // Mark the current offset as defining its values on the offset stack,
+ // unless it just reshuffles the stack. In that case we want to preserve
+ // the opcode that generated the original value.
+ switch (op) {
+ default:
+ for (uint32_t n = 0; n != ndefs; ++n) {
+ offsetStack[stackDepth + n].set(offset, n);
+ }
+ break;
+
+ case JSOp::NopDestructuring:
+      // Poison the last offset so it does not obfuscate the error message.
+ offsetStack[stackDepth - 1].setIgnored();
+ break;
+
+ case JSOp::Case:
+ // Keep the switch value.
+ MOZ_ASSERT(ndefs == 1);
+ break;
+
+ case JSOp::Dup:
+ MOZ_ASSERT(ndefs == 2);
+ offsetStack[stackDepth + 1] = offsetStack[stackDepth];
+ break;
+
+ case JSOp::Dup2:
+ MOZ_ASSERT(ndefs == 4);
+ offsetStack[stackDepth + 2] = offsetStack[stackDepth];
+ offsetStack[stackDepth + 3] = offsetStack[stackDepth + 1];
+ break;
+
+ case JSOp::DupAt: {
+ MOZ_ASSERT(ndefs == 1);
+ unsigned n = GET_UINT24(pc);
+ MOZ_ASSERT(n < stackDepth);
+ offsetStack[stackDepth] = offsetStack[stackDepth - 1 - n];
+ break;
+ }
+
+ case JSOp::Swap: {
+ MOZ_ASSERT(ndefs == 2);
+ OffsetAndDefIndex tmp = offsetStack[stackDepth + 1];
+ offsetStack[stackDepth + 1] = offsetStack[stackDepth];
+ offsetStack[stackDepth] = tmp;
+ break;
+ }
+
+ case JSOp::Pick: {
+ unsigned n = GET_UINT8(pc);
+ MOZ_ASSERT(ndefs == n + 1);
+ uint32_t top = stackDepth + n;
+ OffsetAndDefIndex tmp = offsetStack[stackDepth];
+ for (uint32_t i = stackDepth; i < top; i++) {
+ offsetStack[i] = offsetStack[i + 1];
+ }
+ offsetStack[top] = tmp;
+ break;
+ }
+
+ case JSOp::Unpick: {
+ unsigned n = GET_UINT8(pc);
+ MOZ_ASSERT(ndefs == n + 1);
+ uint32_t top = stackDepth + n;
+ OffsetAndDefIndex tmp = offsetStack[top];
+ for (uint32_t i = top; i > stackDepth; i--) {
+ offsetStack[i] = offsetStack[i - 1];
+ }
+ offsetStack[stackDepth] = tmp;
+ break;
+ }
+
+ case JSOp::And:
+ case JSOp::CheckIsObj:
+ case JSOp::CheckObjCoercible:
+ case JSOp::CheckThis:
+ case JSOp::CheckThisReinit:
+ case JSOp::CheckClassHeritage:
+ case JSOp::DebugCheckSelfHosted:
+ case JSOp::InitGLexical:
+ case JSOp::InitLexical:
+ case JSOp::Or:
+ case JSOp::Coalesce:
+ case JSOp::SetAliasedVar:
+ case JSOp::SetArg:
+ case JSOp::SetIntrinsic:
+ case JSOp::SetLocal:
+ case JSOp::InitAliasedLexical:
+ case JSOp::CheckLexical:
+ case JSOp::CheckAliasedLexical:
+ // Keep the top value.
+ MOZ_ASSERT(nuses == 1);
+ MOZ_ASSERT(ndefs == 1);
+ break;
+
+ case JSOp::InitHomeObject:
+ // Pop the top value, keep the other value.
+ MOZ_ASSERT(nuses == 2);
+ MOZ_ASSERT(ndefs == 1);
+ break;
+
+ case JSOp::CheckResumeKind:
+ // Pop the top two values, keep the other value.
+ MOZ_ASSERT(nuses == 3);
+ MOZ_ASSERT(ndefs == 1);
+ break;
+
+ case JSOp::SetGName:
+ case JSOp::SetName:
+ case JSOp::SetProp:
+ case JSOp::StrictSetGName:
+ case JSOp::StrictSetName:
+ case JSOp::StrictSetProp:
+      // Keep the top value, removing one other value.
+ MOZ_ASSERT(nuses == 2);
+ MOZ_ASSERT(ndefs == 1);
+ offsetStack[stackDepth] = offsetStack[stackDepth + 1];
+ break;
+
+ case JSOp::SetPropSuper:
+ case JSOp::StrictSetPropSuper:
+      // Keep the top value, removing two other values.
+ MOZ_ASSERT(nuses == 3);
+ MOZ_ASSERT(ndefs == 1);
+ offsetStack[stackDepth] = offsetStack[stackDepth + 2];
+ break;
+
+ case JSOp::SetElemSuper:
+ case JSOp::StrictSetElemSuper:
+      // Keep the top value, removing three other values.
+ MOZ_ASSERT(nuses == 4);
+ MOZ_ASSERT(ndefs == 1);
+ offsetStack[stackDepth] = offsetStack[stackDepth + 3];
+ break;
+
+ case JSOp::IsGenClosing:
+ case JSOp::IsNoIter:
+ case JSOp::IsNullOrUndefined:
+ case JSOp::MoreIter:
+ // Keep the top value and push one more value.
+ MOZ_ASSERT(nuses == 1);
+ MOZ_ASSERT(ndefs == 2);
+ offsetStack[stackDepth + 1].set(offset, 1);
+ break;
+
+ case JSOp::CheckPrivateField:
+ // Keep the top two values, and push one new value.
+ MOZ_ASSERT(nuses == 2);
+ MOZ_ASSERT(ndefs == 3);
+ offsetStack[stackDepth + 2].set(offset, 2);
+ break;
+ }
+
+#ifdef DEBUG
+end:
+#endif /* DEBUG */
+
+ stackDepth += ndefs;
+ return stackDepth;
+}
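+
+// For instance (sketch), simulating JSOp::Dup over a one-element stack copies
+// the origin of the duplicated value instead of crediting the Dup itself:
+//
+//   offsetStack == [{off: 10, def: 0}]             // depth 1, pushed at 10
+//   simulateOp(JSOp::Dup, ...)                     // returns depth 2
+//   offsetStack == [{off: 10, def: 0}, {off: 10, def: 0}]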
+
+bool BytecodeParser::recordBytecode(uint32_t offset,
+ const OffsetAndDefIndex* offsetStack,
+ uint32_t stackDepth) {
+ MOZ_RELEASE_ASSERT(offset < script_->length());
+ MOZ_RELEASE_ASSERT(stackDepth <= maximumStackDepth());
+
+ Bytecode*& code = codeArray_[offset];
+ if (!code) {
+ code = alloc().new_<Bytecode>(alloc());
+ if (!code || !code->captureOffsetStack(alloc(), offsetStack, stackDepth)) {
+ reportOOM();
+ return false;
+ }
+ } else {
+ code->mergeOffsetStack(offsetStack, stackDepth);
+ }
+
+ return true;
+}
+
+bool BytecodeParser::addJump(uint32_t offset, uint32_t stackDepth,
+ const OffsetAndDefIndex* offsetStack,
+ jsbytecode* pc, JumpKind kind) {
+ if (!recordBytecode(offset, offsetStack, stackDepth)) {
+ return false;
+ }
+
+#ifdef DEBUG
+ uint32_t currentOffset = script_->pcToOffset(pc);
+ if (isStackDump) {
+ if (!codeArray_[offset]->addJump(currentOffset, kind)) {
+ reportOOM();
+ return false;
+ }
+ }
+
+ // If this is a backedge, assert we parsed the target JSOp::LoopHead.
+ MOZ_ASSERT_IF(offset < currentOffset, codeArray_[offset]->parsed);
+#endif /* DEBUG */
+
+ return true;
+}
+
+bool BytecodeParser::parse() {
+ MOZ_ASSERT(!codeArray_);
+
+ uint32_t length = script_->length();
+ codeArray_ = alloc().newArray<Bytecode*>(length);
+
+ if (!codeArray_) {
+ reportOOM();
+ return false;
+ }
+
+ mozilla::PodZero(codeArray_, length);
+
+ // Fill in stack depth and definitions at initial bytecode.
+ Bytecode* startcode = alloc().new_<Bytecode>(alloc());
+ if (!startcode) {
+ reportOOM();
+ return false;
+ }
+
+  // Allocate the scratch offset stack used while simulating each opcode.
+ OffsetAndDefIndex* offsetStack =
+ alloc().newArray<OffsetAndDefIndex>(maximumStackDepth());
+ if (maximumStackDepth() && !offsetStack) {
+ reportOOM();
+ return false;
+ }
+
+ startcode->stackDepth = 0;
+ codeArray_[0] = startcode;
+
+ for (uint32_t offset = 0, nextOffset = 0; offset < length;
+ offset = nextOffset) {
+ Bytecode* code = maybeCode(offset);
+ jsbytecode* pc = script_->offsetToPC(offset);
+
+ // Next bytecode to analyze.
+ nextOffset = offset + GetBytecodeLength(pc);
+
+ MOZ_RELEASE_ASSERT(*pc < JSOP_LIMIT);
+ JSOp op = JSOp(*pc);
+
+ if (!code) {
+ // Haven't found a path by which this bytecode is reachable.
+ continue;
+ }
+
+ // On a jump target, we reload the offsetStack saved for the current
+ // bytecode, as it contains either the original offset stack, or the
+ // merged offset stack.
+ if (BytecodeIsJumpTarget(op)) {
+ for (uint32_t n = 0; n < code->stackDepth; ++n) {
+ offsetStack[n] = code->offsetStack[n];
+ }
+ }
+
+ if (code->parsed) {
+ // No need to reparse.
+ continue;
+ }
+
+ code->parsed = true;
+
+ uint32_t stackDepth = simulateOp(op, offset, offsetStack, code->stackDepth);
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ if (isStackDump) {
+ if (!code->captureOffsetStackAfter(alloc(), offsetStack, stackDepth)) {
+ reportOOM();
+ return false;
+ }
+ }
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+
+ switch (op) {
+ case JSOp::TableSwitch: {
+ uint32_t defaultOffset = offset + GET_JUMP_OFFSET(pc);
+ jsbytecode* pc2 = pc + JUMP_OFFSET_LEN;
+ int32_t low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ int32_t high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+
+ if (!addJump(defaultOffset, stackDepth, offsetStack, pc,
+ JumpKind::SwitchDefault)) {
+ return false;
+ }
+
+ uint32_t ncases = high - low + 1;
+
+ for (uint32_t i = 0; i < ncases; i++) {
+ uint32_t targetOffset = script_->tableSwitchCaseOffset(pc, i);
+ if (targetOffset != defaultOffset) {
+ if (!addJump(targetOffset, stackDepth, offsetStack, pc,
+ JumpKind::SwitchCase)) {
+ return false;
+ }
+ }
+ }
+ break;
+ }
+
+ case JSOp::Try: {
+ // Everything between a try and corresponding catch or finally is
+ // conditional. Note that there is no problem with code which is skipped
+ // by a thrown exception but is not caught by a later handler in the
+ // same function: no more code will execute, and it does not matter what
+ // is defined.
+ for (const TryNote& tn : script_->trynotes()) {
+ if (tn.start == offset + JSOpLength_Try) {
+ uint32_t catchOffset = tn.start + tn.length;
+ if (tn.kind() == TryNoteKind::Catch) {
+ if (!addJump(catchOffset, stackDepth, offsetStack, pc,
+ JumpKind::TryCatch)) {
+ return false;
+ }
+ } else if (tn.kind() == TryNoteKind::Finally) {
+ // Two additional values will be on the stack at the beginning
+ // of the finally block: the exception/resume index, and the
+ // |throwing| value. For the benefit of the decompiler, point
+ // them at this Try.
+ offsetStack[stackDepth].set(offset, 0);
+ offsetStack[stackDepth + 1].set(offset, 1);
+ if (!addJump(catchOffset, stackDepth + 2, offsetStack, pc,
+ JumpKind::TryFinally)) {
+ return false;
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ // Check basic jump opcodes, which may or may not have a fallthrough.
+ if (IsJumpOpcode(op)) {
+ // Case instructions do not push the lvalue back when branching.
+ uint32_t newStackDepth = stackDepth;
+ if (op == JSOp::Case) {
+ newStackDepth--;
+ }
+
+ uint32_t targetOffset = offset + GET_JUMP_OFFSET(pc);
+ if (!addJump(targetOffset, newStackDepth, offsetStack, pc,
+ JumpKind::Simple)) {
+ return false;
+ }
+ }
+
+ // Handle any fallthrough from this opcode.
+ if (BytecodeFallsThrough(op)) {
+ if (!recordBytecode(nextOffset, offsetStack, stackDepth)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+
+bool js::ReconstructStackDepth(JSContext* cx, JSScript* script, jsbytecode* pc,
+ uint32_t* depth, bool* reachablePC) {
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+ BytecodeParser parser(cx, allocScope.alloc(), script);
+ if (!parser.parse()) {
+ return false;
+ }
+
+ *reachablePC = parser.isReachable(pc);
+
+ if (*reachablePC) {
+ *depth = parser.stackDepthAtPC(pc);
+ }
+
+ return true;
+}
+
+static unsigned Disassemble1(JSContext* cx, HandleScript script, jsbytecode* pc,
+ unsigned loc, bool lines,
+ const BytecodeParser* parser, Sprinter* sp);
+
+/*
+ * If pc != nullptr, include a prefix indicating whether the PC is at the
+ * current line. If showAll is true, include the source note type and the
+ * entry stack depth.
+ */
+[[nodiscard]] static bool DisassembleAtPC(
+ JSContext* cx, JSScript* scriptArg, bool lines, const jsbytecode* pc,
+ bool showAll, Sprinter* sp,
+ DisassembleSkeptically skeptically = DisassembleSkeptically::No) {
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+ RootedScript script(cx, scriptArg);
+ mozilla::Maybe<BytecodeParser> parser;
+
+ if (skeptically == DisassembleSkeptically::No) {
+ parser.emplace(cx, allocScope.alloc(), script);
+ parser->setStackDump();
+ if (!parser->parse()) {
+ return false;
+ }
+ }
+
+ if (showAll) {
+ if (!sp->jsprintf("%s:%u\n", script->filename(),
+ unsigned(script->lineno()))) {
+ return false;
+ }
+ }
+
+ if (pc != nullptr) {
+ if (!sp->put(" ")) {
+ return false;
+ }
+ }
+ if (showAll) {
+ if (!sp->put("sn stack ")) {
+ return false;
+ }
+ }
+ if (!sp->put("loc ")) {
+ return false;
+ }
+ if (lines) {
+ if (!sp->put("line")) {
+ return false;
+ }
+ }
+ if (!sp->put(" op\n")) {
+ return false;
+ }
+
+ if (pc != nullptr) {
+ if (!sp->put(" ")) {
+ return false;
+ }
+ }
+ if (showAll) {
+ if (!sp->put("-- ----- ")) {
+ return false;
+ }
+ }
+ if (!sp->put("----- ")) {
+ return false;
+ }
+ if (lines) {
+ if (!sp->put("----")) {
+ return false;
+ }
+ }
+ if (!sp->put(" --\n")) {
+ return false;
+ }
+
+ jsbytecode* next = script->code();
+ jsbytecode* end = script->codeEnd();
+ while (next < end) {
+ if (next == script->main()) {
+ if (!sp->put("main:\n")) {
+ return false;
+ }
+ }
+ if (pc != nullptr) {
+ if (!sp->put(pc == next ? "--> " : " ")) {
+ return false;
+ }
+ }
+ if (showAll) {
+ const SrcNote* sn = GetSrcNote(cx, script, next);
+ if (sn) {
+ MOZ_ASSERT(!sn->isTerminator());
+ SrcNoteIterator iter(sn);
+ while (true) {
+ ++iter;
+ auto next = *iter;
+ if (!(!next->isTerminator() && next->delta() == 0)) {
+ break;
+ }
+ if (!sp->jsprintf("%s\n ", sn->name())) {
+ return false;
+ }
+ sn = *iter;
+ }
+ if (!sp->jsprintf("%s ", sn->name())) {
+ return false;
+ }
+ } else {
+ if (!sp->put(" ")) {
+ return false;
+ }
+ }
+ if (parser && parser->isReachable(next)) {
+ if (!sp->jsprintf("%05u ", parser->stackDepthAtPC(next))) {
+ return false;
+ }
+ } else {
+ if (!sp->put(" ")) {
+ return false;
+ }
+ }
+ }
+ unsigned len = Disassemble1(cx, script, next, script->pcToOffset(next),
+ lines, parser.ptrOr(nullptr), sp);
+ if (!len) {
+ return false;
+ }
+
+ next += len;
+ }
+
+ return true;
+}
+
+bool js::Disassemble(JSContext* cx, HandleScript script, bool lines,
+ Sprinter* sp, DisassembleSkeptically skeptically) {
+ return DisassembleAtPC(cx, script, lines, nullptr, false, sp, skeptically);
+}
+
+JS_PUBLIC_API bool js::DumpPC(JSContext* cx, FILE* fp) {
+ gc::AutoSuppressGC suppressGC(cx);
+ Sprinter sprinter(cx);
+ if (!sprinter.init()) {
+ return false;
+ }
+ ScriptFrameIter iter(cx);
+ if (iter.done()) {
+ fprintf(fp, "Empty stack.\n");
+ return true;
+ }
+ RootedScript script(cx, iter.script());
+ bool ok = DisassembleAtPC(cx, script, true, iter.pc(), false, &sprinter);
+ fprintf(fp, "%s", sprinter.string());
+ return ok;
+}
+
+JS_PUBLIC_API bool js::DumpScript(JSContext* cx, JSScript* scriptArg,
+ FILE* fp) {
+ gc::AutoSuppressGC suppressGC(cx);
+ Sprinter sprinter(cx);
+ if (!sprinter.init()) {
+ return false;
+ }
+ RootedScript script(cx, scriptArg);
+ bool ok = Disassemble(cx, script, true, &sprinter);
+ fprintf(fp, "%s", sprinter.string());
+ return ok;
+}
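+
+// Both helpers are intended to be called from a native debugger; e.g. from
+// gdb something like |call js::DumpScript(cx, script, stderr)| should work,
+// assuming cx and script are visible in the current frame.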
+
+static UniqueChars ToDisassemblySource(JSContext* cx, HandleValue v) {
+ if (v.isString()) {
+ return QuoteString(cx, v.toString(), '"');
+ }
+
+ if (JS::RuntimeHeapIsBusy()) {
+ return DuplicateString(cx, "<value>");
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+
+ if (obj.is<JSFunction>()) {
+ RootedFunction fun(cx, &obj.as<JSFunction>());
+ JSString* str = JS_DecompileFunction(cx, fun);
+ if (!str) {
+ return nullptr;
+ }
+ return QuoteString(cx, str);
+ }
+
+ if (obj.is<RegExpObject>()) {
+ Rooted<RegExpObject*> reobj(cx, &obj.as<RegExpObject>());
+ JSString* source = RegExpObject::toString(cx, reobj);
+ if (!source) {
+ return nullptr;
+ }
+ return QuoteString(cx, source);
+ }
+ }
+
+ JSString* str = ValueToSource(cx, v);
+ if (!str) {
+ return nullptr;
+ }
+ return QuoteString(cx, str);
+}
+
+static bool ToDisassemblySource(JSContext* cx, Handle<Scope*> scope,
+ UniqueChars* bytes) {
+ UniqueChars source = JS_smprintf("%s {", ScopeKindString(scope->kind()));
+ if (!source) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ for (Rooted<BindingIter> bi(cx, BindingIter(scope)); bi; bi++) {
+ UniqueChars nameBytes = AtomToPrintableString(cx, bi.name());
+ if (!nameBytes) {
+ return false;
+ }
+
+ source = JS_sprintf_append(std::move(source), "%s: ", nameBytes.get());
+ if (!source) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ BindingLocation loc = bi.location();
+ switch (loc.kind()) {
+ case BindingLocation::Kind::Global:
+ source = JS_sprintf_append(std::move(source), "global");
+ break;
+
+ case BindingLocation::Kind::Frame:
+ source =
+ JS_sprintf_append(std::move(source), "frame slot %u", loc.slot());
+ break;
+
+ case BindingLocation::Kind::Environment:
+ source =
+ JS_sprintf_append(std::move(source), "env slot %u", loc.slot());
+ break;
+
+ case BindingLocation::Kind::Argument:
+ source =
+ JS_sprintf_append(std::move(source), "arg slot %u", loc.slot());
+ break;
+
+ case BindingLocation::Kind::NamedLambdaCallee:
+ source = JS_sprintf_append(std::move(source), "named lambda callee");
+ break;
+
+ case BindingLocation::Kind::Import:
+ source = JS_sprintf_append(std::move(source), "import");
+ break;
+ }
+
+ if (!source) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!bi.isLast()) {
+ source = JS_sprintf_append(std::move(source), ", ");
+ if (!source) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+ }
+
+ source = JS_sprintf_append(std::move(source), "}");
+ if (!source) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ *bytes = std::move(source);
+ return true;
+}
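+
+// Sample rendering (sketch): a lexical scope with one frame-slot binding
+// would print roughly as "lexical {x: frame slot 0}".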
+
+static bool DumpJumpOrigins(HandleScript script, jsbytecode* pc,
+ const BytecodeParser* parser, Sprinter* sp) {
+ bool called = false;
+ auto callback = [&script, &sp, &called](jsbytecode* pc,
+ BytecodeParser::JumpKind kind) {
+ if (!called) {
+ called = true;
+ if (!sp->put("\n# ")) {
+ return false;
+ }
+ } else {
+ if (!sp->put(", ")) {
+ return false;
+ }
+ }
+
+ switch (kind) {
+ case BytecodeParser::JumpKind::Simple:
+ break;
+
+ case BytecodeParser::JumpKind::SwitchCase:
+ if (!sp->put("switch-case ")) {
+ return false;
+ }
+ break;
+
+ case BytecodeParser::JumpKind::SwitchDefault:
+ if (!sp->put("switch-default ")) {
+ return false;
+ }
+ break;
+
+ case BytecodeParser::JumpKind::TryCatch:
+ if (!sp->put("try-catch ")) {
+ return false;
+ }
+ break;
+
+ case BytecodeParser::JumpKind::TryFinally:
+ if (!sp->put("try-finally ")) {
+ return false;
+ }
+ break;
+ }
+
+ if (!sp->jsprintf("from %s @ %05u", CodeName(JSOp(*pc)),
+ unsigned(script->pcToOffset(pc)))) {
+ return false;
+ }
+
+ return true;
+ };
+ if (!parser->forEachJumpOrigins(pc, callback)) {
+ return false;
+ }
+ if (called) {
+ if (!sp->put("\n")) {
+ return false;
+ }
+ }
+
+ return true;
+}
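+
+// The resulting annotation line might look roughly like (sketch):
+//
+//   # from IfEq @ 00012, try-catch from Try @ 00034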
+
+static bool DecompileAtPCForStackDump(
+ JSContext* cx, HandleScript script,
+ const OffsetAndDefIndex& offsetAndDefIndex, Sprinter* sp);
+
+static bool PrintShapeProperties(JSContext* cx, Sprinter* sp,
+ SharedShape* shape) {
+ // Add all property keys to a vector to allow printing them in property
+ // definition order.
+ Vector<PropertyKey> props(cx);
+ for (SharedShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
+ if (!props.append(iter->key())) {
+ return false;
+ }
+ }
+
+ if (!sp->put("{")) {
+ return false;
+ }
+
+ for (size_t i = props.length(); i > 0; i--) {
+ PropertyKey key = props[i - 1];
+ RootedValue keyv(cx, IdToValue(key));
+ JSString* str = ToString<NoGC>(cx, keyv);
+ if (!str) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ if (!sp->putString(str)) {
+ return false;
+ }
+ if (i > 1) {
+ if (!sp->put(", ")) {
+ return false;
+ }
+ }
+ }
+
+ return sp->put("}");
+}
+
+static unsigned Disassemble1(JSContext* cx, HandleScript script, jsbytecode* pc,
+ unsigned loc, bool lines,
+ const BytecodeParser* parser, Sprinter* sp) {
+ if (parser && parser->isReachable(pc)) {
+ if (!DumpJumpOrigins(script, pc, parser, sp)) {
+ return 0;
+ }
+ }
+
+ size_t before = sp->stringEnd() - sp->string();
+ bool stackDumped = false;
+ auto dumpStack = [&cx, &script, &pc, &parser, &sp, &before, &stackDumped]() {
+ if (!parser) {
+ return true;
+ }
+ if (stackDumped) {
+ return true;
+ }
+ stackDumped = true;
+
+ size_t after = sp->stringEnd() - sp->string();
+ MOZ_ASSERT(after >= before);
+
+ static const size_t stack_column = 40;
+ for (size_t i = after - before; i < stack_column - 1; i++) {
+ if (!sp->put(" ")) {
+ return false;
+ }
+ }
+
+ if (!sp->put(" # ")) {
+ return false;
+ }
+
+ if (!parser->isReachable(pc)) {
+ if (!sp->put("!!! UNREACHABLE !!!")) {
+ return false;
+ }
+ } else {
+ uint32_t depth = parser->stackDepthAfterPC(pc);
+
+ for (uint32_t i = 0; i < depth; i++) {
+ if (i) {
+ if (!sp->put(" ")) {
+ return false;
+ }
+ }
+
+ const OffsetAndDefIndex& offsetAndDefIndex =
+ parser->offsetForStackOperandAfterPC(script->pcToOffset(pc), i);
+ // This will decompile the stack for the same PC many times.
+ // We'll avoid optimizing it since this is a testing function
+        // and it isn't worth managing cached expressions here.
+ if (!DecompileAtPCForStackDump(cx, script, offsetAndDefIndex, sp)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ };
+
+ if (*pc >= JSOP_LIMIT) {
+ char numBuf1[12], numBuf2[12];
+ SprintfLiteral(numBuf1, "%d", int(*pc));
+ SprintfLiteral(numBuf2, "%d", JSOP_LIMIT);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BYTECODE_TOO_BIG, numBuf1, numBuf2);
+ return 0;
+ }
+ JSOp op = JSOp(*pc);
+ const JSCodeSpec& cs = CodeSpec(op);
+ const unsigned len = cs.length;
+ if (!sp->jsprintf("%05u:", loc)) {
+ return 0;
+ }
+ if (lines) {
+ if (!sp->jsprintf("%4u", PCToLineNumber(script, pc))) {
+ return 0;
+ }
+ }
+ if (!sp->jsprintf(" %s", CodeName(op))) {
+ return 0;
+ }
+
+ int i;
+ switch (JOF_TYPE(cs.format)) {
+ case JOF_BYTE:
+ break;
+
+ case JOF_JUMP: {
+ ptrdiff_t off = GET_JUMP_OFFSET(pc);
+ if (!sp->jsprintf(" %u (%+d)", unsigned(loc + int(off)), int(off))) {
+ return 0;
+ }
+ break;
+ }
+
+ case JOF_SCOPE: {
+ Rooted<Scope*> scope(cx, script->getScope(pc));
+ UniqueChars bytes;
+ if (!ToDisassemblySource(cx, scope, &bytes)) {
+ return 0;
+ }
+ if (!sp->jsprintf(" %s", bytes.get())) {
+ return 0;
+ }
+ break;
+ }
+
+ case JOF_ENVCOORD: {
+ RootedValue v(cx, StringValue(EnvironmentCoordinateNameSlow(script, pc)));
+ UniqueChars bytes = ToDisassemblySource(cx, v);
+ if (!bytes) {
+ return 0;
+ }
+ EnvironmentCoordinate ec(pc);
+ if (!sp->jsprintf(" %s (hops = %u, slot = %u)", bytes.get(), ec.hops(),
+ ec.slot())) {
+ return 0;
+ }
+ break;
+ }
+ case JOF_DEBUGCOORD: {
+ EnvironmentCoordinate ec(pc);
+ if (!sp->jsprintf("(hops = %u, slot = %u)", ec.hops(), ec.slot())) {
+ return 0;
+ }
+ break;
+ }
+ case JOF_ATOM: {
+ RootedValue v(cx, StringValue(script->getAtom(pc)));
+ UniqueChars bytes = ToDisassemblySource(cx, v);
+ if (!bytes) {
+ return 0;
+ }
+ if (!sp->jsprintf(" %s", bytes.get())) {
+ return 0;
+ }
+ break;
+ }
+ case JOF_STRING: {
+ RootedValue v(cx, StringValue(script->getString(pc)));
+ UniqueChars bytes = ToDisassemblySource(cx, v);
+ if (!bytes) {
+ return 0;
+ }
+ if (!sp->jsprintf(" %s", bytes.get())) {
+ return 0;
+ }
+ break;
+ }
+
+ case JOF_DOUBLE: {
+ double d = GET_INLINE_VALUE(pc).toDouble();
+ if (!sp->jsprintf(" %lf", d)) {
+ return 0;
+ }
+ break;
+ }
+
+ case JOF_BIGINT: {
+ RootedValue v(cx, BigIntValue(script->getBigInt(pc)));
+ UniqueChars bytes = ToDisassemblySource(cx, v);
+ if (!bytes) {
+ return 0;
+ }
+ if (!sp->jsprintf(" %s", bytes.get())) {
+ return 0;
+ }
+ break;
+ }
+
+ case JOF_OBJECT: {
+ JSObject* obj = script->getObject(pc);
+ {
+ RootedValue v(cx, ObjectValue(*obj));
+ UniqueChars bytes = ToDisassemblySource(cx, v);
+ if (!bytes) {
+ return 0;
+ }
+ if (!sp->jsprintf(" %s", bytes.get())) {
+ return 0;
+ }
+ }
+ break;
+ }
+
+ case JOF_SHAPE: {
+ SharedShape* shape = script->getShape(pc);
+ if (!sp->put(" ")) {
+ return 0;
+ }
+ if (!PrintShapeProperties(cx, sp, shape)) {
+ return 0;
+ }
+ break;
+ }
+
+ case JOF_REGEXP: {
+ js::RegExpObject* obj = script->getRegExp(pc);
+ RootedValue v(cx, ObjectValue(*obj));
+ UniqueChars bytes = ToDisassemblySource(cx, v);
+ if (!bytes) {
+ return 0;
+ }
+ if (!sp->jsprintf(" %s", bytes.get())) {
+ return 0;
+ }
+ break;
+ }
+
+ case JOF_TABLESWITCH: {
+ int32_t i, low, high;
+
+ ptrdiff_t off = GET_JUMP_OFFSET(pc);
+ jsbytecode* pc2 = pc + JUMP_OFFSET_LEN;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ if (!sp->jsprintf(" defaultOffset %d low %d high %d", int(off), low,
+ high)) {
+ return 0;
+ }
+
+      // Display the stack dump before displaying the offsets for each case.
+ if (!dumpStack()) {
+ return 0;
+ }
+
+ for (i = low; i <= high; i++) {
+ off =
+ script->tableSwitchCaseOffset(pc, i - low) - script->pcToOffset(pc);
+ if (!sp->jsprintf("\n\t%d: %d", i, int(off))) {
+ return 0;
+ }
+ }
+ break;
+ }
+
+ case JOF_QARG:
+ if (!sp->jsprintf(" %u", GET_ARGNO(pc))) {
+ return 0;
+ }
+ break;
+
+ case JOF_LOCAL:
+ if (!sp->jsprintf(" %u", GET_LOCALNO(pc))) {
+ return 0;
+ }
+ break;
+
+ case JOF_GCTHING:
+ if (!sp->jsprintf(" %u", unsigned(GET_GCTHING_INDEX(pc)))) {
+ return 0;
+ }
+ break;
+
+ case JOF_UINT32:
+ if (!sp->jsprintf(" %u", GET_UINT32(pc))) {
+ return 0;
+ }
+ break;
+
+ case JOF_ICINDEX:
+ if (!sp->jsprintf(" (ic: %u)", GET_ICINDEX(pc))) {
+ return 0;
+ }
+ break;
+
+ case JOF_LOOPHEAD:
+ if (!sp->jsprintf(" (ic: %u, depthHint: %u)", GET_ICINDEX(pc),
+ LoopHeadDepthHint(pc))) {
+ return 0;
+ }
+ break;
+
+ case JOF_TWO_UINT8: {
+ int one = (int)GET_UINT8(pc);
+ int two = (int)GET_UINT8(pc + 1);
+
+ if (!sp->jsprintf(" %d", one)) {
+ return 0;
+ }
+ if (!sp->jsprintf(" %d", two)) {
+ return 0;
+ }
+ break;
+ }
+
+ case JOF_ARGC:
+ case JOF_UINT16:
+ i = (int)GET_UINT16(pc);
+ goto print_int;
+
+ case JOF_RESUMEINDEX:
+ case JOF_UINT24:
+ MOZ_ASSERT(len == 4);
+ i = (int)GET_UINT24(pc);
+ goto print_int;
+
+ case JOF_UINT8:
+ i = GET_UINT8(pc);
+ goto print_int;
+
+ case JOF_INT8:
+ i = GET_INT8(pc);
+ goto print_int;
+
+ case JOF_INT32:
+ MOZ_ASSERT(op == JSOp::Int32);
+ i = GET_INT32(pc);
+ print_int:
+ if (!sp->jsprintf(" %d", i)) {
+ return 0;
+ }
+ break;
+
+ default: {
+ char numBuf[12];
+ SprintfLiteral(numBuf, "%x", cs.format);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_UNKNOWN_FORMAT, numBuf);
+ return 0;
+ }
+ }
+
+ if (!dumpStack()) {
+ return 0;
+ }
+
+ if (!sp->put("\n")) {
+ return 0;
+ }
+ return len;
+}
+
+unsigned js::Disassemble1(JSContext* cx, JS::Handle<JSScript*> script,
+ jsbytecode* pc, unsigned loc, bool lines,
+ Sprinter* sp) {
+ return Disassemble1(cx, script, pc, loc, lines, nullptr, sp);
+}
+
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+
+namespace {
+/*
+ * The expression decompiler is invoked by error handling code to produce a
+ * string representation of the erroring expression. As it's only a debugging
+ * tool, it only supports basic expressions. For anything complicated, it simply
+ * puts "(intermediate value)" into the error result.
+ *
+ * Here's the basic algorithm:
+ *
+ * 1. Find the stack location of the value whose expression we wish to
+ * decompile. The error handler can explicitly pass this as an
+ * argument. Otherwise, we search backwards down the stack for the offending
+ * value.
+ *
+ * 2. Instantiate and run a BytecodeParser for the current frame. This creates a
+ * stack of pcs parallel to the interpreter stack; given an interpreter stack
+ * location, the corresponding pc stack location contains the opcode that pushed
+ * the value in the interpreter. Now, with the result of step 1, we have the
+ * opcode responsible for pushing the value we want to decompile.
+ *
+ * 3. Pass the opcode to decompilePC. decompilePC is the main decompiler
+ * routine, responsible for a string representation of the expression that
+ * generated a certain stack location. decompilePC looks at one opcode and
+ * returns the JS source equivalent of that opcode.
+ *
+ * 4. Expressions can, of course, contain subexpressions. For example, the
+ * literals "4" and "5" are subexpressions of the addition operator in "4 +
+ * 5". If we need to decompile a subexpression, we call decompilePC (step 2)
+ * recursively on the operands' pcs. The result is a depth-first traversal of
+ * the expression tree.
+ *
+ */
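+/*
+ * For example (sketch): when |a.b()| throws "a.b is not a function", the
+ * callee slot was pushed by a JSOp::GetProp; decompilePC renders that opcode
+ * as its operand's decompilation ("a") followed by ".b", yielding "a.b".
+ */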
+struct ExpressionDecompiler {
+ JSContext* cx;
+ RootedScript script;
+ const BytecodeParser& parser;
+ Sprinter sprinter;
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ // Dedicated mode for stack dump.
+ // Generates an expression for stack dump, including internal state,
+ // and also disables special handling for self-hosted code.
+ bool isStackDump;
+#endif
+
+ ExpressionDecompiler(JSContext* cx, JSScript* script,
+ const BytecodeParser& parser)
+ : cx(cx),
+ script(cx, script),
+ parser(parser),
+ sprinter(cx)
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ ,
+ isStackDump(false)
+#endif
+ {
+ }
+ bool init();
+ bool decompilePCForStackOperand(jsbytecode* pc, int i);
+ bool decompilePC(jsbytecode* pc, uint8_t defIndex);
+ bool decompilePC(const OffsetAndDefIndex& offsetAndDefIndex);
+ JSAtom* getArg(unsigned slot);
+ JSAtom* loadAtom(jsbytecode* pc);
+ JSString* loadString(jsbytecode* pc);
+ bool quote(JSString* s, char quote);
+ bool write(const char* s);
+ bool write(JSString* str);
+ UniqueChars getOutput();
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ void setStackDump() { isStackDump = true; }
+#endif
+};
+
+bool ExpressionDecompiler::decompilePCForStackOperand(jsbytecode* pc, int i) {
+ return decompilePC(parser.offsetForStackOperand(script->pcToOffset(pc), i));
+}
+
+bool ExpressionDecompiler::decompilePC(jsbytecode* pc, uint8_t defIndex) {
+ MOZ_ASSERT(script->containsPC(pc));
+
+ JSOp op = (JSOp)*pc;
+
+ if (const char* token = CodeToken[uint8_t(op)]) {
+ MOZ_ASSERT(defIndex == 0);
+ MOZ_ASSERT(CodeSpec(op).ndefs == 1);
+
+ // Handle simple cases of binary and unary operators.
+ switch (CodeSpec(op).nuses) {
+ case 2: {
+ const SrcNote* sn = GetSrcNote(cx, script, pc);
+ const char* extra =
+ sn && sn->type() == SrcNoteType::AssignOp ? "=" : "";
+ return write("(") && decompilePCForStackOperand(pc, -2) && write(" ") &&
+ write(token) && write(extra) && write(" ") &&
+ decompilePCForStackOperand(pc, -1) && write(")");
+ break;
+ }
+ case 1:
+ return write("(") && write(token) &&
+ decompilePCForStackOperand(pc, -1) && write(")");
+ default:
+ break;
+ }
+ }
+
+ switch (op) {
+ case JSOp::DelName:
+ return write("(delete ") && write(loadAtom(pc)) && write(")");
+
+ case JSOp::GetGName:
+ case JSOp::GetName:
+ case JSOp::GetIntrinsic:
+ return write(loadAtom(pc));
+ case JSOp::GetArg: {
+ unsigned slot = GET_ARGNO(pc);
+
+ // For self-hosted scripts that are called from non-self-hosted code,
+ // decompiling the parameter name in the self-hosted script is
+ // unhelpful. Decompile the argument name instead.
+ if (script->selfHosted()
+#ifdef DEBUG
+ // For stack dump, argument name is not necessary.
+ && !isStackDump
+#endif /* DEBUG */
+ ) {
+ UniqueChars result;
+ if (!DecompileArgumentFromStack(cx, slot, &result)) {
+ return false;
+ }
+
+ // Note that decompiling the argument in the parent frame might
+ // not succeed.
+ if (result) {
+ return write(result.get());
+ }
+
+      // If it fails, do not return the parameter name and let the caller
+      // fall back.
+ return write("(intermediate value)");
+ }
+
+ JSAtom* atom = getArg(slot);
+ if (!atom) {
+ return false;
+ }
+ return write(atom);
+ }
+ case JSOp::GetLocal: {
+ JSAtom* atom = FrameSlotName(script, pc);
+ MOZ_ASSERT(atom);
+ return write(atom);
+ }
+ case JSOp::GetAliasedVar: {
+ JSAtom* atom = EnvironmentCoordinateNameSlow(script, pc);
+ MOZ_ASSERT(atom);
+ return write(atom);
+ }
+
+ case JSOp::DelProp:
+ case JSOp::StrictDelProp:
+ case JSOp::GetProp:
+ case JSOp::GetBoundName: {
+ bool hasDelete = op == JSOp::DelProp || op == JSOp::StrictDelProp;
+ Rooted<JSAtom*> prop(cx, loadAtom(pc));
+ MOZ_ASSERT(prop);
+ return (hasDelete ? write("(delete ") : true) &&
+ decompilePCForStackOperand(pc, -1) &&
+ (IsIdentifier(prop)
+ ? write(".") && quote(prop, '\0')
+ : write("[") && quote(prop, '\'') && write("]")) &&
+ (hasDelete ? write(")") : true);
+ }
+ case JSOp::GetPropSuper: {
+ Rooted<JSAtom*> prop(cx, loadAtom(pc));
+ return write("super.") && quote(prop, '\0');
+ }
+ case JSOp::SetElem:
+ case JSOp::StrictSetElem:
+ // NOTE: We don't show the right hand side of the operation because
+ // it's used in error messages like: "a[0] is not readable".
+ //
+ // We could though.
+ return decompilePCForStackOperand(pc, -3) && write("[") &&
+ decompilePCForStackOperand(pc, -2) && write("]");
+
+ case JSOp::DelElem:
+ case JSOp::StrictDelElem:
+ case JSOp::GetElem: {
+ bool hasDelete = (op == JSOp::DelElem || op == JSOp::StrictDelElem);
+ return (hasDelete ? write("(delete ") : true) &&
+ decompilePCForStackOperand(pc, -2) && write("[") &&
+ decompilePCForStackOperand(pc, -1) && write("]") &&
+ (hasDelete ? write(")") : true);
+ }
+
+ case JSOp::GetElemSuper:
+ return write("super[") && decompilePCForStackOperand(pc, -2) &&
+ write("]");
+ case JSOp::Null:
+ return write(js_null_str);
+ case JSOp::True:
+ return write(js_true_str);
+ case JSOp::False:
+ return write(js_false_str);
+ case JSOp::Zero:
+ case JSOp::One:
+ case JSOp::Int8:
+ case JSOp::Uint16:
+ case JSOp::Uint24:
+ case JSOp::Int32:
+ return sprinter.printf("%d", GetBytecodeInteger(pc));
+ case JSOp::String:
+ return quote(loadString(pc), '"');
+ case JSOp::Symbol: {
+ unsigned i = uint8_t(pc[1]);
+ MOZ_ASSERT(i < JS::WellKnownSymbolLimit);
+ if (i < JS::WellKnownSymbolLimit) {
+ return write(cx->names().wellKnownSymbolDescriptions()[i]);
+ }
+ break;
+ }
+ case JSOp::Undefined:
+ return write(js_undefined_str);
+ case JSOp::GlobalThis:
+ case JSOp::NonSyntacticGlobalThis:
+      // |this| could convert to a very long object initializer, so cite it by
+ // its keyword name.
+ return write(js_this_str);
+ case JSOp::NewTarget:
+ return write("new.target");
+ case JSOp::Call:
+ case JSOp::CallContent:
+ case JSOp::CallIgnoresRv:
+ case JSOp::CallIter:
+ case JSOp::CallContentIter: {
+ uint16_t argc = GET_ARGC(pc);
+ return decompilePCForStackOperand(pc, -int32_t(argc + 2)) &&
+ write(argc ? "(...)" : "()");
+ }
+ case JSOp::SpreadCall:
+ return decompilePCForStackOperand(pc, -3) && write("(...)");
+ case JSOp::NewArray:
+ return write("[]");
+ case JSOp::RegExp: {
+ Rooted<RegExpObject*> obj(cx, &script->getObject(pc)->as<RegExpObject>());
+ JSString* str = RegExpObject::toString(cx, obj);
+ if (!str) {
+ return false;
+ }
+ return write(str);
+ }
+ case JSOp::Object: {
+ JSObject* obj = script->getObject(pc);
+ RootedValue objv(cx, ObjectValue(*obj));
+ JSString* str = ValueToSource(cx, objv);
+ if (!str) {
+ return false;
+ }
+ return write(str);
+ }
+ case JSOp::Void:
+ return write("(void ") && decompilePCForStackOperand(pc, -1) &&
+ write(")");
+
+ case JSOp::SuperCall:
+ if (GET_ARGC(pc) == 0) {
+ return write("super()");
+ }
+ [[fallthrough]];
+ case JSOp::SpreadSuperCall:
+ return write("super(...)");
+ case JSOp::SuperFun:
+ return write("super");
+
+ case JSOp::Eval:
+ case JSOp::SpreadEval:
+ case JSOp::StrictEval:
+ case JSOp::StrictSpreadEval:
+ return write("eval(...)");
+
+ case JSOp::New:
+ case JSOp::NewContent: {
+ uint16_t argc = GET_ARGC(pc);
+ return write("(new ") &&
+ decompilePCForStackOperand(pc, -int32_t(argc + 3)) &&
+ write(argc ? "(...))" : "())");
+ }
+
+ case JSOp::SpreadNew:
+ return write("(new ") && decompilePCForStackOperand(pc, -4) &&
+ write("(...))");
+
+ case JSOp::Typeof:
+ case JSOp::TypeofExpr:
+ return write("(typeof ") && decompilePCForStackOperand(pc, -1) &&
+ write(")");
+
+ case JSOp::InitElemArray:
+ return write("[...]");
+
+ case JSOp::InitElemInc:
+ if (defIndex == 0) {
+ return write("[...]");
+ }
+ MOZ_ASSERT(defIndex == 1);
+#ifdef DEBUG
+      // INDEX won't be exposed in the error message.
+ if (isStackDump) {
+ return write("INDEX");
+ }
+#endif
+ break;
+
+ case JSOp::ToNumeric:
+ return write("(tonumeric ") && decompilePCForStackOperand(pc, -1) &&
+ write(")");
+
+ case JSOp::Inc:
+ return write("(inc ") && decompilePCForStackOperand(pc, -1) && write(")");
+
+ case JSOp::Dec:
+ return write("(dec ") && decompilePCForStackOperand(pc, -1) && write(")");
+
+ case JSOp::BigInt:
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ // BigInt::dump() only available in this configuration.
+ script->getBigInt(pc)->dump(sprinter);
+ return !sprinter.hadOutOfMemory();
+#else
+ return write("[bigint]");
+#endif
+
+ case JSOp::BuiltinObject: {
+ auto kind = BuiltinObjectKind(GET_UINT8(pc));
+ return write(BuiltinObjectName(kind));
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ case JSOp::InitTuple:
+ return write("#[]");
+
+ case JSOp::AddTupleElement:
+ case JSOp::FinishTuple:
+ return write("#[...]");
+#endif
+
+ default:
+ break;
+ }
+
+#ifdef DEBUG
+ if (isStackDump) {
+ // Special decompilation for stack dump.
+ switch (op) {
+ case JSOp::Arguments:
+ return write("arguments");
+
+ case JSOp::BindGName:
+ return write("GLOBAL");
+
+ case JSOp::BindName:
+ case JSOp::BindVar:
+ return write("ENV");
+
+ case JSOp::Callee:
+ return write("CALLEE");
+
+ case JSOp::EnvCallee:
+ return write("ENVCALLEE");
+
+ case JSOp::CallSiteObj:
+ return write("OBJ");
+
+ case JSOp::Double:
+ return sprinter.printf("%lf", GET_INLINE_VALUE(pc).toDouble());
+
+ case JSOp::Exception:
+ return write("EXCEPTION");
+
+ case JSOp::Try:
+ // Used for the values live on entry to the finally block.
+ // See TryNoteKind::Finally above.
+ if (defIndex == 0) {
+ return write("PC");
+ }
+ MOZ_ASSERT(defIndex == 1);
+ return write("THROWING");
+
+ case JSOp::FunctionThis:
+ case JSOp::ImplicitThis:
+ return write("THIS");
+
+ case JSOp::FunWithProto:
+ return write("FUN");
+
+ case JSOp::Generator:
+ return write("GENERATOR");
+
+ case JSOp::GetImport:
+ return write("VAL");
+
+ case JSOp::GetRval:
+ return write("RVAL");
+
+ case JSOp::Hole:
+ return write("HOLE");
+
+ case JSOp::IsGenClosing:
+ // For stack dump, defIndex == 0 is not used.
+ MOZ_ASSERT(defIndex == 1);
+ return write("ISGENCLOSING");
+
+ case JSOp::IsNoIter:
+ // For stack dump, defIndex == 0 is not used.
+ MOZ_ASSERT(defIndex == 1);
+ return write("ISNOITER");
+
+ case JSOp::IsConstructing:
+ return write("JS_IS_CONSTRUCTING");
+
+ case JSOp::IsNullOrUndefined:
+ return write("IS_NULL_OR_UNDEF");
+
+ case JSOp::Iter:
+ return write("ITER");
+
+ case JSOp::Lambda:
+ return write("FUN");
+
+ case JSOp::ToAsyncIter:
+ return write("ASYNCITER");
+
+ case JSOp::MoreIter:
+ // For stack dump, defIndex == 0 is not used.
+ MOZ_ASSERT(defIndex == 1);
+ return write("MOREITER");
+
+ case JSOp::MutateProto:
+ return write("SUCCEEDED");
+
+ case JSOp::NewInit:
+ case JSOp::NewObject:
+ case JSOp::ObjWithProto:
+ return write("OBJ");
+
+ case JSOp::OptimizeSpreadCall:
+ return write("OPTIMIZED");
+
+ case JSOp::Rest:
+ return write("REST");
+
+ case JSOp::Resume:
+ return write("RVAL");
+
+ case JSOp::SuperBase:
+ return write("HOMEOBJECTPROTO");
+
+ case JSOp::ToPropertyKey:
+ return write("TOPROPERTYKEY(") && decompilePCForStackOperand(pc, -1) &&
+ write(")");
+ case JSOp::ToString:
+ return write("TOSTRING(") && decompilePCForStackOperand(pc, -1) &&
+ write(")");
+
+ case JSOp::Uninitialized:
+ return write("UNINITIALIZED");
+
+ case JSOp::InitialYield:
+ case JSOp::Await:
+ case JSOp::Yield:
+ // Printing "yield SOMETHING" is confusing since the operand doesn't
+        // match the syntax: the stack operand for "yield 10" is
+ // the result object, not 10.
+ if (defIndex == 0) {
+ return write("RVAL");
+ }
+ if (defIndex == 1) {
+ return write("GENERATOR");
+ }
+ MOZ_ASSERT(defIndex == 2);
+ return write("RESUMEKIND");
+
+ case JSOp::ResumeKind:
+ return write("RESUMEKIND");
+
+ case JSOp::AsyncAwait:
+ case JSOp::AsyncResolve:
+ return write("PROMISE");
+
+ case JSOp::CheckPrivateField:
+ return write("HasPrivateField");
+
+ case JSOp::NewPrivateName:
+ return write("PRIVATENAME");
+
+ case JSOp::CheckReturn:
+ return write("RVAL");
+
+ default:
+ break;
+ }
+ return write("<unknown>");
+ }
+#endif /* DEBUG */
+
+ return write("(intermediate value)");
+}
+
+bool ExpressionDecompiler::decompilePC(
+ const OffsetAndDefIndex& offsetAndDefIndex) {
+ if (offsetAndDefIndex.isSpecial()) {
+#ifdef DEBUG
+ if (isStackDump) {
+ if (offsetAndDefIndex.isMerged()) {
+ if (!write("merged<")) {
+ return false;
+ }
+ } else if (offsetAndDefIndex.isIgnored()) {
+ if (!write("ignored<")) {
+ return false;
+ }
+ }
+
+ if (!decompilePC(script->offsetToPC(offsetAndDefIndex.specialOffset()),
+ offsetAndDefIndex.specialDefIndex())) {
+ return false;
+ }
+
+ if (!write(">")) {
+ return false;
+ }
+
+ return true;
+ }
+#endif /* DEBUG */
+ return write("(intermediate value)");
+ }
+
+ return decompilePC(script->offsetToPC(offsetAndDefIndex.offset()),
+ offsetAndDefIndex.defIndex());
+}
+
+bool ExpressionDecompiler::init() {
+ cx->check(script);
+ return sprinter.init();
+}
+
+bool ExpressionDecompiler::write(const char* s) { return sprinter.put(s); }
+
+bool ExpressionDecompiler::write(JSString* str) {
+ if (str == cx->names().dotThis) {
+ return write("this");
+ }
+ if (str == cx->names().dotNewTarget) {
+ return write("new.target");
+ }
+ return sprinter.putString(str);
+}
+
+bool ExpressionDecompiler::quote(JSString* s, char quote) {
+ return QuoteString(&sprinter, s, quote);
+}
+
+JSAtom* ExpressionDecompiler::loadAtom(jsbytecode* pc) {
+ return script->getAtom(pc);
+}
+
+JSString* ExpressionDecompiler::loadString(jsbytecode* pc) {
+ return script->getString(pc);
+}
+
+JSAtom* ExpressionDecompiler::getArg(unsigned slot) {
+ MOZ_ASSERT(script->isFunction());
+ MOZ_ASSERT(slot < script->numArgs());
+
+ for (PositionalFormalParameterIter fi(script); fi; fi++) {
+ if (fi.argumentSlot() == slot) {
+ if (!fi.isDestructured()) {
+ return fi.name();
+ }
+
+ // Destructured arguments have no single binding name.
+ static const char destructuredParam[] = "(destructured parameter)";
+ return Atomize(cx, destructuredParam, strlen(destructuredParam));
+ }
+ }
+
+ MOZ_CRASH("No binding");
+}
+
+UniqueChars ExpressionDecompiler::getOutput() {
+ ptrdiff_t len = sprinter.stringEnd() - sprinter.stringAt(0);
+ auto res = cx->make_pod_array<char>(len + 1);
+ if (!res) {
+ return nullptr;
+ }
+ js_memcpy(res.get(), sprinter.stringAt(0), len);
+ res[len] = 0;
+ return res;
+}
+
+} // anonymous namespace
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+static bool DecompileAtPCForStackDump(
+ JSContext* cx, HandleScript script,
+ const OffsetAndDefIndex& offsetAndDefIndex, Sprinter* sp) {
+ // The expression decompiler asserts the script is in the current realm.
+ AutoRealm ar(cx, script);
+
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+ BytecodeParser parser(cx, allocScope.alloc(), script);
+ parser.setStackDump();
+ if (!parser.parse()) {
+ return false;
+ }
+
+ ExpressionDecompiler ed(cx, script, parser);
+ ed.setStackDump();
+ if (!ed.init()) {
+ return false;
+ }
+
+ if (!ed.decompilePC(offsetAndDefIndex)) {
+ return false;
+ }
+
+ UniqueChars result = ed.getOutput();
+ if (!result) {
+ return false;
+ }
+
+ return sp->put(result.get());
+}
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+
+static bool FindStartPC(JSContext* cx, const FrameIter& iter,
+ const BytecodeParser& parser, int spindex,
+ int skipStackHits, const Value& v, jsbytecode** valuepc,
+ uint8_t* defIndex) {
+ jsbytecode* current = *valuepc;
+ *valuepc = nullptr;
+ *defIndex = 0;
+
+ if (spindex < 0 && spindex + int(parser.stackDepthAtPC(current)) < 0) {
+ spindex = JSDVG_SEARCH_STACK;
+ }
+
+ if (spindex == JSDVG_SEARCH_STACK) {
+ size_t index = iter.numFrameSlots();
+
+    // The decompiler may be called from inside functions that are not
+    // called from script but directly via the C++ API, such as Invoke.
+    // In that case, the youngest script frame may have a completely
+    // unrelated pc and stack depth, so we give up.
+ if (index < size_t(parser.stackDepthAtPC(current))) {
+ return true;
+ }
+
+    // We search from fp->sp down to base to find the most recently computed
+    // value matching v, under the assumption that it is the value that
+    // caused the exception.
+ int stackHits = 0;
+ Value s;
+ do {
+ if (!index) {
+ return true;
+ }
+ s = iter.frameSlotValue(--index);
+ } while (s != v || stackHits++ != skipStackHits);
+
+ // If the current PC has fewer values on the stack than the index we are
+ // looking for, the blamed value must be one pushed by the current
+ // bytecode (e.g. JSOp::MoreIter), so restore *valuepc.
+ if (index < size_t(parser.stackDepthAtPC(current))) {
+ *valuepc = parser.pcForStackOperand(current, index, defIndex);
+ } else {
+ *valuepc = current;
+ *defIndex = index - size_t(parser.stackDepthAtPC(current));
+ }
+ } else {
+ *valuepc = parser.pcForStackOperand(current, spindex, defIndex);
+ }
+ return true;
+}
+
+static bool DecompileExpressionFromStack(JSContext* cx, int spindex,
+ int skipStackHits, HandleValue v,
+ UniqueChars* res) {
+ MOZ_ASSERT(spindex < 0 || spindex == JSDVG_IGNORE_STACK ||
+ spindex == JSDVG_SEARCH_STACK);
+
+ *res = nullptr;
+
+ /*
+ * Give up if we need deterministic behavior for differential testing.
+ * IonMonkey doesn't use InterpreterFrames and this ensures we get the same
+ * error messages.
+ */
+ if (js::SupportDifferentialTesting()) {
+ return true;
+ }
+
+ if (spindex == JSDVG_IGNORE_STACK) {
+ return true;
+ }
+
+ FrameIter frameIter(cx);
+
+ if (frameIter.done() || !frameIter.hasScript() ||
+ frameIter.realm() != cx->realm() || frameIter.inPrologue()) {
+ return true;
+ }
+
+ /*
+ * FIXME: Fall back if iter.isIon(), since the stack snapshot may be for the
+ * previous pc (see bug 831120).
+ */
+ if (frameIter.isIon()) {
+ return true;
+ }
+
+ RootedScript script(cx, frameIter.script());
+ jsbytecode* valuepc = frameIter.pc();
+
+ MOZ_ASSERT(script->containsPC(valuepc));
+
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+ BytecodeParser parser(cx, allocScope.alloc(), frameIter.script());
+ if (!parser.parse()) {
+ return false;
+ }
+
+ uint8_t defIndex;
+ if (!FindStartPC(cx, frameIter, parser, spindex, skipStackHits, v, &valuepc,
+ &defIndex)) {
+ return false;
+ }
+ if (!valuepc) {
+ return true;
+ }
+
+ ExpressionDecompiler ed(cx, script, parser);
+ if (!ed.init()) {
+ return false;
+ }
+ if (!ed.decompilePC(valuepc, defIndex)) {
+ return false;
+ }
+
+ *res = ed.getOutput();
+ return *res != nullptr;
+}
+
+UniqueChars js::DecompileValueGenerator(JSContext* cx, int spindex,
+ HandleValue v, HandleString fallbackArg,
+ int skipStackHits) {
+ RootedString fallback(cx, fallbackArg);
+ {
+ UniqueChars result;
+ if (!DecompileExpressionFromStack(cx, spindex, skipStackHits, v, &result)) {
+ return nullptr;
+ }
+ if (result && strcmp(result.get(), "(intermediate value)")) {
+ return result;
+ }
+ }
+ if (!fallback) {
+ if (v.isUndefined()) {
+ return DuplicateString(
+ cx, js_undefined_str); // Prevent users from seeing "(void 0)"
+ }
+ fallback = ValueToSource(cx, v);
+ if (!fallback) {
+ return nullptr;
+ }
+ }
+
+ return StringToNewUTF8CharsZ(cx, *fallback);
+}
+
+static bool DecompileArgumentFromStack(JSContext* cx, int formalIndex,
+ UniqueChars* res) {
+ MOZ_ASSERT(formalIndex >= 0);
+
+ *res = nullptr;
+
+ /* See note in DecompileExpressionFromStack. */
+ if (js::SupportDifferentialTesting()) {
+ return true;
+ }
+
+ /*
+ * Settle on the nearest script frame, which should be the builtin that
+ * called the intrinsic.
+ */
+ FrameIter frameIter(cx);
+ MOZ_ASSERT(!frameIter.done());
+ MOZ_ASSERT(frameIter.script()->selfHosted());
+
+ /*
+ * Get the second-to-top frame, the non-self-hosted caller of the builtin
+ * that called the intrinsic.
+ */
+ ++frameIter;
+ if (frameIter.done() || !frameIter.hasScript() ||
+ frameIter.script()->selfHosted() || frameIter.realm() != cx->realm()) {
+ return true;
+ }
+
+ RootedScript script(cx, frameIter.script());
+ jsbytecode* current = frameIter.pc();
+
+ MOZ_ASSERT(script->containsPC(current));
+
+ if (current < script->main()) {
+ return true;
+ }
+
+ /* Don't handle getters, setters or calls from fun.call/fun.apply. */
+ JSOp op = JSOp(*current);
+ if (op != JSOp::Call && op != JSOp::CallContent &&
+ op != JSOp::CallIgnoresRv && op != JSOp::New && op != JSOp::NewContent) {
+ return true;
+ }
+
+ if (static_cast<unsigned>(formalIndex) >= GET_ARGC(current)) {
+ return true;
+ }
+
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+ BytecodeParser parser(cx, allocScope.alloc(), script);
+ if (!parser.parse()) {
+ return false;
+ }
+
+ bool pushedNewTarget = op == JSOp::New || op == JSOp::NewContent;
+ int formalStackIndex = parser.stackDepthAtPC(current) - GET_ARGC(current) -
+ pushedNewTarget + formalIndex;
+ MOZ_ASSERT(formalStackIndex >= 0);
+ if (uint32_t(formalStackIndex) >= parser.stackDepthAtPC(current)) {
+ return true;
+ }
+
+ ExpressionDecompiler ed(cx, script, parser);
+ if (!ed.init()) {
+ return false;
+ }
+ if (!ed.decompilePCForStackOperand(current, formalStackIndex)) {
+ return false;
+ }
+
+ *res = ed.getOutput();
+ return *res != nullptr;
+}
+
+JSString* js::DecompileArgument(JSContext* cx, int formalIndex, HandleValue v) {
+ {
+ UniqueChars result;
+ if (!DecompileArgumentFromStack(cx, formalIndex, &result)) {
+ return nullptr;
+ }
+ if (result && strcmp(result.get(), "(intermediate value)")) {
+ JS::ConstUTF8CharsZ utf8chars(result.get(), strlen(result.get()));
+ return NewStringCopyUTF8Z(cx, utf8chars);
+ }
+ }
+ if (v.isUndefined()) {
+ return cx->names().undefined; // Prevent users from seeing "(void 0)"
+ }
+
+ return ValueToSource(cx, v);
+}
+
+extern bool js::IsValidBytecodeOffset(JSContext* cx, JSScript* script,
+ size_t offset) {
+ // This could be faster (by following jump instructions if the target
+ // is <= offset).
+ for (BytecodeRange r(cx, script); !r.empty(); r.popFront()) {
+ size_t here = r.frontOffset();
+ if (here >= offset) {
+ return here == offset;
+ }
+ }
+ return false;
+}
+
+/*
+ * There are three possible PCCount profiling states:
+ *
+ * 1. None: Neither scripts nor the runtime have count information.
+ * 2. Profile: Active scripts have count information, the runtime does not.
+ * 3. Query: Scripts do not have count information, the runtime does.
+ *
+ * When starting to profile scripts, counting begins immediately, with all JIT
+ * code discarded and recompiled with counts as necessary. Active interpreter
+ * frames will not begin profiling until they begin executing another script
+ * (via a call or return).
+ *
+ * The below API functions manage transitions to new states, according
+ * to the table below.
+ *
+ * Old State
+ * -------------------------
+ * Function None Profile Query
+ * --------
+ * StartPCCountProfiling Profile Profile Profile
+ * StopPCCountProfiling None Query Query
+ * PurgePCCounts None None None
+ */
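+
+/*
+ * Illustrative sketch of a profiling session driven through this API; the
+ * JS:: entry points are the ones defined below, while the workload harness
+ * is hypothetical:
+ *
+ *   JS::StartPCCountProfiling(cx);     // None/Profile/Query -> Profile
+ *   RunWorkload(cx);                   // hypothetical: execute some scripts
+ *   JS::StopPCCountProfiling(cx);      // Profile -> Query
+ *   for (size_t i = 0; i < JS::GetPCCountScriptCount(cx); i++) {
+ *     JSString* summary = JS::GetPCCountScriptSummary(cx, i);
+ *     // ... consume the JSON summary ...
+ *   }
+ *   JS::PurgePCCounts(cx);             // Query -> None
+ */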
+
+static void ReleaseScriptCounts(JSRuntime* rt) {
+ MOZ_ASSERT(rt->scriptAndCountsVector);
+
+ js_delete(rt->scriptAndCountsVector.ref());
+ rt->scriptAndCountsVector = nullptr;
+}
+
+void JS::StartPCCountProfiling(JSContext* cx) {
+ JSRuntime* rt = cx->runtime();
+
+ if (rt->profilingScripts) {
+ return;
+ }
+
+ if (rt->scriptAndCountsVector) {
+ ReleaseScriptCounts(rt);
+ }
+
+ ReleaseAllJITCode(rt->gcContext());
+
+ rt->profilingScripts = true;
+}
+
+void JS::StopPCCountProfiling(JSContext* cx) {
+ JSRuntime* rt = cx->runtime();
+
+ if (!rt->profilingScripts) {
+ return;
+ }
+ MOZ_ASSERT(!rt->scriptAndCountsVector);
+
+ ReleaseAllJITCode(rt->gcContext());
+
+ auto* vec = cx->new_<PersistentRooted<ScriptAndCountsVector>>(
+ cx, ScriptAndCountsVector());
+ if (!vec) {
+ return;
+ }
+
+ for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
+ for (auto base = zone->cellIter<BaseScript>(); !base.done(); base.next()) {
+ if (base->hasScriptCounts() && base->hasJitScript()) {
+ if (!vec->append(base->asJSScript())) {
+ return;
+ }
+ }
+ }
+ }
+
+ rt->profilingScripts = false;
+ rt->scriptAndCountsVector = vec;
+}
+
+void JS::PurgePCCounts(JSContext* cx) {
+ JSRuntime* rt = cx->runtime();
+
+ if (!rt->scriptAndCountsVector) {
+ return;
+ }
+ MOZ_ASSERT(!rt->profilingScripts);
+
+ ReleaseScriptCounts(rt);
+}
+
+size_t JS::GetPCCountScriptCount(JSContext* cx) {
+ JSRuntime* rt = cx->runtime();
+
+ if (!rt->scriptAndCountsVector) {
+ return 0;
+ }
+
+ return rt->scriptAndCountsVector->length();
+}
+
+[[nodiscard]] static bool JSONStringProperty(Sprinter& sp, JSONPrinter& json,
+ const char* name, JSString* str) {
+ json.beginStringProperty(name);
+ if (!JSONQuoteString(&sp, str)) {
+ return false;
+ }
+ json.endStringProperty();
+ return true;
+}
+
+JSString* JS::GetPCCountScriptSummary(JSContext* cx, size_t index) {
+ JSRuntime* rt = cx->runtime();
+
+ if (!rt->scriptAndCountsVector ||
+ index >= rt->scriptAndCountsVector->length()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BUFFER_TOO_SMALL);
+ return nullptr;
+ }
+
+ const ScriptAndCounts& sac = (*rt->scriptAndCountsVector)[index];
+ RootedScript script(cx, sac.script);
+
+ Sprinter sp(cx);
+ if (!sp.init()) {
+ return nullptr;
+ }
+
+ JSONPrinter json(sp, false);
+
+ json.beginObject();
+
+ Rooted<JSString*> filenameStr(cx);
+ if (const char* filename = script->filename()) {
+ filenameStr =
+ JS_NewStringCopyUTF8N(cx, JS::UTF8Chars(filename, strlen(filename)));
+ } else {
+ filenameStr = JS_GetEmptyString(cx);
+ }
+ if (!filenameStr) {
+ return nullptr;
+ }
+ if (!JSONStringProperty(sp, json, "file", filenameStr)) {
+ return nullptr;
+ }
+ json.property("line", script->lineno());
+
+ if (JSFunction* fun = script->function()) {
+ if (JSAtom* atom = fun->displayAtom()) {
+ if (!JSONStringProperty(sp, json, "name", atom)) {
+ return nullptr;
+ }
+ }
+ }
+
+ uint64_t total = 0;
+
+ AllBytecodesIterable iter(script);
+ for (BytecodeLocation loc : iter) {
+ if (const PCCounts* counts = sac.maybeGetPCCounts(loc.toRawBytecode())) {
+ total += counts->numExec();
+ }
+ }
+
+ json.beginObjectProperty("totals");
+
+ json.property(PCCounts::numExecName, total);
+
+ uint64_t ionActivity = 0;
+ jit::IonScriptCounts* ionCounts = sac.getIonCounts();
+ while (ionCounts) {
+ for (size_t i = 0; i < ionCounts->numBlocks(); i++) {
+ ionActivity += ionCounts->block(i).hitCount();
+ }
+ ionCounts = ionCounts->previous();
+ }
+ if (ionActivity) {
+ json.property("ion", ionActivity);
+ }
+
+ json.endObject();
+
+ json.endObject();
+
+ if (sp.hadOutOfMemory()) {
+ return nullptr;
+ }
+
+ return NewStringCopyZ<CanGC>(cx, sp.string());
+}
+
+static bool GetPCCountJSON(JSContext* cx, const ScriptAndCounts& sac,
+ Sprinter& sp) {
+ JSONPrinter json(sp, false);
+
+ RootedScript script(cx, sac.script);
+
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+ BytecodeParser parser(cx, allocScope.alloc(), script);
+ if (!parser.parse()) {
+ return false;
+ }
+
+ json.beginObject();
+
+ JSString* str = JS_DecompileScript(cx, script);
+ if (!str) {
+ return false;
+ }
+
+ if (!JSONStringProperty(sp, json, "text", str)) {
+ return false;
+ }
+
+ json.property("line", script->lineno());
+
+ json.beginListProperty("opcodes");
+
+ uint64_t hits = 0;
+ for (BytecodeRangeWithPosition range(cx, script); !range.empty();
+ range.popFront()) {
+ jsbytecode* pc = range.frontPC();
+ size_t offset = script->pcToOffset(pc);
+ JSOp op = JSOp(*pc);
+
+ // If the current instruction is a jump target,
+ // then update the number of hits.
+ if (const PCCounts* counts = sac.maybeGetPCCounts(pc)) {
+ hits = counts->numExec();
+ }
+
+ json.beginObject();
+
+ json.property("id", offset);
+ json.property("line", range.frontLineNumber());
+ json.property("name", CodeName(op));
+
+ {
+ ExpressionDecompiler ed(cx, script, parser);
+ if (!ed.init()) {
+ return false;
+ }
+ // defIndex passed here is not used.
+ if (!ed.decompilePC(pc, /* defIndex = */ 0)) {
+ return false;
+ }
+ UniqueChars text = ed.getOutput();
+ if (!text) {
+ return false;
+ }
+
+ JS::ConstUTF8CharsZ utf8chars(text.get(), strlen(text.get()));
+ JSString* str = NewStringCopyUTF8Z(cx, utf8chars);
+ if (!str) {
+ return false;
+ }
+
+ if (!JSONStringProperty(sp, json, "text", str)) {
+ return false;
+ }
+ }
+
+ json.beginObjectProperty("counts");
+ if (hits > 0) {
+ json.property(PCCounts::numExecName, hits);
+ }
+ json.endObject();
+
+ json.endObject();
+
+    // If the current instruction has thrown,
+    // decrement the hit count by the number of throws.
+ if (const PCCounts* counts = sac.maybeGetThrowCounts(pc)) {
+ hits -= counts->numExec();
+ }
+ }
+
+ json.endList();
+
+ if (jit::IonScriptCounts* ionCounts = sac.getIonCounts()) {
+ json.beginListProperty("ion");
+
+ while (ionCounts) {
+ json.beginList();
+ for (size_t i = 0; i < ionCounts->numBlocks(); i++) {
+ const jit::IonBlockCounts& block = ionCounts->block(i);
+
+ json.beginObject();
+ json.property("id", block.id());
+ json.property("offset", block.offset());
+
+ json.beginListProperty("successors");
+ for (size_t j = 0; j < block.numSuccessors(); j++) {
+ json.value(block.successor(j));
+ }
+ json.endList();
+
+ json.property("hits", block.hitCount());
+
+ JSString* str = NewStringCopyZ<CanGC>(cx, block.code());
+ if (!str) {
+ return false;
+ }
+
+ if (!JSONStringProperty(sp, json, "code", str)) {
+ return false;
+ }
+
+ json.endObject();
+ }
+ json.endList();
+
+ ionCounts = ionCounts->previous();
+ }
+
+ json.endList();
+ }
+
+ json.endObject();
+
+ return !sp.hadOutOfMemory();
+}
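+
+// Illustrative sketch of the JSON shape produced above (values invented; the
+// key inside "counts" is PCCounts::numExecName):
+//
+//   { "text": "...", "line": 10,
+//     "opcodes": [ { "id": 0, "line": 10, "name": "GetGName",
+//                    "text": "x", "counts": { ... } }, ... ],
+//     "ion": [ [ { "id": 0, "offset": 0, "successors": [ 1 ],
+//                  "hits": 7, "code": "..." } ] ] }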
+
+JSString* JS::GetPCCountScriptContents(JSContext* cx, size_t index) {
+ JSRuntime* rt = cx->runtime();
+
+ if (!rt->scriptAndCountsVector ||
+ index >= rt->scriptAndCountsVector->length()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BUFFER_TOO_SMALL);
+ return nullptr;
+ }
+
+ const ScriptAndCounts& sac = (*rt->scriptAndCountsVector)[index];
+ JSScript* script = sac.script;
+
+ Sprinter sp(cx);
+ if (!sp.init()) {
+ return nullptr;
+ }
+
+ {
+ AutoRealm ar(cx, &script->global());
+ if (!GetPCCountJSON(cx, sac, sp)) {
+ return nullptr;
+ }
+ }
+
+ if (sp.hadOutOfMemory()) {
+ return nullptr;
+ }
+
+ return NewStringCopyZ<CanGC>(cx, sp.string());
+}
+
+struct CollectedScripts {
+ MutableHandle<ScriptVector> scripts;
+ bool ok = true;
+
+ explicit CollectedScripts(MutableHandle<ScriptVector> scripts)
+ : scripts(scripts) {}
+
+ static void consider(JSRuntime* rt, void* data, BaseScript* script,
+ const JS::AutoRequireNoGC& nogc) {
+ auto self = static_cast<CollectedScripts*>(data);
+ if (!script->filename()) {
+ return;
+ }
+ if (!self->scripts.append(script->asJSScript())) {
+ self->ok = false;
+ }
+ }
+};
+
+static bool GenerateLcovInfo(JSContext* cx, JS::Realm* realm,
+ GenericPrinter& out) {
+ AutoRealmUnchecked ar(cx, realm);
+
+ // Collect the list of scripts which are part of the current realm.
+
+ MOZ_RELEASE_ASSERT(
+ coverage::IsLCovEnabled(),
+ "Coverage must be enabled for process before generating LCov info");
+
+ // Hold the scripts that we have already flushed, to avoid flushing them
+ // twice.
+ using JSScriptSet = GCHashSet<JSScript*>;
+ Rooted<JSScriptSet> scriptsDone(cx, JSScriptSet(cx));
+
+ Rooted<ScriptVector> queue(cx, ScriptVector(cx));
+
+ {
+ CollectedScripts result(&queue);
+ IterateScripts(cx, realm, &result, &CollectedScripts::consider);
+ if (!result.ok) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ if (queue.length() == 0) {
+ return true;
+ }
+
+ // Ensure the LCovRealm exists to collect info into.
+ coverage::LCovRealm* lcovRealm = realm->lcovRealm();
+ if (!lcovRealm) {
+ return false;
+ }
+
+ // Collect code coverage info for one realm.
+ do {
+ RootedScript script(cx, queue.popCopy());
+ RootedFunction fun(cx);
+
+ JSScriptSet::AddPtr entry = scriptsDone.lookupForAdd(script);
+ if (entry) {
+ continue;
+ }
+
+ if (!coverage::CollectScriptCoverage(script, false)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ script->resetScriptCounts();
+
+ if (!scriptsDone.add(entry, script)) {
+ return false;
+ }
+
+ if (!script->isTopLevel()) {
+ continue;
+ }
+
+    // Iterate from the last to the first object so that the functions are
+    // visited in the opposite order when popping elements from the stack of
+    // remaining scripts; this lists the functions more or less in order of
+    // increasing line number.
+ auto gcthings = script->gcthings();
+ for (JS::GCCellPtr gcThing : mozilla::Reversed(gcthings)) {
+ if (!gcThing.is<JSObject>()) {
+ continue;
+ }
+ JSObject* obj = &gcThing.as<JSObject>();
+
+ if (!obj->is<JSFunction>()) {
+ continue;
+ }
+ fun = &obj->as<JSFunction>();
+
+ // Ignore asm.js functions
+ if (!fun->isInterpreted()) {
+ continue;
+ }
+
+      // Queue the script in the list of scripts associated with the
+      // current source.
+ JSScript* childScript = JSFunction::getOrCreateScript(cx, fun);
+ if (!childScript || !queue.append(childScript)) {
+ return false;
+ }
+ }
+ } while (!queue.empty());
+
+ bool isEmpty = true;
+ lcovRealm->exportInto(out, &isEmpty);
+ if (out.hadOutOfMemory()) {
+ return false;
+ }
+
+ return true;
+}
+
+JS_PUBLIC_API UniqueChars js::GetCodeCoverageSummaryAll(JSContext* cx,
+ size_t* length) {
+ Sprinter out(cx);
+ if (!out.init()) {
+ return nullptr;
+ }
+
+ for (RealmsIter realm(cx->runtime()); !realm.done(); realm.next()) {
+ if (!GenerateLcovInfo(cx, realm, out)) {
+ return nullptr;
+ }
+ }
+
+ *length = out.getOffset();
+ return js::DuplicateString(cx, out.string(), *length);
+}
+
+JS_PUBLIC_API UniqueChars js::GetCodeCoverageSummary(JSContext* cx,
+ size_t* length) {
+ Sprinter out(cx);
+ if (!out.init()) {
+ return nullptr;
+ }
+
+ if (!GenerateLcovInfo(cx, cx->realm(), out)) {
+ return nullptr;
+ }
+
+ *length = out.getOffset();
+ return js::DuplicateString(cx, out.string(), *length);
+}
diff --git a/js/src/vm/BytecodeUtil.h b/js/src/vm/BytecodeUtil.h
new file mode 100644
index 0000000000..ba3280ed90
--- /dev/null
+++ b/js/src/vm/BytecodeUtil.h
@@ -0,0 +1,665 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_BytecodeUtil_h
+#define vm_BytecodeUtil_h
+
+/*
+ * JS bytecode definitions.
+ */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/EndianUtils.h"
+
+#include <algorithm>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+#include "js/Value.h"
+#include "vm/BytecodeFormatFlags.h" // JOF_*
+#include "vm/GeneratorResumeKind.h"
+#include "vm/Opcodes.h"
+#include "vm/SharedStencil.h" // js::GCThingIndex
+#include "vm/ThrowMsgKind.h" // ThrowMsgKind, ThrowCondition
+
+namespace js {
+class JS_PUBLIC_API Sprinter;
+} // namespace js
+
+/* Shorthand for type from format. */
+
+static inline uint32_t JOF_TYPE(uint32_t fmt) { return fmt & JOF_TYPEMASK; }
+
+/* Shorthand for mode from format. */
+
+static inline uint32_t JOF_MODE(uint32_t fmt) { return fmt & JOF_MODEMASK; }
+
+/*
+ * Immediate operand getters, setters, and bounds.
+ */
+
+static MOZ_ALWAYS_INLINE uint8_t GET_UINT8(jsbytecode* pc) {
+ return uint8_t(pc[1]);
+}
+
+static MOZ_ALWAYS_INLINE void SET_UINT8(jsbytecode* pc, uint8_t u) {
+ pc[1] = jsbytecode(u);
+}
+
+/* Common uint16_t immediate format helpers. */
+
+static inline jsbytecode UINT16_HI(uint16_t i) { return jsbytecode(i >> 8); }
+
+static inline jsbytecode UINT16_LO(uint16_t i) { return jsbytecode(i); }
+
+static MOZ_ALWAYS_INLINE uint16_t GET_UINT16(const jsbytecode* pc) {
+ uint16_t result;
+ mozilla::NativeEndian::copyAndSwapFromLittleEndian(&result, pc + 1, 1);
+ return result;
+}
+
+static MOZ_ALWAYS_INLINE void SET_UINT16(jsbytecode* pc, uint16_t i) {
+ mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &i, 1);
+}
+
+static const unsigned UINT16_LIMIT = 1 << 16;
+
+/* Helpers for accessing the offsets of jump opcodes. */
+static const unsigned JUMP_OFFSET_LEN = 4;
+static const int32_t JUMP_OFFSET_MIN = INT32_MIN;
+static const int32_t JUMP_OFFSET_MAX = INT32_MAX;
+
+static MOZ_ALWAYS_INLINE uint32_t GET_UINT24(const jsbytecode* pc) {
+#if MOZ_LITTLE_ENDIAN()
+ // Do a single 32-bit load (for opcode and operand), then shift off the
+ // opcode.
+ uint32_t result;
+ memcpy(&result, pc, 4);
+ return result >> 8;
+#else
+ return uint32_t((pc[3] << 16) | (pc[2] << 8) | pc[1]);
+#endif
+}
+
+static MOZ_ALWAYS_INLINE void SET_UINT24(jsbytecode* pc, uint32_t i) {
+ MOZ_ASSERT(i < (1 << 24));
+
+#if MOZ_LITTLE_ENDIAN()
+ memcpy(pc + 1, &i, 3);
+#else
+ pc[1] = jsbytecode(i);
+ pc[2] = jsbytecode(i >> 8);
+ pc[3] = jsbytecode(i >> 16);
+#endif
+}
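+
+// Illustrative sketch: round-tripping a 24-bit immediate through the helpers
+// above. The operand occupies pc[1..3]; pc[0] holds the opcode byte.
+//
+//   jsbytecode code[4] = {};
+//   SET_UINT24(code, 0x00ABCDEF);
+//   MOZ_ASSERT(GET_UINT24(code) == 0x00ABCDEF);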
+
+static MOZ_ALWAYS_INLINE int8_t GET_INT8(const jsbytecode* pc) {
+ return int8_t(pc[1]);
+}
+
+static MOZ_ALWAYS_INLINE uint32_t GET_UINT32(const jsbytecode* pc) {
+ uint32_t result;
+ mozilla::NativeEndian::copyAndSwapFromLittleEndian(&result, pc + 1, 1);
+ return result;
+}
+
+static MOZ_ALWAYS_INLINE void SET_UINT32(jsbytecode* pc, uint32_t u) {
+ mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &u, 1);
+}
+
+static MOZ_ALWAYS_INLINE JS::Value GET_INLINE_VALUE(const jsbytecode* pc) {
+ uint64_t raw;
+ mozilla::NativeEndian::copyAndSwapFromLittleEndian(&raw, pc + 1, 1);
+ return JS::Value::fromRawBits(raw);
+}
+
+static MOZ_ALWAYS_INLINE void SET_INLINE_VALUE(jsbytecode* pc,
+ const JS::Value& v) {
+ uint64_t raw = v.asRawBits();
+ mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &raw, 1);
+}
+
+static MOZ_ALWAYS_INLINE int32_t GET_INT32(const jsbytecode* pc) {
+ return static_cast<int32_t>(GET_UINT32(pc));
+}
+
+static MOZ_ALWAYS_INLINE void SET_INT32(jsbytecode* pc, int32_t i) {
+ SET_UINT32(pc, static_cast<uint32_t>(i));
+}
+
+static MOZ_ALWAYS_INLINE int32_t GET_JUMP_OFFSET(jsbytecode* pc) {
+ return GET_INT32(pc);
+}
+
+static MOZ_ALWAYS_INLINE void SET_JUMP_OFFSET(jsbytecode* pc, int32_t off) {
+ SET_INT32(pc, off);
+}
+
+static const unsigned GCTHING_INDEX_LEN = 4;
+
+static MOZ_ALWAYS_INLINE js::GCThingIndex GET_GCTHING_INDEX(
+ const jsbytecode* pc) {
+ return js::GCThingIndex(GET_UINT32(pc));
+}
+
+static MOZ_ALWAYS_INLINE void SET_GCTHING_INDEX(jsbytecode* pc,
+ js::GCThingIndex index) {
+ SET_UINT32(pc, index.index);
+}
+
+// Index limit is determined by SrcNote::FourByteOffsetFlag, see
+// frontend/BytecodeEmitter.h.
+static const unsigned INDEX_LIMIT_LOG2 = 31;
+static const uint32_t INDEX_LIMIT = uint32_t(1) << INDEX_LIMIT_LOG2;
+
+static inline jsbytecode ARGC_HI(uint16_t argc) { return UINT16_HI(argc); }
+
+static inline jsbytecode ARGC_LO(uint16_t argc) { return UINT16_LO(argc); }
+
+static inline uint16_t GET_ARGC(const jsbytecode* pc) { return GET_UINT16(pc); }
+
+static const unsigned ARGC_LIMIT = UINT16_LIMIT;
+
+static inline uint16_t GET_ARGNO(const jsbytecode* pc) {
+ return GET_UINT16(pc);
+}
+
+static inline void SET_ARGNO(jsbytecode* pc, uint16_t argno) {
+ SET_UINT16(pc, argno);
+}
+
+static const unsigned ARGNO_LEN = 2;
+static const unsigned ARGNO_LIMIT = UINT16_LIMIT;
+
+static inline uint32_t GET_LOCALNO(const jsbytecode* pc) {
+ return GET_UINT24(pc);
+}
+
+static inline void SET_LOCALNO(jsbytecode* pc, uint32_t varno) {
+ SET_UINT24(pc, varno);
+}
+
+static const unsigned LOCALNO_LEN = 3;
+static const unsigned LOCALNO_BITS = 24;
+static const uint32_t LOCALNO_LIMIT = 1 << LOCALNO_BITS;
+
+static inline uint32_t GET_RESUMEINDEX(const jsbytecode* pc) {
+ return GET_UINT24(pc);
+}
+
+static inline void SET_RESUMEINDEX(jsbytecode* pc, uint32_t resumeIndex) {
+ SET_UINT24(pc, resumeIndex);
+}
+
+static const unsigned ICINDEX_LEN = 4;
+
+static inline uint32_t GET_ICINDEX(const jsbytecode* pc) {
+ return GET_UINT32(pc);
+}
+
+static inline void SET_ICINDEX(jsbytecode* pc, uint32_t icIndex) {
+ SET_UINT32(pc, icIndex);
+}
+
+static inline unsigned LoopHeadDepthHint(jsbytecode* pc) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead);
+ return GET_UINT8(pc + 4);
+}
+
+static inline void SetLoopHeadDepthHint(jsbytecode* pc, unsigned loopDepth) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead);
+ uint8_t data = std::min(loopDepth, unsigned(UINT8_MAX));
+ SET_UINT8(pc + 4, data);
+}
+
+static inline bool IsBackedgePC(jsbytecode* pc) {
+ switch (JSOp(*pc)) {
+ case JSOp::Goto:
+ case JSOp::JumpIfTrue:
+ return GET_JUMP_OFFSET(pc) < 0;
+ default:
+ return false;
+ }
+}
+
+static inline bool IsBackedgeForLoopHead(jsbytecode* pc, jsbytecode* loopHead) {
+ MOZ_ASSERT(JSOp(*loopHead) == JSOp::LoopHead);
+ return IsBackedgePC(pc) && pc + GET_JUMP_OFFSET(pc) == loopHead;
+}
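+
+// Illustrative example: a `while` loop compiles to a JSOp::LoopHead at the
+// top and a jump at the bottom whose GET_JUMP_OFFSET is negative, so
+// IsBackedgeForLoopHead(pc, loopHead) holds for exactly that jump when
+// pc + GET_JUMP_OFFSET(pc) lands back on the LoopHead.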
+
+/*
+ * Describes the 'hops' component of a JOF_ENVCOORD opcode.
+ *
+ * Note: this component is only 8 bits wide, limiting the maximum number of
+ * scopes between a use and a def to roughly 255. This is a pretty small
+ * limit, but note that SpiderMonkey's recursive-descent parser can only
+ * parse about this many functions before hitting the C-stack recursion
+ * limit, so this shouldn't be a significant limitation in practice.
+ */
+
+static inline uint8_t GET_ENVCOORD_HOPS(jsbytecode* pc) {
+ return GET_UINT8(pc);
+}
+
+static inline void SET_ENVCOORD_HOPS(jsbytecode* pc, uint8_t hops) {
+ SET_UINT8(pc, hops);
+}
+
+static const unsigned ENVCOORD_HOPS_LEN = 1;
+static const unsigned ENVCOORD_HOPS_BITS = 8;
+static const unsigned ENVCOORD_HOPS_LIMIT = 1 << ENVCOORD_HOPS_BITS;
+
+/* Describes the 'slot' component of a JOF_ENVCOORD opcode. */
+static inline uint32_t GET_ENVCOORD_SLOT(const jsbytecode* pc) {
+ return GET_UINT24(pc);
+}
+
+static inline void SET_ENVCOORD_SLOT(jsbytecode* pc, uint32_t slot) {
+ SET_UINT24(pc, slot);
+}
+
+static const unsigned ENVCOORD_SLOT_LEN = 3;
+static const unsigned ENVCOORD_SLOT_BITS = 24;
+static const uint32_t ENVCOORD_SLOT_LIMIT = 1 << ENVCOORD_SLOT_BITS;
+
+struct JSCodeSpec {
+ uint8_t length; /* length including opcode byte */
+ int8_t nuses; /* arity, -1 if variadic */
+ int8_t ndefs; /* number of stack results */
+ uint32_t format; /* immediate operand format */
+};
+
+namespace js {
+
+extern const JSCodeSpec CodeSpecTable[];
+
+inline const JSCodeSpec& CodeSpec(JSOp op) {
+ return CodeSpecTable[uint8_t(op)];
+}
+
+extern const char* const CodeNameTable[];
+
+inline const char* CodeName(JSOp op) { return CodeNameTable[uint8_t(op)]; }
+
+/* Shorthand for type from opcode. */
+
+static inline uint32_t JOF_OPTYPE(JSOp op) {
+ return JOF_TYPE(CodeSpec(op).format);
+}
+
+static inline bool IsJumpOpcode(JSOp op) { return JOF_OPTYPE(op) == JOF_JUMP; }
+
+static inline bool BytecodeFallsThrough(JSOp op) {
+  // Note:
+  // * JSOp::Yield/JSOp::Await are considered to fall through, like JSOp::Call.
+ switch (op) {
+ case JSOp::Goto:
+ case JSOp::Default:
+ case JSOp::Return:
+ case JSOp::RetRval:
+ case JSOp::FinalYieldRval:
+ case JSOp::Throw:
+ case JSOp::ThrowMsg:
+ case JSOp::ThrowSetConst:
+ case JSOp::TableSwitch:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static inline bool BytecodeIsJumpTarget(JSOp op) {
+ switch (op) {
+ case JSOp::JumpTarget:
+ case JSOp::LoopHead:
+ case JSOp::AfterYield:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// The JSOp argument is superfluous, but we use it to avoid a
+// store-forwarding bug on some Android phones; see bug 1833315.
+MOZ_ALWAYS_INLINE unsigned StackUses(JSOp op, jsbytecode* pc) {
+ MOZ_ASSERT(op == JSOp(*pc));
+ int nuses = CodeSpec(op).nuses;
+ if (nuses >= 0) {
+ return nuses;
+ }
+
+ MOZ_ASSERT(nuses == -1);
+ switch (op) {
+ case JSOp::PopN:
+ return GET_UINT16(pc);
+ case JSOp::New:
+ case JSOp::NewContent:
+ case JSOp::SuperCall:
+ return 2 + GET_ARGC(pc) + 1;
+ default:
+ /* stack: fun, this, [argc arguments] */
+ MOZ_ASSERT(op == JSOp::Call || op == JSOp::CallContent ||
+ op == JSOp::CallIgnoresRv || op == JSOp::Eval ||
+ op == JSOp::CallIter || op == JSOp::CallContentIter ||
+ op == JSOp::StrictEval);
+ return 2 + GET_ARGC(pc);
+ }
+}
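+
+// Illustrative example: a call site `f(a, b)` compiled to JSOp::Call with
+// ARGC == 2 has stack layout [fun, this, a, b], so StackUses returns
+// 2 + GET_ARGC(pc) == 4; a `new f(a, b)` site (JSOp::New) additionally pops
+// new.target, giving 2 + 2 + 1 == 5.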
+
+MOZ_ALWAYS_INLINE unsigned StackDefs(JSOp op) {
+ int ndefs = CodeSpec(op).ndefs;
+ MOZ_ASSERT(ndefs >= 0);
+ return ndefs;
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+/*
+ * Given bytecode address pc in script's main program code, compute the operand
+ * stack depth just before (JSOp) *pc executes. If *pc is not reachable, return
+ * false.
+ */
+extern bool ReconstructStackDepth(JSContext* cx, JSScript* script,
+ jsbytecode* pc, uint32_t* depth,
+ bool* reachablePC);
+#endif
+
+} /* namespace js */
+
+#define JSDVG_IGNORE_STACK 0
+#define JSDVG_SEARCH_STACK 1
+
+namespace js {
+
+/*
+ * Find the source expression that resulted in v, and return a newly allocated
+ * C-string containing it. Fall back on v's string conversion (fallback) if we
+ * can't find the bytecode that generated and pushed v on the operand stack.
+ *
+ * Search the current stack frame if spindex is JSDVG_SEARCH_STACK. Don't
+ * look for v on the stack if spindex is JSDVG_IGNORE_STACK. Otherwise,
+ * spindex is the negative index of v, measured from cx->fp->sp, or from a
+ * lower frame's sp if cx->fp is native.
+ *
+ * The optional argument skipStackHits can be used to skip a hit in the stack
+ * frame. This can be useful in self-hosted code that wants to report value
+ * errors containing decompiled values that are useful for the user, instead of
+ * values used internally by the self-hosted code.
+ *
+ * The caller must call JS_free on the result after a successful call.
+ */
+UniqueChars DecompileValueGenerator(JSContext* cx, int spindex, HandleValue v,
+ HandleString fallback,
+ int skipStackHits = 0);
+
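+/*
+ * Illustrative sketch (the reporting harness is hypothetical; the call
+ * matches the declaration above):
+ *
+ *   UniqueChars bytes =
+ *       DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, val, nullptr);
+ *   if (!bytes) {
+ *     return false;
+ *   }
+ *   JS_ReportErrorUTF8(cx, "%s is not a function", bytes.get());
+ */
+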
+/*
+ * Decompile the formal argument at formalIndex in the nearest non-builtin
+ * stack frame, falling back on converting v to source.
+ */
+JSString* DecompileArgument(JSContext* cx, int formalIndex, HandleValue v);
+
+static inline unsigned GetOpLength(JSOp op) {
+ MOZ_ASSERT(uint8_t(op) < JSOP_LIMIT);
+ MOZ_ASSERT(CodeSpec(op).length > 0);
+ return CodeSpec(op).length;
+}
+
+static inline unsigned GetBytecodeLength(const jsbytecode* pc) {
+ JSOp op = (JSOp)*pc;
+ return GetOpLength(op);
+}
+
+static inline bool BytecodeIsPopped(jsbytecode* pc) {
+ jsbytecode* next = pc + GetBytecodeLength(pc);
+ return JSOp(*next) == JSOp::Pop;
+}
+
+extern bool IsValidBytecodeOffset(JSContext* cx, JSScript* script,
+ size_t offset);
+
+inline bool IsArgOp(JSOp op) { return JOF_OPTYPE(op) == JOF_QARG; }
+
+inline bool IsLocalOp(JSOp op) { return JOF_OPTYPE(op) == JOF_LOCAL; }
+
+inline bool IsAliasedVarOp(JSOp op) { return JOF_OPTYPE(op) == JOF_ENVCOORD; }
+
+inline bool IsGlobalOp(JSOp op) { return CodeSpec(op).format & JOF_GNAME; }
+
+inline bool IsPropertySetOp(JSOp op) {
+ return CodeSpec(op).format & JOF_PROPSET;
+}
+
+inline bool IsPropertyInitOp(JSOp op) {
+ return CodeSpec(op).format & JOF_PROPINIT;
+}
+
+inline bool IsLooseEqualityOp(JSOp op) {
+ return op == JSOp::Eq || op == JSOp::Ne;
+}
+
+inline bool IsStrictEqualityOp(JSOp op) {
+ return op == JSOp::StrictEq || op == JSOp::StrictNe;
+}
+
+inline bool IsEqualityOp(JSOp op) {
+ return IsLooseEqualityOp(op) || IsStrictEqualityOp(op);
+}
+
+inline bool IsRelationalOp(JSOp op) {
+ return op == JSOp::Lt || op == JSOp::Le || op == JSOp::Gt || op == JSOp::Ge;
+}
+
+inline bool IsCheckStrictOp(JSOp op) {
+ return CodeSpec(op).format & JOF_CHECKSTRICT;
+}
+
+inline bool IsNameOp(JSOp op) { return CodeSpec(op).format & JOF_NAME; }
+
+#ifdef DEBUG
+inline bool IsCheckSloppyOp(JSOp op) {
+ return CodeSpec(op).format & JOF_CHECKSLOPPY;
+}
+#endif
+
+inline bool IsAtomOp(JSOp op) { return JOF_OPTYPE(op) == JOF_ATOM; }
+
+inline bool IsGetPropOp(JSOp op) { return op == JSOp::GetProp; }
+
+inline bool IsGetPropPC(const jsbytecode* pc) { return IsGetPropOp(JSOp(*pc)); }
+
+inline bool IsHiddenInitOp(JSOp op) {
+ return op == JSOp::InitHiddenProp || op == JSOp::InitHiddenElem ||
+ op == JSOp::InitHiddenPropGetter || op == JSOp::InitHiddenElemGetter ||
+ op == JSOp::InitHiddenPropSetter || op == JSOp::InitHiddenElemSetter;
+}
+
+inline bool IsLockedInitOp(JSOp op) {
+ return op == JSOp::InitLockedProp || op == JSOp::InitLockedElem;
+}
+
+inline bool IsStrictSetPC(jsbytecode* pc) {
+ JSOp op = JSOp(*pc);
+ return op == JSOp::StrictSetProp || op == JSOp::StrictSetName ||
+ op == JSOp::StrictSetGName || op == JSOp::StrictSetElem;
+}
+
+inline bool IsSetPropOp(JSOp op) {
+ return op == JSOp::SetProp || op == JSOp::StrictSetProp ||
+ op == JSOp::SetName || op == JSOp::StrictSetName ||
+ op == JSOp::SetGName || op == JSOp::StrictSetGName;
+}
+
+inline bool IsSetPropPC(const jsbytecode* pc) { return IsSetPropOp(JSOp(*pc)); }
+
+inline bool IsGetElemOp(JSOp op) { return op == JSOp::GetElem; }
+
+inline bool IsGetElemPC(const jsbytecode* pc) { return IsGetElemOp(JSOp(*pc)); }
+
+inline bool IsSetElemOp(JSOp op) {
+ return op == JSOp::SetElem || op == JSOp::StrictSetElem;
+}
+
+inline bool IsSetElemPC(const jsbytecode* pc) { return IsSetElemOp(JSOp(*pc)); }
+
+inline bool IsElemPC(const jsbytecode* pc) {
+ return CodeSpec(JSOp(*pc)).format & JOF_ELEM;
+}
+
+inline bool IsInvokeOp(JSOp op) { return CodeSpec(op).format & JOF_INVOKE; }
+
+inline bool IsInvokePC(jsbytecode* pc) { return IsInvokeOp(JSOp(*pc)); }
+
+inline bool IsStrictEvalPC(jsbytecode* pc) {
+ JSOp op = JSOp(*pc);
+ return op == JSOp::StrictEval || op == JSOp::StrictSpreadEval;
+}
+
+inline bool IsConstructOp(JSOp op) {
+ return CodeSpec(op).format & JOF_CONSTRUCT;
+}
+inline bool IsConstructPC(const jsbytecode* pc) {
+ return IsConstructOp(JSOp(*pc));
+}
+
+inline bool IsSpreadOp(JSOp op) { return CodeSpec(op).format & JOF_SPREAD; }
+
+inline bool IsSpreadPC(const jsbytecode* pc) { return IsSpreadOp(JSOp(*pc)); }
+
+inline bool OpUsesEnvironmentChain(JSOp op) {
+ return CodeSpec(op).format & JOF_USES_ENV;
+}
+
+static inline int32_t GetBytecodeInteger(jsbytecode* pc) {
+ switch (JSOp(*pc)) {
+ case JSOp::Zero:
+ return 0;
+ case JSOp::One:
+ return 1;
+ case JSOp::Uint16:
+ return GET_UINT16(pc);
+ case JSOp::Uint24:
+ return GET_UINT24(pc);
+ case JSOp::Int8:
+ return GET_INT8(pc);
+ case JSOp::Int32:
+ return GET_INT32(pc);
+ default:
+ MOZ_CRASH("Bad op");
+ }
+}
+
+inline bool BytecodeOpHasIC(JSOp op) { return CodeSpec(op).format & JOF_IC; }
+
+inline void GetCheckPrivateFieldOperands(jsbytecode* pc,
+ ThrowCondition* throwCondition,
+ ThrowMsgKind* throwKind) {
+ static_assert(sizeof(ThrowCondition) == sizeof(uint8_t));
+ static_assert(sizeof(ThrowMsgKind) == sizeof(uint8_t));
+
+ MOZ_ASSERT(JSOp(*pc) == JSOp::CheckPrivateField);
+ uint8_t throwConditionByte = GET_UINT8(pc);
+ uint8_t throwKindByte = GET_UINT8(pc + 1);
+
+ *throwCondition = static_cast<ThrowCondition>(throwConditionByte);
+ *throwKind = static_cast<ThrowMsgKind>(throwKindByte);
+
+ MOZ_ASSERT(*throwCondition == ThrowCondition::ThrowHas ||
+ *throwCondition == ThrowCondition::ThrowHasNot ||
+ *throwCondition == ThrowCondition::OnlyCheckRhs);
+
+ MOZ_ASSERT(*throwKind == ThrowMsgKind::PrivateDoubleInit ||
+ *throwKind == ThrowMsgKind::PrivateBrandDoubleInit ||
+ *throwKind == ThrowMsgKind::MissingPrivateOnGet ||
+ *throwKind == ThrowMsgKind::MissingPrivateOnSet);
+}
+
+// Return true iff the combination of the ThrowCondition and hasOwn result
+// will throw an exception.
+static inline bool CheckPrivateFieldWillThrow(ThrowCondition condition,
+ bool hasOwn) {
+ if ((condition == ThrowCondition::ThrowHasNot && !hasOwn) ||
+ (condition == ThrowCondition::ThrowHas && hasOwn)) {
+ // Met a throw condition.
+ return true;
+ }
+
+ return false;
+}
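+
+// Illustrative truth table for the predicate above:
+//
+//   condition       hasOwn   throws
+//   ThrowHas        true     true
+//   ThrowHas        false    false
+//   ThrowHasNot     true     false
+//   ThrowHasNot     false    true
+//   OnlyCheckRhs    any      false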
+
+/*
+ * Counts accumulated for a single opcode in a script. The counts tracked vary
+ * between opcodes, and this structure ensures that counts are accessed in a
+ * coherent fashion.
+ */
+class PCCounts {
+ /*
+   * Offset of the pc inside the script. This field is used to look up
+   * opcodes which have annotations.
+ */
+ size_t pcOffset_;
+
+ /*
+   * Records the number of executions of one instruction, or the number of
+   * throws executed.
+ */
+ uint64_t numExec_;
+
+ public:
+ explicit PCCounts(size_t off) : pcOffset_(off), numExec_(0) {}
+
+ size_t pcOffset() const { return pcOffset_; }
+
+ // Used for sorting and searching.
+ bool operator<(const PCCounts& rhs) const {
+ return pcOffset_ < rhs.pcOffset_;
+ }
+
+ uint64_t& numExec() { return numExec_; }
+ uint64_t numExec() const { return numExec_; }
+
+ static const char numExecName[];
+};
+
+static inline jsbytecode* GetNextPc(jsbytecode* pc) {
+ return pc + GetBytecodeLength(pc);
+}
+
+inline GeneratorResumeKind IntToResumeKind(int32_t value) {
+ MOZ_ASSERT(uint32_t(value) <= uint32_t(GeneratorResumeKind::Return));
+ return static_cast<GeneratorResumeKind>(value);
+}
+
+inline GeneratorResumeKind ResumeKindFromPC(jsbytecode* pc) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::ResumeKind);
+ return IntToResumeKind(GET_UINT8(pc));
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+
+enum class DisassembleSkeptically { No, Yes };
+
+/*
+ * Disassemblers, for debugging only.
+ */
+[[nodiscard]] extern bool Disassemble(
+ JSContext* cx, JS::Handle<JSScript*> script, bool lines, Sprinter* sp,
+ DisassembleSkeptically skeptically = DisassembleSkeptically::No);
+
+unsigned Disassemble1(JSContext* cx, JS::Handle<JSScript*> script,
+ jsbytecode* pc, unsigned loc, bool lines, Sprinter* sp);
+
+#endif
+
+[[nodiscard]] extern bool DumpRealmPCCounts(JSContext* cx);
+
+} // namespace js
+
+#endif /* vm_BytecodeUtil_h */
diff --git a/js/src/vm/Caches.h b/js/src/vm/Caches.h
new file mode 100644
index 0000000000..c1d9caefdb
--- /dev/null
+++ b/js/src/vm/Caches.h
@@ -0,0 +1,568 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Caches_h
+#define vm_Caches_h
+
+#include "mozilla/Array.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MruCache.h"
+#include "mozilla/TemplateLib.h"
+#include "mozilla/UniquePtr.h"
+
+#include "frontend/ScopeBindingCache.h"
+#include "gc/Tracer.h"
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+#include "vm/JSScript.h"
+#include "vm/Shape.h"
+#include "vm/StencilCache.h" // js::StencilCache
+#include "vm/StringType.h"
+
+namespace js {
+
+class SrcNote;
+
+/*
+ * GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
+ * given pc in a script. We use the script->code pointer to tag the cache,
+ * instead of the script address itself, so that source notes are always found
+ * by offset from the bytecode with which they were generated.
+ */
+struct GSNCache {
+ typedef HashMap<jsbytecode*, const SrcNote*, PointerHasher<jsbytecode*>,
+ SystemAllocPolicy>
+ Map;
+
+ jsbytecode* code;
+ Map map;
+
+ GSNCache() : code(nullptr) {}
+
+ void purge();
+};
+
+struct EvalCacheEntry {
+ JSLinearString* str;
+ JSScript* script;
+ JSScript* callerScript;
+ jsbytecode* pc;
+
+ // We sweep this cache after a nursery collection to update entries with
+ // string keys that have been tenured.
+ //
+ // The entire cache is purged on a major GC, so we don't need to sweep it
+ // then.
+ bool traceWeak(JSTracer* trc) {
+ MOZ_ASSERT(trc->kind() == JS::TracerKind::MinorSweeping);
+ return TraceManuallyBarrieredWeakEdge(trc, &str, "EvalCacheEntry::str");
+ }
+};
+
+struct EvalCacheLookup {
+ explicit EvalCacheLookup(JSContext* cx) : str(cx), callerScript(cx) {}
+ Rooted<JSLinearString*> str;
+ RootedScript callerScript;
+ MOZ_INIT_OUTSIDE_CTOR jsbytecode* pc;
+};
+
+struct EvalCacheHashPolicy {
+ using Lookup = EvalCacheLookup;
+
+ static HashNumber hash(const Lookup& l);
+ static bool match(const EvalCacheEntry& entry, const EvalCacheLookup& l);
+};
+
+using EvalCache =
+ GCHashSet<EvalCacheEntry, EvalCacheHashPolicy, SystemAllocPolicy>;
+
+class MegamorphicCacheEntry {
+ // Receiver object's shape.
+ Shape* shape_ = nullptr;
+
+ // The atom or symbol property being accessed.
+ PropertyKey key_;
+
+ // Slot offset and isFixedSlot flag of the data property.
+ TaggedSlotOffset slotOffset_;
+
+ // This entry is valid iff the generation matches the cache's generation.
+ uint16_t generation_ = 0;
+
+ // Number of hops on the proto chain to get to the holder object. If this is
+ // zero, the property exists on the receiver object. It can also be one of
+ // the sentinel values indicating a missing property lookup.
+ uint8_t numHops_ = 0;
+
+ friend class MegamorphicCache;
+
+ public:
+ static constexpr uint8_t MaxHopsForDataProperty = UINT8_MAX - 2;
+ static constexpr uint8_t NumHopsForMissingProperty = UINT8_MAX - 1;
+ static constexpr uint8_t NumHopsForMissingOwnProperty = UINT8_MAX;
+
+ void init(Shape* shape, PropertyKey key, uint16_t generation, uint8_t numHops,
+ TaggedSlotOffset slotOffset) {
+ shape_ = shape;
+ key_ = key;
+ slotOffset_ = slotOffset;
+ generation_ = generation;
+ numHops_ = numHops;
+ MOZ_ASSERT(numHops_ == numHops, "numHops must fit in numHops_");
+ }
+ bool isMissingProperty() const {
+ return numHops_ == NumHopsForMissingProperty;
+ }
+ bool isMissingOwnProperty() const {
+ return numHops_ == NumHopsForMissingOwnProperty;
+ }
+ bool isDataProperty() const { return numHops_ <= MaxHopsForDataProperty; }
+ uint16_t numHops() const {
+ MOZ_ASSERT(isDataProperty());
+ return numHops_;
+ }
+ TaggedSlotOffset slotOffset() const {
+ MOZ_ASSERT(isDataProperty());
+ return slotOffset_;
+ }
+
+ static constexpr size_t offsetOfShape() {
+ return offsetof(MegamorphicCacheEntry, shape_);
+ }
+
+ static constexpr size_t offsetOfKey() {
+ return offsetof(MegamorphicCacheEntry, key_);
+ }
+
+ static constexpr size_t offsetOfGeneration() {
+ return offsetof(MegamorphicCacheEntry, generation_);
+ }
+
+ static constexpr size_t offsetOfSlotOffset() {
+ return offsetof(MegamorphicCacheEntry, slotOffset_);
+ }
+
+ static constexpr size_t offsetOfNumHops() {
+ return offsetof(MegamorphicCacheEntry, numHops_);
+ }
+};
+
+// [SMDOC] Megamorphic Property Lookup Cache (MegamorphicCache)
+//
+// MegamorphicCache is a data structure used to speed up megamorphic property
+// lookups from JIT code. The same cache is currently used for both GetProp and
+// HasProp (in, hasOwnProperty) operations.
+//
+// This is implemented as a fixed-size array of entries. Lookups are performed
+// based on the receiver object's Shape + PropertyKey. If found in the cache,
+// the result of a lookup represents either:
+//
+// * A data property on the receiver or on its proto chain (stored as number of
+// 'hops' up the proto chain + the slot of the data property).
+//
+// * A missing property on the receiver or its proto chain.
+//
+// * A missing property on the receiver, but it might exist on the proto chain.
+// This lets us optimize hasOwnProperty better.
+//
+// Collisions are handled by simply overwriting the previous entry stored in the
+// slot. This is sufficient to achieve a high hit rate on typical web workloads
+// while ensuring cache lookups are always fast and simple.
+//
+// Lookups always check the receiver object's shape (ensuring the properties and
+// prototype are unchanged). Because the cache also caches lookups on the proto
+// chain, Watchtower is used to invalidate the cache when prototype objects are
+// mutated. This is done by incrementing the cache's generation counter to
+// invalidate all entries.
+//
+// The cache is also invalidated on each major GC.
+class MegamorphicCache {
+ public:
+ using Entry = MegamorphicCacheEntry;
+
+ static constexpr size_t NumEntries = 1024;
+ static constexpr uint8_t ShapeHashShift1 =
+ mozilla::tl::FloorLog2<alignof(Shape)>::value;
+ static constexpr uint8_t ShapeHashShift2 =
+ ShapeHashShift1 + mozilla::tl::FloorLog2<NumEntries>::value;
+
+ static_assert(mozilla::IsPowerOfTwo(alignof(Shape)) &&
+ mozilla::IsPowerOfTwo(NumEntries),
+ "FloorLog2 is exact because alignof(Shape) and NumEntries are "
+ "both powers of two");
+
+ private:
+ mozilla::Array<Entry, NumEntries> entries_;
+
+ // Generation counter used to invalidate all entries.
+ uint16_t generation_ = 0;
+
+ // NOTE: this logic is mirrored in MacroAssembler::emitMegamorphicCacheLookup
+ Entry& getEntry(Shape* shape, PropertyKey key) {
+ static_assert(mozilla::IsPowerOfTwo(NumEntries),
+ "NumEntries must be a power-of-two for fast modulo");
+ uintptr_t hash = uintptr_t(shape) >> ShapeHashShift1;
+ hash ^= uintptr_t(shape) >> ShapeHashShift2;
+ hash += HashAtomOrSymbolPropertyKey(key);
+ return entries_[hash % NumEntries];
+ }
+
+ public:
+ void bumpGeneration() {
+ generation_++;
+ if (generation_ == 0) {
+ // Generation overflowed. Invalidate the whole cache.
+ for (size_t i = 0; i < NumEntries; i++) {
+ entries_[i].shape_ = nullptr;
+ }
+ }
+ }
+ bool lookup(Shape* shape, PropertyKey key, Entry** entryp) {
+ Entry& entry = getEntry(shape, key);
+ *entryp = &entry;
+ return (entry.shape_ == shape && entry.key_ == key &&
+ entry.generation_ == generation_);
+ }
+ void initEntryForMissingProperty(Entry* entry, Shape* shape,
+ PropertyKey key) {
+ entry->init(shape, key, generation_, Entry::NumHopsForMissingProperty,
+ TaggedSlotOffset());
+ }
+ void initEntryForMissingOwnProperty(Entry* entry, Shape* shape,
+ PropertyKey key) {
+ entry->init(shape, key, generation_, Entry::NumHopsForMissingOwnProperty,
+ TaggedSlotOffset());
+ }
+ void initEntryForDataProperty(Entry* entry, Shape* shape, PropertyKey key,
+ size_t numHops, TaggedSlotOffset slotOffset) {
+ if (numHops > Entry::MaxHopsForDataProperty) {
+ return;
+ }
+ entry->init(shape, key, generation_, numHops, slotOffset);
+ }
+
+ static constexpr size_t offsetOfEntries() {
+ return offsetof(MegamorphicCache, entries_);
+ }
+
+ static constexpr size_t offsetOfGeneration() {
+ return offsetof(MegamorphicCache, generation_);
+ }
+};
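+
+// Illustrative sketch of the lookup/fill protocol above (variable names are
+// hypothetical):
+//
+//   MegamorphicCache::Entry* entry;
+//   if (cache.lookup(shape, key, &entry)) {
+//     // Hit: entry->isDataProperty(), numHops(), and slotOffset() describe
+//     // where the property lives, or that it is missing.
+//   } else {
+//     // Miss: perform the full lookup, then fill the returned entry, e.g.:
+//     cache.initEntryForDataProperty(entry, shape, key, numHops, slotOffset);
+//   }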
+
+class MegamorphicSetPropCacheEntry {
+ Shape* beforeShape_ = nullptr;
+ Shape* afterShape_ = nullptr;
+
+ // The atom or symbol property being accessed.
+ PropertyKey key_;
+
+ // Slot offset and isFixedSlot flag of the data property.
+ TaggedSlotOffset slotOffset_;
+
+ // If slots need to be grown, this is the new capacity we need.
+ uint16_t newCapacity_ = 0;
+
+ // This entry is valid iff the generation matches the cache's generation.
+ uint16_t generation_ = 0;
+
+ friend class MegamorphicSetPropCache;
+
+ public:
+ void init(Shape* beforeShape, Shape* afterShape, PropertyKey key,
+ uint16_t generation, TaggedSlotOffset slotOffset,
+ uint16_t newCapacity) {
+ beforeShape_ = beforeShape;
+ afterShape_ = afterShape;
+ key_ = key;
+ slotOffset_ = slotOffset;
+ newCapacity_ = newCapacity;
+ generation_ = generation;
+ }
+ TaggedSlotOffset slotOffset() const { return slotOffset_; }
+ Shape* afterShape() const { return afterShape_; }
+
+ static constexpr size_t offsetOfShape() {
+ return offsetof(MegamorphicSetPropCacheEntry, beforeShape_);
+ }
+ static constexpr size_t offsetOfAfterShape() {
+ return offsetof(MegamorphicSetPropCacheEntry, afterShape_);
+ }
+
+ static constexpr size_t offsetOfKey() {
+ return offsetof(MegamorphicSetPropCacheEntry, key_);
+ }
+
+ static constexpr size_t offsetOfNewCapacity() {
+ return offsetof(MegamorphicSetPropCacheEntry, newCapacity_);
+ }
+
+ static constexpr size_t offsetOfGeneration() {
+ return offsetof(MegamorphicSetPropCacheEntry, generation_);
+ }
+
+ static constexpr size_t offsetOfSlotOffset() {
+ return offsetof(MegamorphicSetPropCacheEntry, slotOffset_);
+ }
+};
+
+class MegamorphicSetPropCache {
+ public:
+ using Entry = MegamorphicSetPropCacheEntry;
+  // We could get more hits by increasing this, but this seems to be around
+  // the sweet spot where we get most of the hits we would get with an
+  // infinitely sized cache.
+ static constexpr size_t NumEntries = 1024;
+ static constexpr uint8_t ShapeHashShift1 =
+ mozilla::tl::FloorLog2<alignof(Shape)>::value;
+ static constexpr uint8_t ShapeHashShift2 =
+ ShapeHashShift1 + mozilla::tl::FloorLog2<NumEntries>::value;
+
+ static_assert(mozilla::IsPowerOfTwo(alignof(Shape)) &&
+ mozilla::IsPowerOfTwo(NumEntries),
+ "FloorLog2 is exact because alignof(Shape) and NumEntries are "
+ "both powers of two");
+
+ private:
+ mozilla::Array<Entry, NumEntries> entries_;
+
+ // Generation counter used to invalidate all entries.
+ uint16_t generation_ = 0;
+
+ Entry& getEntry(Shape* beforeShape, PropertyKey key) {
+ static_assert(mozilla::IsPowerOfTwo(NumEntries),
+ "NumEntries must be a power-of-two for fast modulo");
+ uintptr_t hash = uintptr_t(beforeShape) >> ShapeHashShift1;
+ hash ^= uintptr_t(beforeShape) >> ShapeHashShift2;
+ hash += HashAtomOrSymbolPropertyKey(key);
+ return entries_[hash % NumEntries];
+ }
+
+ public:
+ void bumpGeneration() {
+ generation_++;
+ if (generation_ == 0) {
+ // Generation overflowed. Invalidate the whole cache.
+ for (size_t i = 0; i < NumEntries; i++) {
+ entries_[i].beforeShape_ = nullptr;
+ }
+ }
+ }
+ void set(Shape* beforeShape, Shape* afterShape, PropertyKey key,
+ TaggedSlotOffset slotOffset, uint32_t newCapacity) {
+ uint16_t newSlots = (uint16_t)newCapacity;
+ if (newSlots != newCapacity) {
+ return;
+ }
+ Entry& entry = getEntry(beforeShape, key);
+ entry.init(beforeShape, afterShape, key, generation_, slotOffset, newSlots);
+ }
+
+#ifdef DEBUG
+ bool lookup(Shape* beforeShape, PropertyKey key, Entry** entryp) {
+ Entry& entry = getEntry(beforeShape, key);
+ *entryp = &entry;
+ return (entry.beforeShape_ == beforeShape && entry.key_ == key &&
+ entry.generation_ == generation_);
+ }
+#endif
+
+ static constexpr size_t offsetOfEntries() {
+ return offsetof(MegamorphicSetPropCache, entries_);
+ }
+
+ static constexpr size_t offsetOfGeneration() {
+ return offsetof(MegamorphicSetPropCache, generation_);
+ }
+};
+
+// Cache for AtomizeString, mapping JSString* or JS::Latin1Char* to the
+// corresponding JSAtom*. The cache has three different optimizations:
+//
+// * The two most recent lookups are cached. This has a hit rate of 30-65% on
+// typical web workloads.
+//
+// * MruCache is used for short JS::Latin1Char strings.
+//
+// * For longer strings, there's also a JSLinearString* => JSAtom* HashMap,
+// because hashing the string characters repeatedly can be slow.
+// This map is also used by nursery GC to de-duplicate strings to atoms.
+//
+// This cache is purged on minor and major GC.
+class StringToAtomCache {
+ public:
+ struct LastLookup {
+ JSString* string = nullptr;
+ JSAtom* atom = nullptr;
+
+ static constexpr size_t offsetOfString() {
+ return offsetof(LastLookup, string);
+ }
+
+ static constexpr size_t offsetOfAtom() {
+ return offsetof(LastLookup, atom);
+ }
+ };
+ static constexpr size_t NumLastLookups = 2;
+
+ struct AtomTableKey {
+ explicit AtomTableKey(const JS::Latin1Char* str, size_t len)
+ : string_(str), length_(len) {
+ hash_ = mozilla::HashString(string_, length_);
+ }
+
+ const JS::Latin1Char* string_;
+ size_t length_;
+ uint32_t hash_;
+ };
+
+ private:
+ struct RopeAtomCache
+ : public mozilla::MruCache<AtomTableKey, JSAtom*, RopeAtomCache> {
+ static HashNumber Hash(const AtomTableKey& key) { return key.hash_; }
+ static bool Match(const AtomTableKey& key, const JSAtom* val) {
+ JS::AutoCheckCannotGC nogc;
+ return val->length() == key.length_ &&
+ EqualChars(key.string_, val->latin1Chars(nogc), key.length_);
+ }
+ };
+ using Map =
+ HashMap<JSString*, JSAtom*, PointerHasher<JSString*>, SystemAllocPolicy>;
+ Map map_;
+ mozilla::Array<LastLookup, NumLastLookups> lastLookups_;
+ RopeAtomCache ropeCharCache_;
+
+ public:
+  // Don't use the HashMap for short strings. Hashing them is less expensive.
+  // But the length needs to be long enough to cover common identifiers in
+  // React.
+ static constexpr size_t MinStringLength = 39;
+
+ JSAtom* lookupInMap(JSString* s) const {
+ MOZ_ASSERT(s->inStringToAtomCache());
+ MOZ_ASSERT(s->length() >= MinStringLength);
+
+ auto p = map_.lookup(s);
+ JSAtom* atom = p ? p->value() : nullptr;
+ return atom;
+ }
+
+ MOZ_ALWAYS_INLINE JSAtom* lookup(JSString* s) const {
+ MOZ_ASSERT(!s->isAtom());
+ for (const LastLookup& entry : lastLookups_) {
+ if (entry.string == s) {
+ return entry.atom;
+ }
+ }
+
+ if (!s->inStringToAtomCache()) {
+ MOZ_ASSERT(!map_.lookup(s));
+ return nullptr;
+ }
+
+ return lookupInMap(s);
+ }
+
+ MOZ_ALWAYS_INLINE JSAtom* lookupWithRopeChars(
+ const JS::Latin1Char* str, size_t len,
+ mozilla::Maybe<AtomTableKey>& key) {
+ MOZ_ASSERT(len < MinStringLength);
+ key.emplace(str, len);
+ if (auto p = ropeCharCache_.Lookup(key.value())) {
+ return p.Data();
+ }
+ return nullptr;
+ }
+
+ static constexpr size_t offsetOfLastLookups() {
+ return offsetof(StringToAtomCache, lastLookups_);
+ }
+
+ void maybePut(JSString* s, JSAtom* atom, mozilla::Maybe<AtomTableKey>& key) {
+ if (key.isSome()) {
+ ropeCharCache_.Put(key.value(), atom);
+ }
+
+ for (size_t i = NumLastLookups - 1; i > 0; i--) {
+ lastLookups_[i] = lastLookups_[i - 1];
+ }
+ lastLookups_[0].string = s;
+ lastLookups_[0].atom = atom;
+
+ if (s->length() < MinStringLength) {
+ return;
+ }
+ if (!map_.putNew(s, atom)) {
+ return;
+ }
+ s->setInStringToAtomCache();
+ }
+
+ void purge() {
+ map_.clearAndCompact();
+ for (LastLookup& entry : lastLookups_) {
+ entry.string = nullptr;
+ entry.atom = nullptr;
+ }
+
+ ropeCharCache_.Clear();
+ }
+};
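+
+// Illustrative sketch of how an atomization path might consult the cache
+// above (AtomizeSlowPath is hypothetical):
+//
+//   JSAtom* atom = cache.lookup(str);  // checks lastLookups_, then map_
+//   if (!atom) {
+//     atom = AtomizeSlowPath(cx, str);
+//     mozilla::Maybe<StringToAtomCache::AtomTableKey> key;  // no rope chars
+//     if (atom) {
+//       cache.maybePut(str, atom, key);
+//     }
+//   }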
+
+class RuntimeCaches {
+ public:
+ MegamorphicCache megamorphicCache;
+ UniquePtr<MegamorphicSetPropCache> megamorphicSetPropCache;
+ GSNCache gsnCache;
+ UncompressedSourceCache uncompressedSourceCache;
+ EvalCache evalCache;
+ StringToAtomCache stringToAtomCache;
+
+  // Delazification: cache of bindings for runtime scope objects, used during
+  // delazification to quickly resolve the NameLocation of a binding without
+  // linearly iterating over the list of bindings.
+ frontend::RuntimeScopeBindingCache scopeCache;
+
+  // This cache stores the results of delazification compilations, which
+  // might happen off-thread. The main thread concurrently reads the content
+  // of this cache to avoid delazification, or falls back on running the
+  // delazification on the main thread.
+ //
+ // Main-thread results are not stored in the StencilCache as there is no other
+ // consumer.
+ StencilCache delazificationCache;
+
+ void sweepAfterMinorGC(JSTracer* trc) { evalCache.traceWeak(trc); }
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkEvalCacheAfterMinorGC();
+#endif
+
+ void purgeForCompaction() {
+ evalCache.clear();
+ stringToAtomCache.purge();
+ megamorphicCache.bumpGeneration();
+ if (megamorphicSetPropCache) {
+ // MegamorphicSetPropCache can be null if we failed out of
+ // JSRuntime::init. We will then try to destroy the runtime which will
+ // do a GC and land us here.
+ megamorphicSetPropCache->bumpGeneration();
+ }
+ scopeCache.purge();
+ }
+
+ void purgeStencils() { delazificationCache.clearAndDisable(); }
+
+ void purge() {
+ purgeForCompaction();
+ gsnCache.purge();
+ uncompressedSourceCache.purge();
+ purgeStencils();
+ }
+};
+
+} // namespace js
+
+#endif /* vm_Caches_h */
diff --git a/js/src/vm/CallAndConstruct.cpp b/js/src/vm/CallAndConstruct.cpp
new file mode 100644
index 0000000000..be714e3dd0
--- /dev/null
+++ b/js/src/vm/CallAndConstruct.cpp
@@ -0,0 +1,168 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/CallAndConstruct.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include "jstypes.h" // JS_PUBLIC_API
+#include "gc/Zone.h" // js::Zone
+#include "js/Context.h" // AssertHeapIsIdle
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/RootingAPI.h" // JS::Rooted, JS::Handle, JS::MutableHandle
+#include "js/Value.h" // JS::Value, JS::*Value
+#include "js/ValueArray.h" // JS::HandleValueArray
+#include "vm/BytecodeUtil.h" // JSDVG_IGNORE_STACK
+#include "vm/Interpreter.h" // js::Call, js::Construct
+#include "vm/JSAtom.h" // JSAtom, js::Atomize
+#include "vm/JSContext.h" // JSContext, CHECK_THREAD, ReportValueError
+#include "vm/JSObject.h" // JSObject
+#include "vm/Stack.h" // js::InvokeArgs, js::FillArgumentsFromArraylike, js::ConstructArgs
+
+#include "vm/JSContext-inl.h" // JSContext::check
+#include "vm/JSObject-inl.h" // js::IsConstructor
+#include "vm/ObjectOperations-inl.h" // js::GetProperty
+
+using namespace js;
+
+JS_PUBLIC_API bool JS::IsCallable(JSObject* obj) { return obj->isCallable(); }
+
+JS_PUBLIC_API bool JS::IsConstructor(JSObject* obj) {
+ return obj->isConstructor();
+}
+
+JS_PUBLIC_API bool JS_CallFunctionValue(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<JS::Value> fval,
+ const JS::HandleValueArray& args,
+ JS::MutableHandle<JS::Value> rval) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, fval, args);
+
+ js::InvokeArgs iargs(cx);
+ if (!FillArgumentsFromArraylike(cx, iargs, args)) {
+ return false;
+ }
+
+ JS::Rooted<JS::Value> thisv(cx, JS::ObjectOrNullValue(obj));
+ return js::Call(cx, fval, thisv, iargs, rval);
+}
+
+JS_PUBLIC_API bool JS_CallFunction(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSFunction*> fun,
+ const JS::HandleValueArray& args,
+ JS::MutableHandle<JS::Value> rval) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, fun, args);
+
+ js::InvokeArgs iargs(cx);
+ if (!FillArgumentsFromArraylike(cx, iargs, args)) {
+ return false;
+ }
+
+ JS::Rooted<JS::Value> fval(cx, JS::ObjectValue(*fun));
+ JS::Rooted<JS::Value> thisv(cx, JS::ObjectOrNullValue(obj));
+ return js::Call(cx, fval, thisv, iargs, rval);
+}
+
+JS_PUBLIC_API bool JS_CallFunctionName(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name,
+ const JS::HandleValueArray& args,
+ JS::MutableHandle<JS::Value> rval) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, args);
+
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+
+ JS::Rooted<JS::Value> fval(cx);
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ if (!GetProperty(cx, obj, obj, id, &fval)) {
+ return false;
+ }
+
+ js::InvokeArgs iargs(cx);
+ if (!FillArgumentsFromArraylike(cx, iargs, args)) {
+ return false;
+ }
+
+ JS::Rooted<JS::Value> thisv(cx, JS::ObjectOrNullValue(obj));
+ return js::Call(cx, fval, thisv, iargs, rval);
+}
+
+JS_PUBLIC_API bool JS::Call(JSContext* cx, JS::Handle<JS::Value> thisv,
+ JS::Handle<JS::Value> fval,
+ const JS::HandleValueArray& args,
+ JS::MutableHandle<JS::Value> rval) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(thisv, fval, args);
+
+ js::InvokeArgs iargs(cx);
+ if (!FillArgumentsFromArraylike(cx, iargs, args)) {
+ return false;
+ }
+
+ return js::Call(cx, fval, thisv, iargs, rval);
+}
+
+JS_PUBLIC_API bool JS::Construct(JSContext* cx, JS::Handle<JS::Value> fval,
+ JS::Handle<JSObject*> newTarget,
+ const JS::HandleValueArray& args,
+ JS::MutableHandle<JSObject*> objp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(fval, newTarget, args);
+
+ if (!js::IsConstructor(fval)) {
+ ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, fval,
+ nullptr);
+ return false;
+ }
+
+ JS::Rooted<JS::Value> newTargetVal(cx, JS::ObjectValue(*newTarget));
+ if (!js::IsConstructor(newTargetVal)) {
+ ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK,
+ newTargetVal, nullptr);
+ return false;
+ }
+
+ js::ConstructArgs cargs(cx);
+ if (!FillArgumentsFromArraylike(cx, cargs, args)) {
+ return false;
+ }
+
+ return js::Construct(cx, fval, cargs, newTargetVal, objp);
+}
+
+JS_PUBLIC_API bool JS::Construct(JSContext* cx, JS::Handle<JS::Value> fval,
+ const JS::HandleValueArray& args,
+ JS::MutableHandle<JSObject*> objp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(fval, args);
+
+ if (!js::IsConstructor(fval)) {
+ ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, fval,
+ nullptr);
+ return false;
+ }
+
+ js::ConstructArgs cargs(cx);
+ if (!FillArgumentsFromArraylike(cx, cargs, args)) {
+ return false;
+ }
+
+ return js::Construct(cx, fval, cargs, fval, objp);
+}
diff --git a/js/src/vm/CallNonGenericMethod.cpp b/js/src/vm/CallNonGenericMethod.cpp
new file mode 100644
index 0000000000..2164f7162b
--- /dev/null
+++ b/js/src/vm/CallNonGenericMethod.cpp
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/CallNonGenericMethod.h"
+
+#include "proxy/Proxy.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/ProxyObject.h"
+#include "vm/SelfHosting.h"
+
+using namespace js;
+
+bool JS::detail::CallMethodIfWrapped(JSContext* cx, IsAcceptableThis test,
+ NativeImpl impl, const CallArgs& args) {
+ HandleValue thisv = args.thisv();
+ MOZ_ASSERT(!test(thisv));
+
+ if (thisv.isObject()) {
+ JSObject& thisObj = args.thisv().toObject();
+ if (thisObj.is<ProxyObject>()) {
+ return Proxy::nativeCall(cx, test, impl, args);
+ }
+ }
+
+ if (IsCallSelfHostedNonGenericMethod(impl)) {
+ return ReportIncompatibleSelfHostedMethod(cx, thisv);
+ }
+
+ ReportIncompatible(cx, args);
+ return false;
+}
diff --git a/js/src/vm/CharacterEncoding.cpp b/js/src/vm/CharacterEncoding.cpp
new file mode 100644
index 0000000000..52edcae45e
--- /dev/null
+++ b/js/src/vm/CharacterEncoding.cpp
@@ -0,0 +1,888 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/CharacterEncoding.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Latin1.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Range.h"
+#include "mozilla/Span.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/TextUtils.h"
+#include "mozilla/Utf8.h"
+
+#ifndef XP_LINUX
+// We still support libstdc++ versions without codecvt support on Linux.
+# include <codecvt>
+#endif
+#include <cwchar>
+#include <limits>
+#include <locale>
+#include <type_traits>
+
+#include "frontend/FrontendContext.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "util/StringBuffer.h"
+#include "util/Unicode.h" // unicode::REPLACEMENT_CHARACTER
+#include "vm/JSContext.h"
+
+using mozilla::AsChars;
+using mozilla::AsciiValidUpTo;
+using mozilla::AsWritableChars;
+using mozilla::ConvertLatin1toUtf8Partial;
+using mozilla::ConvertUtf16toUtf8Partial;
+using mozilla::IsAscii;
+using mozilla::IsUtf8Latin1;
+using mozilla::LossyConvertUtf16toLatin1;
+using mozilla::Span;
+using mozilla::Utf8Unit;
+
+using JS::Latin1CharsZ;
+using JS::TwoByteCharsZ;
+using JS::UTF8Chars;
+using JS::UTF8CharsZ;
+
+using namespace js;
+using namespace js::unicode;
+
+Latin1CharsZ JS::LossyTwoByteCharsToNewLatin1CharsZ(
+ JSContext* cx, const mozilla::Range<const char16_t> tbchars) {
+ MOZ_ASSERT(cx);
+ size_t len = tbchars.length();
+ unsigned char* latin1 = cx->pod_malloc<unsigned char>(len + 1);
+ if (!latin1) {
+ return Latin1CharsZ();
+ }
+ LossyConvertUtf16toLatin1(tbchars, AsWritableChars(Span(latin1, len)));
+ latin1[len] = '\0';
+ return Latin1CharsZ(latin1, len);
+}
+
+template <typename CharT>
+static size_t GetDeflatedUTF8StringLength(const CharT* chars, size_t nchars) {
+ size_t nbytes = nchars;
+ for (const CharT* end = chars + nchars; chars < end; chars++) {
+ char16_t c = *chars;
+ if (c < 0x80) {
+ continue;
+ }
+ char32_t v;
+ if (IsSurrogate(c)) {
+      /* nbytes already counts one byte for this code unit; an unpaired
+         surrogate is encoded as three bytes, so the bad cases add two more. */
+ if (IsTrailSurrogate(c) || (chars + 1) == end) {
+ nbytes += 2; /* Bad Surrogate */
+ continue;
+ }
+ char16_t c2 = chars[1];
+ if (!IsTrailSurrogate(c2)) {
+ nbytes += 2; /* Bad Surrogate */
+ continue;
+ }
+ v = UTF16Decode(c, c2);
+ nbytes--;
+ chars++;
+ } else {
+ v = c;
+ }
+ v >>= 11;
+ nbytes++;
+ while (v) {
+ v >>= 5;
+ nbytes++;
+ }
+ }
+ return nbytes;
+}
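+
+// Worked example (illustrative): for U+20AC the initial count contributes one
+// byte; v = 0x20AC >> 11 = 0x4 adds a second, and the loop (0x4 >> 5 == 0)
+// adds a third, matching the 3-byte UTF-8 encoding E2 82 AC.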
+
+JS_PUBLIC_API size_t JS::GetDeflatedUTF8StringLength(JSLinearString* s) {
+ JS::AutoCheckCannotGC nogc;
+ return s->hasLatin1Chars()
+ ? ::GetDeflatedUTF8StringLength(s->latin1Chars(nogc), s->length())
+ : ::GetDeflatedUTF8StringLength(s->twoByteChars(nogc),
+ s->length());
+}
+
+JS_PUBLIC_API size_t JS::DeflateStringToUTF8Buffer(JSLinearString* src,
+ mozilla::Span<char> dst) {
+ JS::AutoCheckCannotGC nogc;
+ if (src->hasLatin1Chars()) {
+ auto source = AsChars(Span(src->latin1Chars(nogc), src->length()));
+ auto [read, written] = ConvertLatin1toUtf8Partial(source, dst);
+ (void)read;
+ return written;
+ }
+ auto source = Span(src->twoByteChars(nogc), src->length());
+ auto [read, written] = ConvertUtf16toUtf8Partial(source, dst);
+ (void)read;
+ return written;
+}
+
+template <typename CharT>
+void ConvertToUTF8(mozilla::Span<CharT> src, mozilla::Span<char> dst);
+
+template <>
+void ConvertToUTF8<const char16_t>(mozilla::Span<const char16_t> src,
+ mozilla::Span<char> dst) {
+ (void)ConvertUtf16toUtf8Partial(src, dst);
+}
+
+template <>
+void ConvertToUTF8<const Latin1Char>(mozilla::Span<const Latin1Char> src,
+ mozilla::Span<char> dst) {
+ (void)ConvertLatin1toUtf8Partial(AsChars(src), dst);
+}
+
+template <typename CharT, typename Allocator>
+UTF8CharsZ JS::CharsToNewUTF8CharsZ(Allocator* alloc,
+ const mozilla::Range<CharT> chars) {
+ /* Get required buffer size. */
+ const CharT* str = chars.begin().get();
+ size_t len = ::GetDeflatedUTF8StringLength(str, chars.length());
+
+ /* Allocate buffer. */
+ char* utf8 = alloc->template pod_malloc<char>(len + 1);
+ if (!utf8) {
+ return UTF8CharsZ();
+ }
+
+ /* Encode to UTF8. */
+ ::ConvertToUTF8(Span(str, chars.length()), Span(utf8, len));
+ utf8[len] = '\0';
+
+ return UTF8CharsZ(utf8, len);
+}
+
+template UTF8CharsZ JS::CharsToNewUTF8CharsZ(
+ JSContext* cx, const mozilla::Range<Latin1Char> chars);
+
+template UTF8CharsZ JS::CharsToNewUTF8CharsZ(
+ JSContext* cx, const mozilla::Range<char16_t> chars);
+
+template UTF8CharsZ JS::CharsToNewUTF8CharsZ(
+ JSContext* cx, const mozilla::Range<const Latin1Char> chars);
+
+template UTF8CharsZ JS::CharsToNewUTF8CharsZ(
+ JSContext* cx, const mozilla::Range<const char16_t> chars);
+
+template UTF8CharsZ JS::CharsToNewUTF8CharsZ(
+ FrontendAllocator* cx, const mozilla::Range<Latin1Char> chars);
+
+template UTF8CharsZ JS::CharsToNewUTF8CharsZ(
+ FrontendAllocator* cx, const mozilla::Range<char16_t> chars);
+
+template UTF8CharsZ JS::CharsToNewUTF8CharsZ(
+ FrontendAllocator* cx, const mozilla::Range<const Latin1Char> chars);
+
+template UTF8CharsZ JS::CharsToNewUTF8CharsZ(
+ FrontendAllocator* cx, const mozilla::Range<const char16_t> chars);
+
+static constexpr uint32_t INVALID_UTF8 = std::numeric_limits<char32_t>::max();
+
+/*
+ * Convert a UTF-8 character sequence into a UCS-4 character and return that
+ * character. It is assumed that the caller already checked that the sequence
+ * is valid.
+ */
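+// Illustrative trace: for the valid sequence E2 82 AC (U+20AC), the lead byte
+// contributes 0xE2 & 0x0F = 0x02, and each continuation byte appends six bits:
+// (0x02 << 6) | 0x02 = 0x82, then (0x82 << 6) | 0x2C = 0x20AC.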
+static char32_t Utf8ToOneUcs4CharImpl(const uint8_t* utf8Buffer,
+ int utf8Length) {
+ MOZ_ASSERT(1 <= utf8Length && utf8Length <= 4);
+
+ if (utf8Length == 1) {
+ MOZ_ASSERT(!(*utf8Buffer & 0x80));
+ return *utf8Buffer;
+ }
+
+ /* from Unicode 3.1, non-shortest form is illegal */
+ static const char32_t minucs4Table[] = {0x80, 0x800, NonBMPMin};
+
+ MOZ_ASSERT((*utf8Buffer & (0x100 - (1 << (7 - utf8Length)))) ==
+ (0x100 - (1 << (8 - utf8Length))));
+ char32_t ucs4Char = *utf8Buffer++ & ((1 << (7 - utf8Length)) - 1);
+ char32_t minucs4Char = minucs4Table[utf8Length - 2];
+ while (--utf8Length) {
+ MOZ_ASSERT((*utf8Buffer & 0xC0) == 0x80);
+ ucs4Char = (ucs4Char << 6) | (*utf8Buffer++ & 0x3F);
+ }
+
+ if (MOZ_UNLIKELY(ucs4Char < minucs4Char)) {
+ return INVALID_UTF8;
+ }
+
+ if (MOZ_UNLIKELY(IsSurrogate(ucs4Char))) {
+ return INVALID_UTF8;
+ }
+
+ return ucs4Char;
+}
+
+char32_t JS::Utf8ToOneUcs4Char(const uint8_t* utf8Buffer, int utf8Length) {
+ return Utf8ToOneUcs4CharImpl(utf8Buffer, utf8Length);
+}
+
+static void ReportInvalidCharacter(JSContext* cx, uint32_t offset) {
+ char buffer[10];
+ SprintfLiteral(buffer, "%u", offset);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_MALFORMED_UTF8_CHAR, buffer);
+}
+
+static void ReportBufferTooSmall(JSContext* cx, uint32_t dummy) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BUFFER_TOO_SMALL);
+}
+
+static void ReportTooBigCharacter(JSContext* cx, uint32_t v) {
+ char buffer[11];
+ SprintfLiteral(buffer, "0x%x", v);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_UTF8_CHAR_TOO_LARGE, buffer);
+}
+
+enum class LoopDisposition {
+ Break,
+ Continue,
+};
+
+enum class OnUTF8Error {
+ InsertReplacementCharacter,
+ InsertQuestionMark,
+ Throw,
+ Crash,
+};
+
+// Scan UTF-8 input and (internally, at least) convert it to a series of UTF-16
+// code units. But you can also do odd things like pass an empty lambda for
+// `dst`, in which case the output is discarded entirely--the only effect of
+// calling the template that way is error-checking.
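+//
+// For example (an illustrative sketch), counting the UTF-16 length of a
+// UTF-8 buffer:
+//
+//   size_t n = 0;
+//   auto count = [&n](char16_t) { n++; return LoopDisposition::Continue; };
+//   if (!InflateUTF8ToUTF16<OnUTF8Error::Throw>(cx, src, count)) {
+//     return false;
+//   }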
+template <OnUTF8Error ErrorAction, typename OutputFn>
+static bool InflateUTF8ToUTF16(JSContext* cx, const UTF8Chars src,
+ OutputFn dst) {
+ size_t srclen = src.length();
+ for (uint32_t i = 0; i < srclen; i++) {
+ uint32_t v = uint32_t(src[i]);
+ if (!(v & 0x80)) {
+ // ASCII code unit. Simple copy.
+ if (dst(uint16_t(v)) == LoopDisposition::Break) {
+ break;
+ }
+ } else {
+ // Non-ASCII code unit. Determine its length in bytes (n).
+ uint32_t n = 1;
+ while (v & (0x80 >> n)) {
+ n++;
+ }
+
+#define INVALID(report, arg, n2) \
+ do { \
+ if (ErrorAction == OnUTF8Error::Throw) { \
+ report(cx, arg); \
+ return false; \
+ } else if (ErrorAction == OnUTF8Error::Crash) { \
+ MOZ_CRASH("invalid UTF-8 string: " #report); \
+ } else { \
+ char16_t replacement; \
+ if (ErrorAction == OnUTF8Error::InsertReplacementCharacter) { \
+ replacement = REPLACEMENT_CHARACTER; \
+ } else { \
+ MOZ_ASSERT(ErrorAction == OnUTF8Error::InsertQuestionMark); \
+ replacement = '?'; \
+ } \
+ if (dst(replacement) == LoopDisposition::Break) { \
+ break; \
+ } \
+ n = n2; \
+ goto invalidMultiByteCodeUnit; \
+ } \
+ } while (0)
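+
+      // Note: in the replacement modes, INVALID emits one replacement unit and
+      // then jumps to invalidMultiByteCodeUnit below, with |n| set so that the
+      // outer loop resumes after the bytes already consumed.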
+
+ // Check the leading byte.
+ if (n < 2 || n > 4) {
+ INVALID(ReportInvalidCharacter, i, 1);
+ }
+
+ // Check that |src| is large enough to hold an n-byte code unit.
+ if (i + n > srclen) {
+ INVALID(ReportBufferTooSmall, /* dummy = */ 0, 1);
+ }
+
+ // Check the second byte. From Unicode Standard v6.2, Table 3-7
+ // Well-Formed UTF-8 Byte Sequences.
+ if ((v == 0xE0 && ((uint8_t)src[i + 1] & 0xE0) != 0xA0) || // E0 A0~BF
+ (v == 0xED && ((uint8_t)src[i + 1] & 0xE0) != 0x80) || // ED 80~9F
+ (v == 0xF0 && ((uint8_t)src[i + 1] & 0xF0) == 0x80) || // F0 90~BF
+ (v == 0xF4 && ((uint8_t)src[i + 1] & 0xF0) != 0x80)) // F4 80~8F
+ {
+ INVALID(ReportInvalidCharacter, i, 1);
+ }
+
+ // Check the continuation bytes.
+ for (uint32_t m = 1; m < n; m++) {
+ if ((src[i + m] & 0xC0) != 0x80) {
+ INVALID(ReportInvalidCharacter, i, m);
+ }
+ }
+
+ // Determine the code unit's length in CharT and act accordingly.
+ v = Utf8ToOneUcs4CharImpl((uint8_t*)&src[i], n);
+ if (v < NonBMPMin) {
+ // The n-byte UTF8 code unit will fit in a single CharT.
+ if (dst(char16_t(v)) == LoopDisposition::Break) {
+ break;
+ }
+ } else if (v <= NonBMPMax) {
+ // The n-byte UTF8 code unit will fit in two CharT units.
+ if (dst(LeadSurrogate(v)) == LoopDisposition::Break) {
+ break;
+ }
+ if (dst(TrailSurrogate(v)) == LoopDisposition::Break) {
+ break;
+ }
+ } else {
+ // The n-byte UTF8 code unit won't fit in two CharT units.
+ INVALID(ReportTooBigCharacter, v, 1);
+ }
+
+ invalidMultiByteCodeUnit:
+ // Move i to the last byte of the multi-byte code unit; the loop
+ // header will do the final i++ to move to the start of the next
+ // code unit.
+ i += n - 1;
+ }
+ }
+
+ return true;
+}
+
+template <OnUTF8Error ErrorAction, typename CharT>
+static void CopyAndInflateUTF8IntoBuffer(JSContext* cx, const UTF8Chars src,
+ CharT* dst, size_t outlen,
+ bool allASCII) {
+ if (allASCII) {
+ size_t srclen = src.length();
+ MOZ_ASSERT(outlen == srclen);
+ for (uint32_t i = 0; i < srclen; i++) {
+ dst[i] = CharT(src[i]);
+ }
+ } else {
+ size_t j = 0;
+ auto push = [dst, &j](char16_t c) -> LoopDisposition {
+ dst[j++] = CharT(c);
+ return LoopDisposition::Continue;
+ };
+ MOZ_ALWAYS_TRUE((InflateUTF8ToUTF16<ErrorAction>(cx, src, push)));
+ MOZ_ASSERT(j == outlen);
+ }
+}
+
+template <OnUTF8Error ErrorAction, typename CharsT>
+static CharsT InflateUTF8StringHelper(JSContext* cx, const UTF8Chars src,
+ size_t* outlen, arena_id_t destArenaId) {
+ using CharT = typename CharsT::CharT;
+ static_assert(
+ std::is_same_v<CharT, char16_t> || std::is_same_v<CharT, Latin1Char>,
+ "bad CharT");
+
+ *outlen = 0;
+
+ size_t len = 0;
+ bool allASCII = true;
+ auto count = [&len, &allASCII](char16_t c) -> LoopDisposition {
+ len++;
+ allASCII &= (c < 0x80);
+ return LoopDisposition::Continue;
+ };
+ if (!InflateUTF8ToUTF16<ErrorAction>(cx, src, count)) {
+ return CharsT();
+ }
+ *outlen = len;
+
+ CharT* dst = cx->pod_arena_malloc<CharT>(destArenaId,
+ *outlen + 1); // +1 for NUL
+
+ if (!dst) {
+ ReportOutOfMemory(cx);
+ return CharsT();
+ }
+
+ constexpr OnUTF8Error errorMode =
+ std::is_same_v<CharT, Latin1Char>
+ ? OnUTF8Error::InsertQuestionMark
+ : OnUTF8Error::InsertReplacementCharacter;
+ CopyAndInflateUTF8IntoBuffer<errorMode>(cx, src, dst, *outlen, allASCII);
+ dst[*outlen] = CharT('\0');
+
+ return CharsT(dst, *outlen);
+}
+
+TwoByteCharsZ JS::UTF8CharsToNewTwoByteCharsZ(JSContext* cx,
+ const UTF8Chars utf8,
+ size_t* outlen,
+ arena_id_t destArenaId) {
+ return InflateUTF8StringHelper<OnUTF8Error::Throw, TwoByteCharsZ>(
+ cx, utf8, outlen, destArenaId);
+}
+
+TwoByteCharsZ JS::UTF8CharsToNewTwoByteCharsZ(JSContext* cx,
+ const ConstUTF8CharsZ& utf8,
+ size_t* outlen,
+ arena_id_t destArenaId) {
+ UTF8Chars chars(utf8.c_str(), strlen(utf8.c_str()));
+ return InflateUTF8StringHelper<OnUTF8Error::Throw, TwoByteCharsZ>(
+ cx, chars, outlen, destArenaId);
+}
+
+TwoByteCharsZ JS::LossyUTF8CharsToNewTwoByteCharsZ(JSContext* cx,
+ const JS::UTF8Chars utf8,
+ size_t* outlen,
+ arena_id_t destArenaId) {
+ return InflateUTF8StringHelper<OnUTF8Error::InsertReplacementCharacter,
+ TwoByteCharsZ>(cx, utf8, outlen, destArenaId);
+}
+
+TwoByteCharsZ JS::LossyUTF8CharsToNewTwoByteCharsZ(
+ JSContext* cx, const JS::ConstUTF8CharsZ& utf8, size_t* outlen,
+ arena_id_t destArenaId) {
+ UTF8Chars chars(utf8.c_str(), strlen(utf8.c_str()));
+ return InflateUTF8StringHelper<OnUTF8Error::InsertReplacementCharacter,
+ TwoByteCharsZ>(cx, chars, outlen, destArenaId);
+}
+
+static void UpdateSmallestEncodingForChar(char16_t c,
+ JS::SmallestEncoding* encoding) {
+ JS::SmallestEncoding newEncoding = JS::SmallestEncoding::ASCII;
+ if (c >= 0x80) {
+ if (c < 0x100) {
+ newEncoding = JS::SmallestEncoding::Latin1;
+ } else {
+ newEncoding = JS::SmallestEncoding::UTF16;
+ }
+ }
+ if (newEncoding > *encoding) {
+ *encoding = newEncoding;
+ }
+}
+
+JS::SmallestEncoding JS::FindSmallestEncoding(UTF8Chars utf8) {
+ Span<unsigned char> unsignedSpan = utf8;
+ auto charSpan = AsChars(unsignedSpan);
+ size_t upTo = AsciiValidUpTo(charSpan);
+ if (upTo == charSpan.Length()) {
+ return SmallestEncoding::ASCII;
+ }
+ if (IsUtf8Latin1(charSpan.From(upTo))) {
+ return SmallestEncoding::Latin1;
+ }
+ return SmallestEncoding::UTF16;
+}
+
+Latin1CharsZ JS::UTF8CharsToNewLatin1CharsZ(JSContext* cx, const UTF8Chars utf8,
+ size_t* outlen,
+ arena_id_t destArenaId) {
+ return InflateUTF8StringHelper<OnUTF8Error::Throw, Latin1CharsZ>(
+ cx, utf8, outlen, destArenaId);
+}
+
+Latin1CharsZ JS::LossyUTF8CharsToNewLatin1CharsZ(JSContext* cx,
+ const UTF8Chars utf8,
+ size_t* outlen,
+ arena_id_t destArenaId) {
+ return InflateUTF8StringHelper<OnUTF8Error::InsertQuestionMark, Latin1CharsZ>(
+ cx, utf8, outlen, destArenaId);
+}
+
+/**
+ * Atomization Helpers.
+ *
+ * These functions are extremely single-use, and are not intended for general
+ * consumption.
+ */
+
+bool GetUTF8AtomizationData(JSContext* cx, const JS::UTF8Chars utf8,
+ size_t* outlen, JS::SmallestEncoding* encoding,
+ HashNumber* hashNum) {
+ *outlen = 0;
+ *encoding = JS::SmallestEncoding::ASCII;
+ *hashNum = 0;
+
+ auto getMetadata = [outlen, encoding,
+ hashNum](char16_t c) -> LoopDisposition {
+ (*outlen)++;
+ UpdateSmallestEncodingForChar(c, encoding);
+ *hashNum = mozilla::AddToHash(*hashNum, c);
+ return LoopDisposition::Continue;
+ };
+ if (!InflateUTF8ToUTF16<OnUTF8Error::Throw>(cx, utf8, getMetadata)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename CharT>
+bool UTF8EqualsChars(const JS::UTF8Chars utfChars, const CharT* chars) {
+ size_t ind = 0;
+ bool isEqual = true;
+
+ auto checkEqual = [&isEqual, &ind, chars](char16_t c) -> LoopDisposition {
+#ifdef DEBUG
+ JS::SmallestEncoding encoding = JS::SmallestEncoding::ASCII;
+ UpdateSmallestEncodingForChar(c, &encoding);
+ if (std::is_same_v<CharT, JS::Latin1Char>) {
+ MOZ_ASSERT(encoding <= JS::SmallestEncoding::Latin1);
+ } else if (!std::is_same_v<CharT, char16_t>) {
+ MOZ_CRASH("Invalid character type in UTF8EqualsChars");
+ }
+#endif
+
+ if (CharT(c) != chars[ind]) {
+ isEqual = false;
+ return LoopDisposition::Break;
+ }
+
+ ind++;
+ return LoopDisposition::Continue;
+ };
+
+ // To get here, you must have checked your work.
+ InflateUTF8ToUTF16<OnUTF8Error::Crash>(/* cx = */ nullptr, utfChars,
+ checkEqual);
+
+ return isEqual;
+}
+
+template bool UTF8EqualsChars(const JS::UTF8Chars, const char16_t*);
+template bool UTF8EqualsChars(const JS::UTF8Chars, const JS::Latin1Char*);
+
+template <typename CharT>
+void InflateUTF8CharsToBuffer(const JS::UTF8Chars src, CharT* dst,
+ size_t dstLen, JS::SmallestEncoding encoding) {
+ CopyAndInflateUTF8IntoBuffer<OnUTF8Error::Crash>(
+ /* cx = */ nullptr, src, dst, dstLen,
+ encoding == JS::SmallestEncoding::ASCII);
+}
+
+template void InflateUTF8CharsToBuffer(const UTF8Chars src, char16_t* dst,
+ size_t dstLen,
+ JS::SmallestEncoding encoding);
+template void InflateUTF8CharsToBuffer(const UTF8Chars src, JS::Latin1Char* dst,
+ size_t dstLen,
+ JS::SmallestEncoding encoding);
+
+#ifdef DEBUG
+void JS::ConstUTF8CharsZ::validate(size_t aLength) {
+ MOZ_ASSERT(data_);
+ UTF8Chars chars(data_, aLength);
+ auto nop = [](char16_t) -> LoopDisposition {
+ return LoopDisposition::Continue;
+ };
+ InflateUTF8ToUTF16<OnUTF8Error::Crash>(/* cx = */ nullptr, chars, nop);
+}
+#endif
+
+bool JS::StringIsASCII(const char* s) {
+ while (*s) {
+ if (*s & 0x80) {
+ return false;
+ }
+ s++;
+ }
+ return true;
+}
+
+bool JS::StringIsASCII(Span<const char> s) { return IsAscii(s); }
+
+JS_PUBLIC_API JS::UniqueChars JS::EncodeNarrowToUtf8(JSContext* cx,
+ const char* chars) {
+ // Convert the narrow multibyte character string to a wide string and then
+ // use EncodeWideToUtf8() to convert the wide string to a UTF-8 string.
+
+ std::mbstate_t mb{};
+
+ // NOTE: The 2nd parameter is overwritten even if the 1st parameter is nullptr
+ // on Android NDK older than v16. Use a temporary variable to save the
+ // `chars` for the subsequent call. See bug 1492090.
+ const char* tmpChars = chars;
+
+ size_t wideLen = std::mbsrtowcs(nullptr, &tmpChars, 0, &mb);
+ if (wideLen == size_t(-1)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_CONVERT_TO_WIDE);
+ return nullptr;
+ }
+ MOZ_ASSERT(std::mbsinit(&mb),
+ "multi-byte state is in its initial state when no conversion "
+ "error occured");
+
+ size_t bufLen = wideLen + 1;
+ auto wideChars = cx->make_pod_array<wchar_t>(bufLen);
+ if (!wideChars) {
+ return nullptr;
+ }
+
+ mozilla::DebugOnly<size_t> actualLen =
+ std::mbsrtowcs(wideChars.get(), &chars, bufLen, &mb);
+ MOZ_ASSERT(wideLen == actualLen);
+ MOZ_ASSERT(wideChars[actualLen] == '\0');
+
+ return EncodeWideToUtf8(cx, wideChars.get());
+}
+
+JS_PUBLIC_API JS::UniqueChars JS::EncodeWideToUtf8(JSContext* cx,
+ const wchar_t* chars) {
+ using CheckedSizeT = mozilla::CheckedInt<size_t>;
+
+#ifndef XP_LINUX
+ // Use the standard codecvt facet to convert a wide string to UTF-8.
+ std::codecvt_utf8<wchar_t> cv;
+
+ size_t len = std::wcslen(chars);
+ CheckedSizeT utf8MaxLen = CheckedSizeT(len) * cv.max_length();
+ CheckedSizeT utf8BufLen = utf8MaxLen + 1;
+ if (!utf8BufLen.isValid()) {
+ JS_ReportAllocationOverflow(cx);
+ return nullptr;
+ }
+ auto utf8 = cx->make_pod_array<char>(utf8BufLen.value());
+ if (!utf8) {
+ return nullptr;
+ }
+
+ // STL returns |codecvt_base::partial| for empty strings.
+ if (len == 0) {
+ return utf8;
+ }
+
+ std::mbstate_t mb{};
+ const wchar_t* fromNext;
+ char* toNext;
+ std::codecvt_base::result result =
+ cv.out(mb, chars, chars + len, fromNext, utf8.get(),
+ utf8.get() + utf8MaxLen.value(), toNext);
+ if (result != std::codecvt_base::ok) {
+ MOZ_ASSERT(result == std::codecvt_base::error);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_CONVERT_WIDE_TO_UTF8);
+ return nullptr;
+ }
+ *toNext = '\0'; // Explicit null-termination required.
+
+ // codecvt_utf8 doesn't validate its output and may produce WTF-8 instead
+ // of UTF-8 on some platforms when the input contains unpaired surrogate
+ // characters. We don't allow this.
+ if (!mozilla::IsUtf8(
+ mozilla::Span(utf8.get(), size_t(toNext - utf8.get())))) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_CONVERT_WIDE_TO_UTF8);
+ return nullptr;
+ }
+
+ return utf8;
+#else
+ static_assert(sizeof(wchar_t) == 4,
+ "Assume wchar_t is UTF-32 on Linux systems");
+
+ constexpr size_t MaxUtf8CharLength = 4;
+
+ size_t len = std::wcslen(chars);
+ CheckedSizeT utf8MaxLen = CheckedSizeT(len) * MaxUtf8CharLength;
+ CheckedSizeT utf8BufLen = utf8MaxLen + 1;
+ if (!utf8BufLen.isValid()) {
+ JS_ReportAllocationOverflow(cx);
+ return nullptr;
+ }
+ auto utf8 = cx->make_pod_array<char>(utf8BufLen.value());
+ if (!utf8) {
+ return nullptr;
+ }
+
+ char* dst = utf8.get();
+ for (size_t i = 0; i < len; i++) {
+ uint8_t utf8buf[MaxUtf8CharLength];
+ uint32_t utf8Len = OneUcs4ToUtf8Char(utf8buf, chars[i]);
+ for (size_t j = 0; j < utf8Len; j++) {
+ *dst++ = char(utf8buf[j]);
+ }
+ }
+ *dst = '\0';
+
+ return utf8;
+#endif
+}
+
+JS_PUBLIC_API JS::UniqueChars JS::EncodeUtf8ToNarrow(JSContext* cx,
+ const char* chars) {
+ // Convert the UTF-8 string to a wide string via EncodeUtf8ToWide() and
+ // then convert the resulting wide string to a narrow multibyte character
+ // string.
+
+ auto wideChars = EncodeUtf8ToWide(cx, chars);
+ if (!wideChars) {
+ return nullptr;
+ }
+
+ const wchar_t* cWideChars = wideChars.get();
+ std::mbstate_t mb{};
+ size_t narrowLen = std::wcsrtombs(nullptr, &cWideChars, 0, &mb);
+ if (narrowLen == size_t(-1)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_CONVERT_TO_NARROW);
+ return nullptr;
+ }
+ MOZ_ASSERT(std::mbsinit(&mb),
+ "multi-byte state is in its initial state when no conversion "
+ "error occured");
+
+ size_t bufLen = narrowLen + 1;
+ auto narrow = cx->make_pod_array<char>(bufLen);
+ if (!narrow) {
+ return nullptr;
+ }
+
+ mozilla::DebugOnly<size_t> actualLen =
+ std::wcsrtombs(narrow.get(), &cWideChars, bufLen, &mb);
+ MOZ_ASSERT(narrowLen == actualLen);
+ MOZ_ASSERT(narrow[actualLen] == '\0');
+
+ return narrow;
+}
+
+JS_PUBLIC_API JS::UniqueWideChars JS::EncodeUtf8ToWide(JSContext* cx,
+ const char* chars) {
+ // Only valid UTF-8 strings should be passed to this function.
+ MOZ_ASSERT(mozilla::IsUtf8(mozilla::Span(chars, strlen(chars))));
+
+#ifndef XP_LINUX
+ // Use the standard codecvt facet to convert from UTF-8 to a wide string.
+ std::codecvt_utf8<wchar_t> cv;
+
+ size_t len = strlen(chars);
+ auto wideChars = cx->make_pod_array<wchar_t>(len + 1);
+ if (!wideChars) {
+ return nullptr;
+ }
+
+ // STL returns |codecvt_base::partial| for empty strings.
+ if (len == 0) {
+ return wideChars;
+ }
+
+ std::mbstate_t mb{};
+ const char* fromNext;
+ wchar_t* toNext;
+ std::codecvt_base::result result =
+ cv.in(mb, chars, chars + len, fromNext, wideChars.get(),
+ wideChars.get() + len, toNext);
+ if (result != std::codecvt_base::ok) {
+ MOZ_ASSERT(result == std::codecvt_base::error);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_CONVERT_UTF8_TO_WIDE);
+ return nullptr;
+ }
+ *toNext = '\0'; // Explicit null-termination required.
+
+ return wideChars;
+#else
+ static_assert(sizeof(wchar_t) == 4,
+ "Assume wchar_t is UTF-32 on Linux systems");
+
+ size_t len = strlen(chars);
+ auto wideChars = cx->make_pod_array<wchar_t>(len + 1);
+ if (!wideChars) {
+ return nullptr;
+ }
+
+ const auto* s = reinterpret_cast<const unsigned char*>(chars);
+ const auto* const limit = s + len;
+
+ wchar_t* dst = wideChars.get();
+ while (s < limit) {
+ unsigned char c = *s++;
+
+ if (mozilla::IsAscii(c)) {
+ *dst++ = wchar_t(c);
+ continue;
+ }
+
+ mozilla::Utf8Unit utf8(c);
+ mozilla::Maybe<char32_t> codePoint =
+ mozilla::DecodeOneUtf8CodePoint(utf8, &s, limit);
+ MOZ_ASSERT(codePoint.isSome());
+ *dst++ = wchar_t(*codePoint);
+ }
+ *dst++ = '\0';
+
+ return wideChars;
+#endif
+}
+
+bool StringBuffer::append(const Utf8Unit* units, size_t len) {
+ MOZ_ASSERT(maybeCx_);
+
+ if (isLatin1()) {
+ Latin1CharBuffer& latin1 = latin1Chars();
+
+ while (len > 0) {
+ if (!IsAscii(*units)) {
+ break;
+ }
+
+ if (!latin1.append(units->toUnsignedChar())) {
+ return false;
+ }
+
+ ++units;
+ --len;
+ }
+ if (len == 0) {
+ return true;
+ }
+
+ // Non-ASCII doesn't *necessarily* mean we couldn't keep appending to
+ // |latin1|, but it's only possible for [U+0080, U+0100) code points,
+ // and handling the full complexity of UTF-8 only for that very small
+ // additional range isn't worth it. Inflate to two-byte storage before
+ // appending the remaining code points.
+ if (!inflateChars()) {
+ return false;
+ }
+ }
+
+ UTF8Chars remainingUtf8(units, len);
+
+ // Determine how many UTF-16 code units are required to represent the
+ // remaining units.
+ size_t utf16Len = 0;
+ auto countInflated = [&utf16Len](char16_t c) -> LoopDisposition {
+ utf16Len++;
+ return LoopDisposition::Continue;
+ };
+ if (!InflateUTF8ToUTF16<OnUTF8Error::Throw>(maybeCx_, remainingUtf8,
+ countInflated)) {
+ return false;
+ }
+
+ TwoByteCharBuffer& buf = twoByteChars();
+
+ size_t i = buf.length();
+ if (!buf.growByUninitialized(utf16Len)) {
+ return false;
+ }
+ MOZ_ASSERT(i + utf16Len == buf.length(),
+ "growByUninitialized assumed to increase length immediately");
+
+ char16_t* toFill = &buf[i];
+ auto appendUtf16 = [&toFill](char16_t unit) {
+ *toFill++ = unit;
+ return LoopDisposition::Continue;
+ };
+
+ MOZ_ALWAYS_TRUE(InflateUTF8ToUTF16<OnUTF8Error::Throw>(
+ maybeCx_, remainingUtf8, appendUtf16));
+ MOZ_ASSERT(toFill == buf.end());
+ return true;
+}
diff --git a/js/src/vm/CheckIsObjectKind.h b/js/src/vm/CheckIsObjectKind.h
new file mode 100644
index 0000000000..321870d6ed
--- /dev/null
+++ b/js/src/vm/CheckIsObjectKind.h
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_CheckIsObjectKind_h
+#define vm_CheckIsObjectKind_h
+
+#include <stdint.h> // uint8_t
+
+namespace js {
+
+enum class CheckIsObjectKind : uint8_t {
+ IteratorNext,
+ IteratorReturn,
+ IteratorThrow,
+ GetIterator,
+ GetAsyncIterator
+};
+
+} // namespace js
+
+#endif /* vm_CheckIsObjectKind_h */
diff --git a/js/src/vm/CodeCoverage.cpp b/js/src/vm/CodeCoverage.cpp
new file mode 100644
index 0000000000..120fe1da6d
--- /dev/null
+++ b/js/src/vm/CodeCoverage.cpp
@@ -0,0 +1,673 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/CodeCoverage.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+#include <stdio.h>
+#include <utility>
+
+#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator
+#include "gc/Zone.h"
+#include "util/GetPidProvider.h" // getpid()
+#include "util/Text.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/JSScript.h"
+#include "vm/Realm.h"
+#include "vm/Runtime.h"
+#include "vm/Time.h"
+
+// This file contains a few functions which are used to produce files
+// understood by lcov tools. A detailed description of the format is available
+// in the man page for "geninfo" [1]. In short, the following paraphrases the
+// man page, using curly braces prefixed by for-each to express repeated
+// patterns.
+//
+// TN:<compartment name>
+// for-each <source file> {
+// SF:<filename>
+// for-each <script> {
+// FN:<line>,<name>
+// }
+// for-each <script> {
+// FNDA:<hits>,<name>
+// }
+// FNF:<number of scripts>
+// FNH:<sum of scripts hits>
+// for-each <script> {
+// for-each <branch> {
+// BRDA:<line>,<block id>,<target id>,<taken>
+// }
+// }
+// BRF:<number of branches>
+// BRH:<sum of branches hits>
+// for-each <script> {
+// for-each <line> {
+// DA:<line>,<hits>
+// }
+// }
+// LF:<number of lines>
+// LH:<sum of lines hits>
+// }
+//
+// [1] http://ltp.sourceforge.net/coverage/lcov/geninfo.1.php
+//
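+// As an illustrative sketch (not emitted verbatim by this file), a minimal
+// record for a single function "f" defined on line 3 and hit twice could be:
+//
+// TN:ExampleRealm
+// SF:/tmp/example.js
+// FN:3,f
+// FNDA:2,f
+// FNF:1
+// FNH:1
+// BRF:0
+// BRH:0
+// DA:3,2
+// LF:1
+// LH:1
+// end_of_record
+//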
+namespace js {
+namespace coverage {
+
+LCovSource::LCovSource(LifoAlloc* alloc, UniqueChars name)
+ : name_(std::move(name)),
+ outFN_(alloc),
+ outFNDA_(alloc),
+ numFunctionsFound_(0),
+ numFunctionsHit_(0),
+ outBRDA_(alloc),
+ numBranchesFound_(0),
+ numBranchesHit_(0),
+ numLinesInstrumented_(0),
+ numLinesHit_(0),
+ maxLineHit_(0),
+ hasTopLevelScript_(false),
+ hadOOM_(false) {}
+
+void LCovSource::exportInto(GenericPrinter& out) {
+ if (hadOutOfMemory()) {
+ out.reportOutOfMemory();
+ } else {
+ out.printf("SF:%s\n", name_.get());
+
+ outFN_.exportInto(out);
+ outFNDA_.exportInto(out);
+ out.printf("FNF:%zu\n", numFunctionsFound_);
+ out.printf("FNH:%zu\n", numFunctionsHit_);
+
+ outBRDA_.exportInto(out);
+ out.printf("BRF:%zu\n", numBranchesFound_);
+ out.printf("BRH:%zu\n", numBranchesHit_);
+
+ if (!linesHit_.empty()) {
+ for (size_t lineno = 1; lineno <= maxLineHit_; ++lineno) {
+ if (auto p = linesHit_.lookup(lineno)) {
+ out.printf("DA:%zu,%" PRIu64 "\n", lineno, p->value());
+ }
+ }
+ }
+
+ out.printf("LF:%zu\n", numLinesInstrumented_);
+ out.printf("LH:%zu\n", numLinesHit_);
+
+ out.put("end_of_record\n");
+ }
+
+ outFN_.clear();
+ outFNDA_.clear();
+ numFunctionsFound_ = 0;
+ numFunctionsHit_ = 0;
+ outBRDA_.clear();
+ numBranchesFound_ = 0;
+ numBranchesHit_ = 0;
+ linesHit_.clear();
+ numLinesInstrumented_ = 0;
+ numLinesHit_ = 0;
+ maxLineHit_ = 0;
+}
+
+void LCovSource::writeScript(JSScript* script, const char* scriptName) {
+ if (hadOutOfMemory()) {
+ return;
+ }
+
+ numFunctionsFound_++;
+ outFN_.printf("FN:%u,%s\n", script->lineno(), scriptName);
+
+ uint64_t hits = 0;
+ ScriptCounts* sc = nullptr;
+ if (script->hasScriptCounts()) {
+ sc = &script->getScriptCounts();
+ numFunctionsHit_++;
+ const PCCounts* counts =
+ sc->maybeGetPCCounts(script->pcToOffset(script->main()));
+ outFNDA_.printf("FNDA:%" PRIu64 ",%s\n", counts->numExec(), scriptName);
+
+ // Set the hit count of the pre-main code to 1, if the function ever got
+ // visited.
+ hits = 1;
+ }
+
+ jsbytecode* snpc = script->code();
+ const SrcNote* sn = script->notes();
+ if (!sn->isTerminator()) {
+ snpc += sn->delta();
+ }
+
+ size_t lineno = script->lineno();
+ jsbytecode* end = script->codeEnd();
+ size_t branchId = 0;
+ bool firstLineHasBeenWritten = false;
+ for (jsbytecode* pc = script->code(); pc != end; pc = GetNextPc(pc)) {
+ MOZ_ASSERT(script->code() <= pc && pc < end);
+ JSOp op = JSOp(*pc);
+ bool jump = IsJumpOpcode(op) || op == JSOp::TableSwitch;
+ bool fallsthrough = BytecodeFallsThrough(op);
+
+ // If the current script & pc has a hit-count report, then update the
+ // current number of hits.
+ if (sc) {
+ const PCCounts* counts = sc->maybeGetPCCounts(script->pcToOffset(pc));
+ if (counts) {
+ hits = counts->numExec();
+ }
+ }
+
+ // If we have additional source notes, walk all the source notes of the
+ // current pc.
+ if (snpc <= pc || !firstLineHasBeenWritten) {
+ size_t oldLine = lineno;
+ SrcNoteIterator iter(sn);
+ while (!iter.atEnd() && snpc <= pc) {
+ sn = *iter;
+ SrcNoteType type = sn->type();
+ if (type == SrcNoteType::SetLine) {
+ lineno = SrcNote::SetLine::getLine(sn, script->lineno());
+ } else if (type == SrcNoteType::NewLine) {
+ lineno++;
+ }
+ ++iter;
+ snpc += (*iter)->delta();
+ }
+ sn = *iter;
+
+ if ((oldLine != lineno || !firstLineHasBeenWritten) &&
+ pc >= script->main() && fallsthrough) {
+ auto p = linesHit_.lookupForAdd(lineno);
+ if (!p) {
+ if (!linesHit_.add(p, lineno, hits)) {
+ hadOOM_ = true;
+ return;
+ }
+ numLinesInstrumented_++;
+ if (hits != 0) {
+ numLinesHit_++;
+ }
+ maxLineHit_ = std::max(lineno, maxLineHit_);
+ } else {
+ if (p->value() == 0 && hits != 0) {
+ numLinesHit_++;
+ }
+ p->value() += hits;
+ }
+
+ firstLineHasBeenWritten = true;
+ }
+ }
+
+    // If the current instruction has thrown, decrement the hit count by the
+    // number of throws.
+ if (sc) {
+ const PCCounts* counts = sc->maybeGetThrowCounts(script->pcToOffset(pc));
+ if (counts) {
+ hits -= counts->numExec();
+ }
+ }
+
+    // If the current pc corresponds to a conditional jump instruction, then
+    // report branch hits.
+ if (jump && fallsthrough) {
+ jsbytecode* fallthroughTarget = GetNextPc(pc);
+ uint64_t fallthroughHits = 0;
+ if (sc) {
+ const PCCounts* counts =
+ sc->maybeGetPCCounts(script->pcToOffset(fallthroughTarget));
+ if (counts) {
+ fallthroughHits = counts->numExec();
+ }
+ }
+
+ uint64_t taken = hits - fallthroughHits;
+ outBRDA_.printf("BRDA:%zu,%zu,0,", lineno, branchId);
+ if (hits) {
+ outBRDA_.printf("%" PRIu64 "\n", taken);
+ } else {
+ outBRDA_.put("-\n", 2);
+ }
+
+ outBRDA_.printf("BRDA:%zu,%zu,1,", lineno, branchId);
+ if (hits) {
+ outBRDA_.printf("%" PRIu64 "\n", fallthroughHits);
+ } else {
+ outBRDA_.put("-\n", 2);
+ }
+
+ // Count the number of branches, and the number of branches hit.
+ numBranchesFound_ += 2;
+ if (hits) {
+ numBranchesHit_ += !!taken + !!fallthroughHits;
+ }
+ branchId++;
+ }
+
+    // If the current pc corresponds to a pre-computed switch case, then
+    // report branch hits for each case statement.
+ if (jump && op == JSOp::TableSwitch) {
+ // Get the default pc.
+ jsbytecode* defaultpc = pc + GET_JUMP_OFFSET(pc);
+ MOZ_ASSERT(script->code() <= defaultpc && defaultpc < end);
+ MOZ_ASSERT(defaultpc > pc);
+
+ // Get the low and high from the tableswitch
+ int32_t low = GET_JUMP_OFFSET(pc + JUMP_OFFSET_LEN * 1);
+ int32_t high = GET_JUMP_OFFSET(pc + JUMP_OFFSET_LEN * 2);
+ MOZ_ASSERT(high - low + 1 >= 0);
+ size_t numCases = high - low + 1;
+
+ auto getCaseOrDefaultPc = [&](size_t index) {
+ if (index < numCases) {
+ return script->tableSwitchCasePC(pc, index);
+ }
+ MOZ_ASSERT(index == numCases);
+ return defaultpc;
+ };
+
+ jsbytecode* firstCaseOrDefaultPc = end;
+ for (size_t j = 0; j < numCases + 1; j++) {
+ jsbytecode* testpc = getCaseOrDefaultPc(j);
+ MOZ_ASSERT(script->code() <= testpc && testpc < end);
+ if (testpc < firstCaseOrDefaultPc) {
+ firstCaseOrDefaultPc = testpc;
+ }
+ }
+
+      // Count the number of hits of the default branch, by subtracting the
+      // number of hits of each case.
+ uint64_t defaultHits = hits;
+
+ // Count the number of hits of the previous case entry.
+ uint64_t fallsThroughHits = 0;
+
+ // Record branches for each case and default.
+ size_t caseId = 0;
+ for (size_t i = 0; i < numCases + 1; i++) {
+ jsbytecode* caseOrDefaultPc = getCaseOrDefaultPc(i);
+ MOZ_ASSERT(script->code() <= caseOrDefaultPc && caseOrDefaultPc < end);
+
+ // PCs might not be in increasing order of case indexes.
+ jsbytecode* lastCaseOrDefaultPc = firstCaseOrDefaultPc - 1;
+ bool foundLastCaseOrDefault = false;
+ for (size_t j = 0; j < numCases + 1; j++) {
+ jsbytecode* testpc = getCaseOrDefaultPc(j);
+ MOZ_ASSERT(script->code() <= testpc && testpc < end);
+ if (lastCaseOrDefaultPc < testpc &&
+ (testpc < caseOrDefaultPc ||
+ (j < i && testpc == caseOrDefaultPc))) {
+ lastCaseOrDefaultPc = testpc;
+ foundLastCaseOrDefault = true;
+ }
+ }
+
+        // If multiple case instructions share the same code block, only
+        // register the code coverage the first time we hit this case.
+ if (!foundLastCaseOrDefault || caseOrDefaultPc != lastCaseOrDefaultPc) {
+ uint64_t caseOrDefaultHits = 0;
+ if (sc) {
+ if (i < numCases) {
+ // Case (i + low)
+ const PCCounts* counts =
+ sc->maybeGetPCCounts(script->pcToOffset(caseOrDefaultPc));
+ if (counts) {
+ caseOrDefaultHits = counts->numExec();
+ }
+
+ // Remove fallthrough.
+ fallsThroughHits = 0;
+ if (foundLastCaseOrDefault) {
+                  // Walk from the previous case to the current one to check
+                  // whether it falls through into the current block.
+ MOZ_ASSERT(lastCaseOrDefaultPc != firstCaseOrDefaultPc - 1);
+ jsbytecode* endpc = lastCaseOrDefaultPc;
+ while (GetNextPc(endpc) < caseOrDefaultPc) {
+ endpc = GetNextPc(endpc);
+ MOZ_ASSERT(script->code() <= endpc && endpc < end);
+ }
+
+ if (BytecodeFallsThrough(JSOp(*endpc))) {
+ fallsThroughHits = script->getHitCount(endpc);
+ }
+ }
+ caseOrDefaultHits -= fallsThroughHits;
+ } else {
+ caseOrDefaultHits = defaultHits;
+ }
+ }
+
+ outBRDA_.printf("BRDA:%zu,%zu,%zu,", lineno, branchId, caseId);
+ if (hits) {
+ outBRDA_.printf("%" PRIu64 "\n", caseOrDefaultHits);
+ } else {
+ outBRDA_.put("-\n", 2);
+ }
+
+ numBranchesFound_++;
+ numBranchesHit_ += !!caseOrDefaultHits;
+ if (i < numCases) {
+ defaultHits -= caseOrDefaultHits;
+ }
+ caseId++;
+ }
+ }
+ }
+ }
+
+ if (outFN_.hadOutOfMemory() || outFNDA_.hadOutOfMemory() ||
+ outBRDA_.hadOutOfMemory()) {
+ hadOOM_ = true;
+ return;
+ }
+
+  // If this script is the top-level script, record it so that we can assume
+  // the code coverage report is complete, as this script holds references to
+  // all inner scripts.
+ if (script->isTopLevel()) {
+ hasTopLevelScript_ = true;
+ }
+}
+
+LCovRealm::LCovRealm(JS::Realm* realm)
+ : alloc_(4096), outTN_(&alloc_), sources_(alloc_) {
+ // Record realm name. If we wait until finalization, the embedding may not be
+ // able to provide us the name anymore.
+ writeRealmName(realm);
+}
+
+LCovRealm::~LCovRealm() {
+  // The LCovSource instances live in the LifoAlloc, but we must still manually
+  // invoke their destructors to avoid leaks.
+ while (!sources_.empty()) {
+ LCovSource* source = sources_.popCopy();
+ source->~LCovSource();
+ }
+}
+
+LCovSource* LCovRealm::lookupOrAdd(const char* name) {
+ // Find existing source if it exists.
+ for (LCovSource* source : sources_) {
+ if (source->match(name)) {
+ return source;
+ }
+ }
+
+ UniqueChars source_name = DuplicateString(name);
+ if (!source_name) {
+ outTN_.reportOutOfMemory();
+ return nullptr;
+ }
+
+ // Allocate a new LCovSource for the current top-level.
+ LCovSource* source = alloc_.new_<LCovSource>(&alloc_, std::move(source_name));
+ if (!source) {
+ outTN_.reportOutOfMemory();
+ return nullptr;
+ }
+
+ if (!sources_.emplaceBack(source)) {
+ outTN_.reportOutOfMemory();
+ return nullptr;
+ }
+
+ return source;
+}
+
+void LCovRealm::exportInto(GenericPrinter& out, bool* isEmpty) const {
+ if (outTN_.hadOutOfMemory()) {
+ return;
+ }
+
+  // If we only have cloned functions, do not serialize anything.
+ bool someComplete = false;
+ for (const LCovSource* sc : sources_) {
+ if (sc->isComplete()) {
+ someComplete = true;
+ break;
+    }
+ }
+
+ if (!someComplete) {
+ return;
+ }
+
+ *isEmpty = false;
+ outTN_.exportInto(out);
+ for (LCovSource* sc : sources_) {
+ // Only write if everything got recorded.
+ if (sc->isComplete()) {
+ sc->exportInto(out);
+ }
+ }
+}
+
+void LCovRealm::writeRealmName(JS::Realm* realm) {
+ JSContext* cx = TlsContext.get();
+
+  // lcov trace files start with an optional test case name, which we recycle
+  // as a realm name.
+  //
+  // Note: The test case name has constraints on which characters are valid,
+  // so we escape invalid characters with a "_" symbol in front of their
+  // hexadecimal code.
+ outTN_.put("TN:");
+ if (cx->runtime()->realmNameCallback) {
+ char name[1024];
+ {
+ // Hazard analysis cannot tell that the callback does not GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ (*cx->runtime()->realmNameCallback)(cx, realm, name, sizeof(name), nogc);
+ }
+ for (char* s = name; s < name + sizeof(name) && *s; s++) {
+ if (('a' <= *s && *s <= 'z') || ('A' <= *s && *s <= 'Z') ||
+ ('0' <= *s && *s <= '9')) {
+ outTN_.put(s, 1);
+ continue;
+ }
+ outTN_.printf("_%p", (void*)size_t(*s));
+ }
+ outTN_.put("\n", 1);
+ } else {
+ outTN_.printf("Realm_%p%p\n", (void*)size_t('_'), realm);
+ }
+}
+
+const char* LCovRealm::getScriptName(JSScript* script) {
+ JSFunction* fun = script->function();
+ if (fun && fun->displayAtom()) {
+ JSAtom* atom = fun->displayAtom();
+ size_t lenWithNull = js::PutEscapedString(nullptr, 0, atom, 0) + 1;
+ char* name = alloc_.newArray<char>(lenWithNull);
+ if (name) {
+ js::PutEscapedString(name, lenWithNull, atom, 0);
+ }
+ return name;
+ }
+ return "top-level";
+}
+
+bool gLCovIsEnabled = false;
+
+void InitLCov() {
+ const char* outDir = getenv("JS_CODE_COVERAGE_OUTPUT_DIR");
+ if (outDir && *outDir != 0) {
+ EnableLCov();
+ }
+}
+
+void EnableLCov() {
+ MOZ_ASSERT(!JSRuntime::hasLiveRuntimes(),
+ "EnableLCov must not be called after creating a runtime!");
+ gLCovIsEnabled = true;
+}
+
+LCovRuntime::LCovRuntime() : out_(), pid_(getpid()), isEmpty_(true) {}
+
+LCovRuntime::~LCovRuntime() {
+ if (out_.isInitialized()) {
+ finishFile();
+ }
+}
+
+bool LCovRuntime::fillWithFilename(char* name, size_t length) {
+ const char* outDir = getenv("JS_CODE_COVERAGE_OUTPUT_DIR");
+ if (!outDir || *outDir == 0) {
+ return false;
+ }
+
+ int64_t timestamp = static_cast<double>(PRMJ_Now()) / PRMJ_USEC_PER_SEC;
+ static mozilla::Atomic<size_t> globalRuntimeId(0);
+ size_t rid = globalRuntimeId++;
+
+ int len = snprintf(name, length, "%s/%" PRId64 "-%" PRIu32 "-%zu.info",
+ outDir, timestamp, pid_, rid);
+ if (len < 0 || size_t(len) >= length) {
+ fprintf(stderr,
+ "Warning: LCovRuntime::init: Cannot serialize file name.\n");
+ return false;
+ }
+
+ return true;
+}
+
+void LCovRuntime::init() {
+ char name[1024];
+ if (!fillWithFilename(name, sizeof(name))) {
+ return;
+ }
+
+ // If we cannot open the file, report a warning.
+ if (!out_.init(name)) {
+ fprintf(stderr,
+ "Warning: LCovRuntime::init: Cannot open file named '%s'.\n", name);
+ }
+ isEmpty_ = true;
+}
+
+void LCovRuntime::finishFile() {
+ MOZ_ASSERT(out_.isInitialized());
+ out_.finish();
+
+ if (isEmpty_) {
+ char name[1024];
+ if (!fillWithFilename(name, sizeof(name))) {
+ return;
+ }
+ remove(name);
+ }
+}
+
+void LCovRuntime::writeLCovResult(LCovRealm& realm) {
+ if (!out_.isInitialized()) {
+ init();
+ if (!out_.isInitialized()) {
+ return;
+ }
+ }
+
+ uint32_t p = getpid();
+ if (pid_ != p) {
+ pid_ = p;
+ finishFile();
+ init();
+ if (!out_.isInitialized()) {
+ return;
+ }
+ }
+
+ realm.exportInto(out_, &isEmpty_);
+ out_.flush();
+ finishFile();
+}
+
+bool InitScriptCoverage(JSContext* cx, JSScript* script) {
+ MOZ_ASSERT(IsLCovEnabled());
+ MOZ_ASSERT(script->hasBytecode(),
+ "Only initialize coverage data for fully initialized scripts.");
+
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ const char* filename = script->filename();
+ if (!filename) {
+ return true;
+ }
+
+ // Create LCovRealm if necessary.
+ LCovRealm* lcovRealm = script->realm()->lcovRealm();
+ if (!lcovRealm) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Create LCovSource if necessary.
+ LCovSource* source = lcovRealm->lookupOrAdd(filename);
+ if (!source) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+  // Compute the formatted script name.
+ const char* scriptName = lcovRealm->getScriptName(script);
+ if (!scriptName) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Create Zone::scriptLCovMap if necessary.
+ JS::Zone* zone = script->zone();
+ if (!zone->scriptLCovMap) {
+ zone->scriptLCovMap = cx->make_unique<ScriptLCovMap>();
+ }
+ if (!zone->scriptLCovMap) {
+ return false;
+ }
+
+ MOZ_ASSERT(script->hasBytecode());
+
+ // Save source in map for when we collect coverage.
+ if (!zone->scriptLCovMap->putNew(script,
+ std::make_tuple(source, scriptName))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool CollectScriptCoverage(JSScript* script, bool finalizing) {
+ MOZ_ASSERT(IsLCovEnabled());
+
+ ScriptLCovMap* map = script->zone()->scriptLCovMap.get();
+ if (!map) {
+ return false;
+ }
+
+ auto p = map->lookup(script);
+ if (!p.found()) {
+ return false;
+ }
+
+ auto [source, scriptName] = p->value();
+
+ if (script->hasBytecode()) {
+ source->writeScript(script, scriptName);
+ }
+
+ if (finalizing) {
+ map->remove(p);
+ }
+
+  // Propagate the failure in case the caller wants to terminate early.
+ return !source->hadOutOfMemory();
+}
+
+} // namespace coverage
+} // namespace js
diff --git a/js/src/vm/CodeCoverage.h b/js/src/vm/CodeCoverage.h
new file mode 100644
index 0000000000..6ebfee6b13
--- /dev/null
+++ b/js/src/vm/CodeCoverage.h
@@ -0,0 +1,172 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_CodeCoverage_h
+#define vm_CodeCoverage_h
+
+#include "mozilla/Vector.h"
+
+#include "ds/LifoAlloc.h"
+
+#include "js/AllocPolicy.h"
+#include "js/HashTable.h"
+#include "js/Printer.h"
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+
+namespace js {
+namespace coverage {
+
+class LCovSource {
+ public:
+ LCovSource(LifoAlloc* alloc, JS::UniqueChars name);
+
+ // Whether the given script name matches this LCovSource.
+ bool match(const char* name) const { return strcmp(name_.get(), name) == 0; }
+
+ // Whether an OOM was seen recording coverage information. This indicates
+ // that the resulting coverage information is incomplete.
+ bool hadOutOfMemory() const { return hadOOM_; }
+
+  // Whether the current source is complete and can be flushed.
+ bool isComplete() const { return hasTopLevelScript_; }
+
+ // Iterate over the bytecode and collect the lcov output based on the
+ // ScriptCounts counters.
+ void writeScript(JSScript* script, const char* scriptName);
+
+ // Write the Lcov output in a buffer, such as the one associated with
+ // the runtime code coverage trace file.
+ void exportInto(GenericPrinter& out);
+
+ private:
+ // Name of the source file.
+ JS::UniqueChars name_;
+
+  // LifoAlloc strings which hold the name of each function as well as the
+  // number of hits for each function.
+ LSprinter outFN_;
+ LSprinter outFNDA_;
+ size_t numFunctionsFound_;
+ size_t numFunctionsHit_;
+
+  // LifoAlloc string which holds branch statistics.
+ LSprinter outBRDA_;
+ size_t numBranchesFound_;
+ size_t numBranchesHit_;
+
+  // Holds line statistics. When processing a line hit count, the hit count
+ // is added to any hit count already in the hash map so that we handle
+ // lines that belong to more than one JSScript or function in the same
+ // source file.
+ HashMap<size_t, uint64_t, DefaultHasher<size_t>, SystemAllocPolicy> linesHit_;
+ size_t numLinesInstrumented_;
+ size_t numLinesHit_;
+ size_t maxLineHit_;
+
+ // Status flags.
+ bool hasTopLevelScript_ : 1;
+ bool hadOOM_ : 1;
+};
+
+class LCovRealm {
+ public:
+ explicit LCovRealm(JS::Realm* realm);
+ ~LCovRealm();
+
+ // Write the Lcov output in a buffer, such as the one associated with
+ // the runtime code coverage trace file.
+ void exportInto(GenericPrinter& out, bool* isEmpty) const;
+
+ friend bool InitScriptCoverage(JSContext* cx, JSScript* script);
+
+ private:
+ // Write the realm name in outTN_.
+ void writeRealmName(JS::Realm* realm);
+
+ // Return the LCovSource entry which matches the given ScriptSourceObject.
+ LCovSource* lookupOrAdd(const char* name);
+
+ // Generate escaped form of script atom and allocate inside our LifoAlloc if
+ // necessary.
+ const char* getScriptName(JSScript* script);
+
+ private:
+ typedef mozilla::Vector<LCovSource*, 16, LifoAllocPolicy<Fallible>>
+ LCovSourceVector;
+
+ // LifoAlloc backend for all temporary allocations needed to stash the
+ // strings to be written in the file.
+ LifoAlloc alloc_;
+
+  // LifoAlloc string which holds the name of the realm.
+ LSprinter outTN_;
+
+ // Vector of all sources which are used in this realm. The entries are
+ // allocated within the LifoAlloc.
+ LCovSourceVector sources_;
+};
+
+class LCovRuntime {
+ public:
+ LCovRuntime();
+ ~LCovRuntime();
+
+  // If the environment variable JS_CODE_COVERAGE_OUTPUT_DIR is set to a
+  // directory, create a file inside this directory whose name combines a
+  // timestamp, the process ID and a per-process runtime counter to ensure
+  // the uniqueness of the file.
+  //
+  // At the end of the execution, this file should contain the LCOV output of
+  // all the scripts executed in the current JSRuntime.
+ void init();
+
+ // Write the aggregated result of the code coverage of a realm
+ // into a file.
+ void writeLCovResult(LCovRealm& realm);
+
+ private:
+ // Fill an array with the name of the file. Return false if we are unable to
+ // serialize the filename in this array.
+ bool fillWithFilename(char* name, size_t length);
+
+  // Finish the currently opened file, and remove it if it does not have any
+  // content.
+ void finishFile();
+
+ private:
+ // Output file which is created if code coverage is enabled.
+ Fprinter out_;
+
+  // The process' PID is used to watch for forks. When the process forks,
+  // we want to close the current file and open a new one.
+ uint32_t pid_;
+
+  // Flag used to report whether the generated file is empty. If it is still
+  // empty when the runtime is destroyed, the file is removed, as an empty
+  // file is not a valid LCov file.
+ bool isEmpty_;
+};
+
+void InitLCov();
+
+void EnableLCov();
+
+inline bool IsLCovEnabled() {
+ extern bool gLCovIsEnabled;
+ return gLCovIsEnabled;
+}
+
+// Initialize coverage info to track code coverage for a JSScript.
+bool InitScriptCoverage(JSContext* cx, JSScript* script);
+
+// Collect the code-coverage data from a script into relevant LCovSource.
+bool CollectScriptCoverage(JSScript* script, bool finalizing);
+
+} // namespace coverage
+} // namespace js
+
+#endif // vm_CodeCoverage_h
diff --git a/js/src/vm/CommonPropertyNames.h b/js/src/vm/CommonPropertyNames.h
new file mode 100644
index 0000000000..cc818ea068
--- /dev/null
+++ b/js/src/vm/CommonPropertyNames.h
@@ -0,0 +1,619 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* A higher-order macro for enumerating all cached property names. */
+
+#ifndef vm_CommonPropertyNames_h
+#define vm_CommonPropertyNames_h
+
+// The following common atoms are reserved by the js::StaticStrings /
+// js::frontend::WellKnownParserAtoms{,_ROM} mechanisms. We still provide a
+// named reference for the parser and VM to use.
+//
+// Parameter list is (IDPART, ID, TEXT).
+//
+// Each entry should use one of MACRO* based on the length of TEXT
+// * MACRO0: length-0 text
+// * MACRO1: length-1 text
+// * MACRO2: length-2 text
+// * MACRO_: other text
+#define FOR_EACH_COMMON_PROPERTYNAME_(MACRO0, MACRO1, MACRO2, MACRO_) \
+ MACRO_(abort, abort, "abort") \
+ IF_DECORATORS(MACRO_(access, access, "access")) \
+ IF_DECORATORS(MACRO_(accessor, accessor, "accessor")) \
+ MACRO_(add, add, "add") \
+ IF_DECORATORS(MACRO_(addInitializer, addInitializer, "addInitializer")) \
+ MACRO_(allowContentIter, allowContentIter, "allowContentIter") \
+ MACRO_(allowContentIterWith, allowContentIterWith, "allowContentIterWith") \
+ MACRO_(ambiguous, ambiguous, "ambiguous") \
+ MACRO_(anonymous, anonymous, "anonymous") \
+ MACRO_(Any, Any, "Any") \
+ MACRO_(apply, apply, "apply") \
+ MACRO_(approximatelySign, approximatelySign, "approximatelySign") \
+ MACRO_(arguments, arguments, "arguments") \
+ MACRO_(ArgumentsLength, ArgumentsLength, "ArgumentsLength") \
+ MACRO_(ArrayBufferSpecies, ArrayBufferSpecies, "$ArrayBufferSpecies") \
+ MACRO_(ArrayIterator, ArrayIterator, "Array Iterator") \
+ MACRO_(ArrayIteratorNext, ArrayIteratorNext, "ArrayIteratorNext") \
+ MACRO_(ArraySort, ArraySort, "ArraySort") \
+ MACRO_(ArraySpecies, ArraySpecies, "$ArraySpecies") \
+ MACRO_(ArraySpeciesCreate, ArraySpeciesCreate, "ArraySpeciesCreate") \
+ MACRO_(ArrayToLocaleString, ArrayToLocaleString, "ArrayToLocaleString") \
+ MACRO_(ArrayType, ArrayType, "ArrayType") \
+ MACRO_(ArrayValues, ArrayValues, "$ArrayValues") \
+ MACRO2(as, as, "as") \
+ MACRO_(assert, assert_, "assert") \
+ MACRO_(Async, Async, "Async") \
+ MACRO_(async, async, "async") \
+ MACRO_(AsyncFromSyncIterator, AsyncFromSyncIterator, \
+ "Async-from-Sync Iterator") \
+ MACRO_(AsyncFunctionNext, AsyncFunctionNext, "AsyncFunctionNext") \
+ MACRO_(AsyncFunctionThrow, AsyncFunctionThrow, "AsyncFunctionThrow") \
+ MACRO_(AsyncGenerator, AsyncGenerator, "AsyncGenerator") \
+ MACRO_(AsyncGeneratorNext, AsyncGeneratorNext, "AsyncGeneratorNext") \
+ MACRO_(AsyncGeneratorReturn, AsyncGeneratorReturn, "AsyncGeneratorReturn") \
+ MACRO_(AsyncGeneratorThrow, AsyncGeneratorThrow, "AsyncGeneratorThrow") \
+ MACRO_(AsyncWrapped, AsyncWrapped, "AsyncWrapped") \
+ MACRO2(at, at, "at") \
+ MACRO_(autoAllocateChunkSize, autoAllocateChunkSize, \
+ "autoAllocateChunkSize") \
+ MACRO_(await, await, "await") \
+ MACRO_(bigint64, bigint64, "bigint64") \
+ MACRO_(biguint64, biguint64, "biguint64") \
+ MACRO_(bound, bound, "bound") \
+ MACRO_(boundWithSpace, boundWithSpace, "bound ") \
+ MACRO_(break, break_, "break") \
+ MACRO_(breakdown, breakdown, "breakdown") \
+ MACRO_(buffer, buffer, "buffer") \
+ MACRO_(builder, builder, "builder") \
+ MACRO2(by, by, "by") \
+ MACRO_(byob, byob, "byob") \
+ MACRO_(byteAlignment, byteAlignment, "byteAlignment") \
+ MACRO_(byteLength, byteLength, "byteLength") \
+ MACRO_(byteOffset, byteOffset, "byteOffset") \
+ MACRO_(bytes, bytes, "bytes") \
+ MACRO_(BYTES_PER_ELEMENT, BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT") \
+ MACRO_(calendar, calendar, "calendar") \
+ MACRO_(call, call, "call") \
+ MACRO_(callContentFunction, callContentFunction, "callContentFunction") \
+ MACRO_(callee, callee, "callee") \
+ MACRO_(caller, caller, "caller") \
+ MACRO_(callFunction, callFunction, "callFunction") \
+ MACRO_(cancel, cancel, "cancel") \
+ MACRO_(case, case_, "case") \
+ MACRO_(caseFirst, caseFirst, "caseFirst") \
+ MACRO_(catch, catch_, "catch") \
+ MACRO_(cause, cause, "cause") \
+ MACRO_(class, class_, "class") \
+ MACRO_(cleanupSome, cleanupSome, "cleanupSome") \
+ MACRO_(close, close, "close") \
+ MACRO_(collation, collation, "collation") \
+ MACRO_(collections, collections, "collections") \
+ MACRO_(columnNumber, columnNumber, "columnNumber") \
+ MACRO1(comma, comma, ",") \
+ MACRO_(compact, compact, "compact") \
+ MACRO_(compactDisplay, compactDisplay, "compactDisplay") \
+ MACRO_(compare, compare, "compare") \
+ MACRO_(configurable, configurable, "configurable") \
+ MACRO_(const, const_, "const") \
+ MACRO_(construct, construct, "construct") \
+ MACRO_(constructContentFunction, constructContentFunction, \
+ "constructContentFunction") \
+ MACRO_(constructor, constructor, "constructor") \
+ MACRO_(continue, continue_, "continue") \
+ MACRO_(CopyDataProperties, CopyDataProperties, "CopyDataProperties") \
+ MACRO_(CopyDataPropertiesUnfiltered, CopyDataPropertiesUnfiltered, \
+ "CopyDataPropertiesUnfiltered") \
+ MACRO_(copyWithin, copyWithin, "copyWithin") \
+ MACRO_(count, count, "count") \
+ MACRO_(CreateResolvingFunctions, CreateResolvingFunctions, \
+ "CreateResolvingFunctions") \
+ MACRO_(currency, currency, "currency") \
+ MACRO_(currencyDisplay, currencyDisplay, "currencyDisplay") \
+ MACRO_(currencySign, currencySign, "currencySign") \
+ MACRO_(dateStyle, dateStyle, "dateStyle") \
+ MACRO_(day, day, "day") \
+ MACRO_(dayPeriod, dayPeriod, "dayPeriod") \
+ MACRO_(debugger, debugger, "debugger") \
+ MACRO_(decimal, decimal, "decimal") \
+ MACRO_(decodeURI, decodeURI, "decodeURI") \
+ MACRO_(decodeURIComponent, decodeURIComponent, "decodeURIComponent") \
+ MACRO_(default, default_, "default") \
+ MACRO_(defineDataPropertyIntrinsic, defineDataPropertyIntrinsic, \
+ "DefineDataProperty") \
+ MACRO_(defineGetter, defineGetter, "__defineGetter__") \
+ MACRO_(defineProperty, defineProperty, "defineProperty") \
+ MACRO_(defineSetter, defineSetter, "__defineSetter__") \
+ MACRO_(delete, delete_, "delete") \
+ MACRO_(deleteProperty, deleteProperty, "deleteProperty") \
+ MACRO_(difference, difference, "difference") \
+ MACRO_(displayURL, displayURL, "displayURL") \
+ MACRO2(do, do_, "do") \
+ MACRO_(domNode, domNode, "domNode") \
+ MACRO_(done, done, "done") \
+ MACRO_(dotAll, dotAll, "dotAll") \
+ MACRO_(dotArgs, dotArgs, ".args") \
+ MACRO_(dotFieldKeys, dotFieldKeys, ".fieldKeys") \
+ MACRO_(dotGenerator, dotGenerator, ".generator") \
+ MACRO_(dotInitializers, dotInitializers, ".initializers") \
+ MACRO_(dotNewTarget, dotNewTarget, ".newTarget") \
+ MACRO_(dotPrivateBrand, dotPrivateBrand, ".privateBrand") \
+ MACRO_(dotStaticFieldKeys, dotStaticFieldKeys, ".staticFieldKeys") \
+ MACRO_(dotStaticInitializers, dotStaticInitializers, ".staticInitializers") \
+ MACRO_(dotThis, dotThis, ".this") \
+ MACRO_(each, each, "each") \
+ MACRO_(element, element, "element") \
+ MACRO_(elementType, elementType, "elementType") \
+ MACRO_(else, else_, "else") \
+ MACRO0(empty, empty, "") \
+ MACRO_(emptyRegExp, emptyRegExp, "(?:)") \
+ MACRO_(encodeURI, encodeURI, "encodeURI") \
+ MACRO_(encodeURIComponent, encodeURIComponent, "encodeURIComponent") \
+ MACRO_(endRange, endRange, "endRange") \
+ MACRO_(endTimestamp, endTimestamp, "endTimestamp") \
+ MACRO_(entries, entries, "entries") \
+ MACRO_(enum, enum_, "enum") \
+ MACRO_(enumerable, enumerable, "enumerable") \
+ MACRO_(enumerate, enumerate, "enumerate") \
+ MACRO_(era, era, "era") \
+ MACRO_(errors, errors, "errors") \
+ MACRO_(ErrorToStringWithTrailingNewline, ErrorToStringWithTrailingNewline, \
+ "ErrorToStringWithTrailingNewline") \
+ MACRO_(escape, escape, "escape") \
+ MACRO_(eval, eval, "eval") \
+ MACRO_(exec, exec, "exec") \
+ MACRO_(exponentInteger, exponentInteger, "exponentInteger") \
+ MACRO_(exponentMinusSign, exponentMinusSign, "exponentMinusSign") \
+ MACRO_(exponentSeparator, exponentSeparator, "exponentSeparator") \
+ MACRO_(export, export_, "export") \
+ MACRO_(extends, extends, "extends") \
+ MACRO_(false, false_, "false") \
+ MACRO_(few, few, "few") \
+ IF_DECORATORS(MACRO_(field, field, "field")) \
+ MACRO_(fieldOffsets, fieldOffsets, "fieldOffsets") \
+ MACRO_(fieldTypes, fieldTypes, "fieldTypes") \
+ MACRO_(fileName, fileName, "fileName") \
+ MACRO_(fill, fill, "fill") \
+ MACRO_(finally, finally_, "finally") \
+ MACRO_(find, find, "find") \
+ MACRO_(findIndex, findIndex, "findIndex") \
+ MACRO_(findLast, findLast, "findLast") \
+ MACRO_(findLastIndex, findLastIndex, "findLastIndex") \
+ MACRO_(firstDayOfWeek, firstDayOfWeek, "firstDayOfWeek") \
+ MACRO_(fix, fix, "fix") \
+ MACRO_(flags, flags, "flags") \
+ MACRO_(flat, flat, "flat") \
+ MACRO_(flatMap, flatMap, "flatMap") \
+ MACRO_(float32, float32, "float32") \
+ MACRO_(float64, float64, "float64") \
+ MACRO_(for, for_, "for") \
+ MACRO_(forceInterpreter, forceInterpreter, "forceInterpreter") \
+ MACRO_(forEach, forEach, "forEach") \
+ MACRO_(format, format, "format") \
+ MACRO_(fraction, fraction, "fraction") \
+ MACRO_(fractionalSecond, fractionalSecond, "fractionalSecond") \
+ MACRO_(fractionalSecondDigits, fractionalSecondDigits, \
+ "fractionalSecondDigits") \
+ MACRO_(frame, frame, "frame") \
+ MACRO_(from, from, "from") \
+ MACRO_(fromAsync, fromAsync, "fromAsync") \
+ MACRO_(fulfilled, fulfilled, "fulfilled") \
+ MACRO_(futexNotEqual, futexNotEqual, "not-equal") \
+ MACRO2(futexOK, futexOK, "ok") \
+ MACRO_(futexTimedOut, futexTimedOut, "timed-out") \
+ MACRO_(GatherAsyncParentCompletions, GatherAsyncParentCompletions, \
+ "GatherAsyncParentCompletions") \
+ MACRO_(gcCycleNumber, gcCycleNumber, "gcCycleNumber") \
+ MACRO_(Generator, Generator, "Generator") \
+ MACRO_(GeneratorNext, GeneratorNext, "GeneratorNext") \
+ MACRO_(GeneratorReturn, GeneratorReturn, "GeneratorReturn") \
+ MACRO_(GeneratorThrow, GeneratorThrow, "GeneratorThrow") \
+ MACRO_(get, get, "get") \
+ IF_DECORATORS(MACRO_(getter, getter, "getter")) \
+ MACRO_(GetAggregateError, GetAggregateError, "GetAggregateError") \
+ MACRO_(GetArgument, GetArgument, "GetArgument") \
+ MACRO_(getBigInt64, getBigInt64, "getBigInt64") \
+ MACRO_(getBigUint64, getBigUint64, "getBigUint64") \
+ MACRO_(GetBuiltinConstructor, GetBuiltinConstructor, \
+ "GetBuiltinConstructor") \
+ MACRO_(GetBuiltinPrototype, GetBuiltinPrototype, "GetBuiltinPrototype") \
+ MACRO_(GetBuiltinSymbol, GetBuiltinSymbol, "GetBuiltinSymbol") \
+ MACRO_(GetInternalError, GetInternalError, "GetInternalError") \
+ MACRO_(getInternals, getInternals, "getInternals") \
+ MACRO_(GetModuleNamespace, GetModuleNamespace, "GetModuleNamespace") \
+ MACRO_(getOwnPropertyDescriptor, getOwnPropertyDescriptor, \
+ "getOwnPropertyDescriptor") \
+ MACRO_(getOwnPropertyNames, getOwnPropertyNames, "getOwnPropertyNames") \
+ MACRO_(getPropertySuper, getPropertySuper, "getPropertySuper") \
+ MACRO_(getPrototypeOf, getPrototypeOf, "getPrototypeOf") \
+ MACRO_(GetTypeError, GetTypeError, "GetTypeError") \
+ MACRO_(global, global, "global") \
+ MACRO_(globalThis, globalThis, "globalThis") \
+ MACRO_(group, group, "group") \
+ MACRO_(groups, groups, "groups") \
+ MACRO_(groupToMap, groupToMap, "groupToMap") \
+ MACRO_(h11, h11, "h11") \
+ MACRO_(h12, h12, "h12") \
+ MACRO_(h23, h23, "h23") \
+ MACRO_(h24, h24, "h24") \
+ MACRO_(Handle, Handle, "Handle") \
+ MACRO_(has, has, "has") \
+ MACRO_(hashConstructor, hashConstructor, "#constructor") \
+ MACRO_(hasIndices, hasIndices, "hasIndices") \
+ MACRO_(hasOwn, hasOwn, "hasOwn") \
+ MACRO_(hasOwnProperty, hasOwnProperty, "hasOwnProperty") \
+ MACRO_(highWaterMark, highWaterMark, "highWaterMark") \
+ MACRO_(hour, hour, "hour") \
+ MACRO_(hour12, hour12, "hour12") \
+ MACRO_(hourCycle, hourCycle, "hourCycle") \
+ MACRO2(if, if_, "if") \
+ MACRO_(ignoreCase, ignoreCase, "ignoreCase") \
+ MACRO_(ignorePunctuation, ignorePunctuation, "ignorePunctuation") \
+ MACRO_(implements, implements, "implements") \
+ MACRO_(import, import, "import") \
+ MACRO2(in, in, "in") \
+ MACRO_(includes, includes, "includes") \
+ MACRO_(incumbentGlobal, incumbentGlobal, "incumbentGlobal") \
+ MACRO_(index, index, "index") \
+ MACRO_(indices, indices, "indices") \
+ MACRO_(infinity, infinity, "infinity") \
+ MACRO_(intersection, intersection, "intersection") \
+ MACRO_(Infinity, Infinity, "Infinity") \
+ MACRO_(initial, initial, "initial") \
+ MACRO_(InitializeCollator, InitializeCollator, "InitializeCollator") \
+ MACRO_(InitializeDateTimeFormat, InitializeDateTimeFormat, \
+ "InitializeDateTimeFormat") \
+ MACRO_(InitializeDisplayNames, InitializeDisplayNames, \
+ "InitializeDisplayNames") \
+ MACRO_(InitializeListFormat, InitializeListFormat, "InitializeListFormat") \
+ MACRO_(InitializeLocale, InitializeLocale, "InitializeLocale") \
+ MACRO_(InitializeNumberFormat, InitializeNumberFormat, \
+ "InitializeNumberFormat") \
+ MACRO_(InitializePluralRules, InitializePluralRules, \
+ "InitializePluralRules") \
+ MACRO_(InitializeRelativeTimeFormat, InitializeRelativeTimeFormat, \
+ "InitializeRelativeTimeFormat") \
+ MACRO_(innermost, innermost, "innermost") \
+ MACRO_(inNursery, inNursery, "inNursery") \
+ MACRO_(input, input, "input") \
+ MACRO_(instanceof, instanceof, "instanceof") \
+ MACRO_(int8, int8, "int8") \
+ MACRO_(int16, int16, "int16") \
+ MACRO_(int32, int32, "int32") \
+ MACRO_(integer, integer, "integer") \
+ MACRO_(interface, interface, "interface") \
+ MACRO_(InterpretGeneratorResume, InterpretGeneratorResume, \
+ "InterpretGeneratorResume") \
+ MACRO_(InvalidDate, InvalidDate, "Invalid Date") \
+ MACRO_(isBreakpoint, isBreakpoint, "isBreakpoint") \
+ IF_DECORATORS(MACRO_(IsCallable, IsCallable, "IsCallable")) \
+ MACRO_(isDisjointFrom, isDisjointFrom, "isDisjointFrom") \
+ MACRO_(isEntryPoint, isEntryPoint, "isEntryPoint") \
+ MACRO_(isExtensible, isExtensible, "isExtensible") \
+ MACRO_(isFinite, isFinite, "isFinite") \
+ MACRO_(isNaN, isNaN, "isNaN") \
+ MACRO_(IsNullOrUndefined, IsNullOrUndefined, "IsNullOrUndefined") \
+ MACRO_(isPrototypeOf, isPrototypeOf, "isPrototypeOf") \
+ MACRO_(isStepStart, isStepStart, "isStepStart") \
+ MACRO_(isSubsetOf, isSubsetOf, "isSubsetOf") \
+ MACRO_(isSupersetOf, isSupersetOf, "isSupersetOf") \
+ MACRO_(IterableToList, IterableToList, "IterableToList") \
+ MACRO_(iterate, iterate, "iterate") \
+ MACRO_(join, join, "join") \
+ MACRO2(js, js, "js") \
+ MACRO_(keys, keys, "keys") \
+ IF_DECORATORS(MACRO_(kind, kind, "kind")) \
+ MACRO_(label, label, "label") \
+ MACRO_(language, language, "language") \
+ MACRO_(lastIndex, lastIndex, "lastIndex") \
+ MACRO_(length, length, "length") \
+ MACRO_(let, let, "let") \
+ MACRO_(line, line, "line") \
+ MACRO_(lineNumber, lineNumber, "lineNumber") \
+ MACRO_(literal, literal, "literal") \
+ MACRO_(loc, loc, "loc") \
+ MACRO_(locale, locale, "locale") \
+ MACRO_(lookupGetter, lookupGetter, "__lookupGetter__") \
+ MACRO_(lookupSetter, lookupSetter, "__lookupSetter__") \
+ MACRO_(many, many, "many") \
+ MACRO_(MapConstructorInit, MapConstructorInit, "MapConstructorInit") \
+ MACRO_(MapIterator, MapIterator, "Map Iterator") \
+ MACRO_(maxColumn, maxColumn, "maxColumn") \
+ MACRO_(maximum, maximum, "maximum") \
+ MACRO_(maximumFractionDigits, maximumFractionDigits, \
+ "maximumFractionDigits") \
+ MACRO_(maximumSignificantDigits, maximumSignificantDigits, \
+ "maximumSignificantDigits") \
+ MACRO_(maxLine, maxLine, "maxLine") \
+ MACRO_(maxOffset, maxOffset, "maxOffset") \
+ MACRO_(message, message, "message") \
+ IF_DECORATORS(MACRO_(method, method, "method")) \
+ MACRO_(meta, meta, "meta") \
+ MACRO_(minColumn, minColumn, "minColumn") \
+ MACRO_(minDays, minDays, "minDays") \
+ MACRO_(minimum, minimum, "minimum") \
+ MACRO_(minimumFractionDigits, minimumFractionDigits, \
+ "minimumFractionDigits") \
+ MACRO_(minimumIntegerDigits, minimumIntegerDigits, "minimumIntegerDigits") \
+ MACRO_(minimumSignificantDigits, minimumSignificantDigits, \
+ "minimumSignificantDigits") \
+ MACRO_(minLine, minLine, "minLine") \
+ MACRO_(minOffset, minOffset, "minOffset") \
+ MACRO_(minusSign, minusSign, "minusSign") \
+ MACRO_(minute, minute, "minute") \
+ MACRO_(missingArguments, missingArguments, "missingArguments") \
+ MACRO_(mode, mode, "mode") \
+ MACRO_(module, module, "module") \
+ MACRO_(Module, Module, "Module") \
+ MACRO_(ModuleEvaluate, ModuleEvaluate, "ModuleEvaluate") \
+ MACRO_(ModuleInstantiate, ModuleInstantiate, "ModuleInstantiate") \
+ MACRO_(month, month, "month") \
+ MACRO_(multiline, multiline, "multiline") \
+ MACRO_(mutable, mutable_, "mutable") \
+ MACRO_(name, name, "name") \
+ MACRO_(nan, nan, "nan") \
+ MACRO_(NaN, NaN, "NaN") \
+ MACRO_(NegativeInfinity, NegativeInfinity, "-Infinity") \
+ MACRO_(new, new_, "new") \
+ MACRO_(next, next, "next") \
+ MACRO_(NFC, NFC, "NFC") \
+ MACRO_(NFD, NFD, "NFD") \
+ MACRO_(NFKC, NFKC, "NFKC") \
+ MACRO_(NFKD, NFKD, "NFKD") \
+ MACRO_(noFilename, noFilename, "noFilename") \
+ MACRO_(nonincrementalReason, nonincrementalReason, "nonincrementalReason") \
+ MACRO_(NoPrivateGetter, NoPrivateGetter, "NoPrivateGetter") \
+ MACRO_(noStack, noStack, "noStack") \
+ MACRO_(notation, notation, "notation") \
+ MACRO_(notes, notes, "notes") \
+ MACRO_(null, null, "null") \
+ MACRO_(numberingSystem, numberingSystem, "numberingSystem") \
+ MACRO_(numeric, numeric, "numeric") \
+ MACRO_(objectArguments, objectArguments, "[object Arguments]") \
+ MACRO_(objectArray, objectArray, "[object Array]") \
+ MACRO_(objectBigInt, objectBigInt, "[object BigInt]") \
+ MACRO_(objectBoolean, objectBoolean, "[object Boolean]") \
+ MACRO_(objectDate, objectDate, "[object Date]") \
+ MACRO_(objectError, objectError, "[object Error]") \
+ MACRO_(objectFunction, objectFunction, "[object Function]") \
+ MACRO_(objectNull, objectNull, "[object Null]") \
+ MACRO_(objectNumber, objectNumber, "[object Number]") \
+ MACRO_(objectObject, objectObject, "[object Object]") \
+ IF_RECORD_TUPLE(MACRO_(objectRecord, objectRecord, "[object Record]")) \
+ MACRO_(objectRegExp, objectRegExp, "[object RegExp]") \
+ MACRO_(objects, objects, "objects") \
+ MACRO_(objectString, objectString, "[object String]") \
+ MACRO_(objectSymbol, objectSymbol, "[object Symbol]") \
+ IF_RECORD_TUPLE(MACRO_(objectTuple, objectTuple, "[object Tuple]")) \
+ MACRO_(objectUndefined, objectUndefined, "[object Undefined]") \
+ MACRO_(Object_valueOf, Object_valueOf, "Object_valueOf") \
+ MACRO2(of, of, "of") \
+ MACRO_(offset, offset, "offset") \
+ MACRO_(one, one, "one") \
+ MACRO_(optimizedOut, optimizedOut, "optimizedOut") \
+ MACRO_(other, other, "other") \
+ MACRO_(outOfMemory, outOfMemory, "out of memory") \
+ MACRO_(ownKeys, ownKeys, "ownKeys") \
+ MACRO_(package, package, "package") \
+ MACRO_(parameters, parameters, "parameters") \
+ MACRO_(parseFloat, parseFloat, "parseFloat") \
+ MACRO_(parseInt, parseInt, "parseInt") \
+ MACRO_(pattern, pattern, "pattern") \
+ MACRO_(pending, pending, "pending") \
+ MACRO_(percentSign, percentSign, "percentSign") \
+ MACRO_(pipeTo, pipeTo, "pipeTo") \
+ MACRO_(plusSign, plusSign, "plusSign") \
+ MACRO_(preventAbort, preventAbort, "preventAbort") \
+ MACRO_(preventCancel, preventCancel, "preventCancel") \
+ MACRO_(preventClose, preventClose, "preventClose") \
+ MACRO_(preventExtensions, preventExtensions, "preventExtensions") \
+ MACRO_(private, private_, "private") \
+ MACRO_(promise, promise, "promise") \
+ MACRO_(propertyIsEnumerable, propertyIsEnumerable, "propertyIsEnumerable") \
+ MACRO_(protected, protected_, "protected") \
+ MACRO_(proto, proto, "__proto__") \
+ MACRO_(prototype, prototype, "prototype") \
+ MACRO_(proxy, proxy, "proxy") \
+ MACRO_(public, public_, "public") \
+ MACRO_(pull, pull, "pull") \
+ MACRO_(quarter, quarter, "quarter") \
+ MACRO_(raw, raw, "raw") \
+ MACRO_(reason, reason, "reason") \
+ MACRO_(RegExpFlagsGetter, RegExpFlagsGetter, "$RegExpFlagsGetter") \
+ MACRO_(RegExpStringIterator, RegExpStringIterator, "RegExp String Iterator") \
+ MACRO_(RegExpToString, RegExpToString, "$RegExpToString") \
+ MACRO_(RegExp_prototype_Exec, RegExp_prototype_Exec, \
+ "RegExp_prototype_Exec") \
+ MACRO_(region, region, "region") \
+ MACRO_(register, register_, "register") \
+ MACRO_(Reify, Reify, "Reify") \
+ MACRO_(reject, reject, "reject") \
+ MACRO_(rejected, rejected, "rejected") \
+ MACRO_(relatedYear, relatedYear, "relatedYear") \
+ MACRO_(RelativeTimeFormatFormat, RelativeTimeFormatFormat, \
+ "Intl_RelativeTimeFormat_Format") \
+ MACRO_(RequireObjectCoercible, RequireObjectCoercible, \
+ "RequireObjectCoercible") \
+ MACRO_(resolve, resolve, "resolve") \
+ MACRO_(result, result, "result") \
+ MACRO_(results, results, "results") \
+ MACRO_(resumeGenerator, resumeGenerator, "resumeGenerator") \
+ MACRO_(return, return_, "return") \
+ MACRO_(revoke, revoke, "revoke") \
+ MACRO_(roundingIncrement, roundingIncrement, "roundingIncrement") \
+ MACRO_(roundingMode, roundingMode, "roundingMode") \
+ MACRO_(roundingPriority, roundingPriority, "roundingPriority") \
+ MACRO_(script, script, "script") \
+ MACRO_(scripts, scripts, "scripts") \
+ MACRO_(second, second, "second") \
+ MACRO_(selfHosted, selfHosted, "self-hosted") \
+ MACRO_(sensitivity, sensitivity, "sensitivity") \
+ MACRO_(set, set, "set") \
+ IF_DECORATORS(MACRO_(setter, setter, "setter")) \
+ MACRO_(setBigInt64, setBigInt64, "setBigInt64") \
+ MACRO_(setBigUint64, setBigUint64, "setBigUint64") \
+ MACRO_(SetCanonicalName, SetCanonicalName, "SetCanonicalName") \
+ MACRO_(SetConstructorInit, SetConstructorInit, "SetConstructorInit") \
+ MACRO_(SetIsInlinableLargeFunction, SetIsInlinableLargeFunction, \
+ "SetIsInlinableLargeFunction") \
+ MACRO_(SetIterator, SetIterator, "Set Iterator") \
+ MACRO_(setPrototypeOf, setPrototypeOf, "setPrototypeOf") \
+ MACRO_(shape, shape, "shape") \
+ MACRO_(shared, shared, "shared") \
+ MACRO_(signal, signal, "signal") \
+ MACRO_(signDisplay, signDisplay, "signDisplay") \
+ MACRO_(size, size, "size") \
+ MACRO_(skeleton, skeleton, "skeleton") \
+ MACRO_(source, source, "source") \
+ MACRO_(SpeciesConstructor, SpeciesConstructor, "SpeciesConstructor") \
+ MACRO_(stack, stack, "stack") \
+ MACRO1(star, star, "*") \
+ MACRO_(starNamespaceStar, starNamespaceStar, "*namespace*") \
+ MACRO_(start, start, "start") \
+ MACRO_(startRange, startRange, "startRange") \
+ MACRO_(startTimestamp, startTimestamp, "startTimestamp") \
+ MACRO_(state, state, "state") \
+ MACRO_(static, static_, "static") \
+ MACRO_(status, status, "status") \
+ MACRO_(sticky, sticky, "sticky") \
+ MACRO_(StringIterator, StringIterator, "String Iterator") \
+ MACRO_(strings, strings, "strings") \
+ MACRO_(String_split, String_split, "String_split") \
+ MACRO_(StructType, StructType, "StructType") \
+ MACRO_(style, style, "style") \
+ MACRO_(super, super, "super") \
+ MACRO_(switch, switch_, "switch") \
+ MACRO_(symmetricDifference, symmetricDifference, "symmetricDifference") \
+ MACRO_(Symbol_iterator_fun, Symbol_iterator_fun, "[Symbol.iterator]") \
+ MACRO_(target, target, "target") \
+ MACRO_(test, test, "test") \
+ MACRO_(then, then, "then") \
+ MACRO_(this, this_, "this") \
+ MACRO_(throw, throw_, "throw") \
+ MACRO_(timestamp, timestamp, "timestamp") \
+ MACRO_(timeStyle, timeStyle, "timeStyle") \
+ MACRO_(timeZone, timeZone, "timeZone") \
+ MACRO_(timeZoneName, timeZoneName, "timeZoneName") \
+ MACRO_(toGMTString, toGMTString, "toGMTString") \
+ MACRO_(toISOString, toISOString, "toISOString") \
+ MACRO_(toJSON, toJSON, "toJSON") \
+ MACRO_(toLocaleString, toLocaleString, "toLocaleString") \
+ MACRO_(ToNumeric, ToNumeric, "ToNumeric") \
+ MACRO_(toSource, toSource, "toSource") \
+ MACRO_(toString, toString, "toString") \
+ MACRO_(ToString, ToString, "ToString") \
+ MACRO_(toUTCString, toUTCString, "toUTCString") \
+ MACRO_(trailingZeroDisplay, trailingZeroDisplay, "trailingZeroDisplay") \
+ MACRO_(trimEnd, trimEnd, "trimEnd") \
+ MACRO_(trimLeft, trimLeft, "trimLeft") \
+ MACRO_(trimRight, trimRight, "trimRight") \
+ MACRO_(trimStart, trimStart, "trimStart") \
+ MACRO_(true, true_, "true") \
+ MACRO_(try, try_, "try") \
+ MACRO_(two, two, "two") \
+ MACRO_(type, type, "type") \
+ MACRO_(typeof, typeof_, "typeof") \
+ MACRO_(uint8, uint8, "uint8") \
+ MACRO_(uint8Clamped, uint8Clamped, "uint8Clamped") \
+ MACRO_(uint16, uint16, "uint16") \
+ MACRO_(uint32, uint32, "uint32") \
+ MACRO_(Uint8x16, Uint8x16, "Uint8x16") \
+ MACRO_(Uint16x8, Uint16x8, "Uint16x8") \
+ MACRO_(Uint32x4, Uint32x4, "Uint32x4") \
+ MACRO_(unescape, unescape, "unescape") \
+ MACRO_(uneval, uneval, "uneval") \
+ MACRO_(unicode, unicode, "unicode") \
+ MACRO_(uninitialized, uninitialized, "uninitialized") \
+ MACRO_(union, union_, "union") \
+ MACRO_(unit, unit, "unit") \
+ MACRO_(unitDisplay, unitDisplay, "unitDisplay") \
+ MACRO_(unknown, unknown, "unknown") \
+ MACRO_(unregister, unregister, "unregister") \
+ MACRO_(UnsafeGetInt32FromReservedSlot, UnsafeGetInt32FromReservedSlot, \
+ "UnsafeGetInt32FromReservedSlot") \
+ MACRO_(UnsafeGetObjectFromReservedSlot, UnsafeGetObjectFromReservedSlot, \
+ "UnsafeGetObjectFromReservedSlot") \
+ MACRO_(UnsafeGetReservedSlot, UnsafeGetReservedSlot, \
+ "UnsafeGetReservedSlot") \
+ MACRO_(UnsafeGetStringFromReservedSlot, UnsafeGetStringFromReservedSlot, \
+ "UnsafeGetStringFromReservedSlot") \
+ MACRO_(UnsafeSetReservedSlot, UnsafeSetReservedSlot, \
+ "UnsafeSetReservedSlot") \
+ MACRO_(unsized, unsized, "unsized") \
+ MACRO_(unwatch, unwatch, "unwatch") \
+ MACRO_(url, url, "url") \
+ MACRO_(usage, usage, "usage") \
+ MACRO_(useAsm, useAsm, "use asm") \
+ MACRO_(useGrouping, useGrouping, "useGrouping") \
+ MACRO_(useStrict, useStrict, "use strict") \
+ MACRO_(UTC, UTC, "UTC") \
+ MACRO_(value, value, "value") \
+ MACRO_(valueOf, valueOf, "valueOf") \
+ MACRO_(values, values, "values") \
+ MACRO_(var, var, "var") \
+ MACRO_(variable, variable, "variable") \
+ MACRO_(void, void_, "void") \
+ MACRO_(void0, void0, "(void 0)") \
+ MACRO_(wasm, wasm, "wasm") \
+ MACRO_(WasmAnyRef, WasmAnyRef, "WasmAnyRef") \
+ MACRO_(wasmcall, wasmcall, "wasmcall") \
+ MACRO_(watch, watch, "watch") \
+ MACRO_(WeakMapConstructorInit, WeakMapConstructorInit, \
+ "WeakMapConstructorInit") \
+ MACRO_(WeakSetConstructorInit, WeakSetConstructorInit, \
+ "WeakSetConstructorInit") \
+ MACRO_(WeakSet_add, WeakSet_add, "WeakSet_add") \
+ MACRO_(week, week, "week") \
+ MACRO_(weekday, weekday, "weekday") \
+ MACRO_(weekend, weekend, "weekend") \
+ MACRO_(while, while_, "while") \
+ MACRO_(with, with, "with") \
+ MACRO_(toReversed, toReversed, "toReversed") \
+ MACRO_(toSorted, toSorted, "toSorted") \
+ MACRO_(toSpliced, toSpliced, "toSpliced") \
+ MACRO_(writable, writable, "writable") \
+ MACRO_(write, write, "write") \
+ MACRO_(year, year, "year") \
+ MACRO_(yearName, yearName, "yearName") \
+ MACRO_(yield, yield, "yield") \
+ MACRO_(zero, zero, "zero") \
+ /* Type names must be contiguous and ordered; see js::TypeName. */ \
+ MACRO_(undefined, undefined, "undefined") \
+ MACRO_(object, object, "object") \
+ MACRO_(function, function, "function") \
+ MACRO_(string, string, "string") \
+ MACRO_(number, number, "number") \
+ MACRO_(boolean, boolean, "boolean") \
+ MACRO_(symbol, symbol, "symbol") \
+ MACRO_(bigint, bigint, "bigint") \
+ IF_RECORD_TUPLE(MACRO_(record, record, "record")) \
+ IF_RECORD_TUPLE(MACRO_(tuple, tuple, "tuple"))
+
+#define PROPERTY_NAME_IGNORE(IDPART, ID, TEXT)
+
+#define FOR_EACH_LENGTH1_PROPERTYNAME(MACRO) \
+ FOR_EACH_COMMON_PROPERTYNAME_(PROPERTY_NAME_IGNORE, MACRO, \
+ PROPERTY_NAME_IGNORE, PROPERTY_NAME_IGNORE)
+
+#define FOR_EACH_LENGTH2_PROPERTYNAME(MACRO) \
+ FOR_EACH_COMMON_PROPERTYNAME_(PROPERTY_NAME_IGNORE, PROPERTY_NAME_IGNORE, \
+ MACRO, PROPERTY_NAME_IGNORE)
+
+#define FOR_EACH_NON_EMPTY_TINY_PROPERTYNAME(MACRO) \
+ FOR_EACH_COMMON_PROPERTYNAME_(PROPERTY_NAME_IGNORE, MACRO, MACRO, \
+ PROPERTY_NAME_IGNORE)
+
+#define FOR_EACH_TINY_PROPERTYNAME(MACRO) \
+ FOR_EACH_COMMON_PROPERTYNAME_(MACRO, MACRO, MACRO, PROPERTY_NAME_IGNORE)
+
+#define FOR_EACH_NONTINY_COMMON_PROPERTYNAME(MACRO) \
+ FOR_EACH_COMMON_PROPERTYNAME_(PROPERTY_NAME_IGNORE, PROPERTY_NAME_IGNORE, \
+ PROPERTY_NAME_IGNORE, MACRO)
+
+#define FOR_EACH_COMMON_PROPERTYNAME(MACRO) \
+ FOR_EACH_COMMON_PROPERTYNAME_(MACRO, MACRO, MACRO, MACRO)
+
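+// Illustrative consumer of this X-macro list (hypothetical code, not part of
+// this header): define a MACRO taking (IDPART, ID, TEXT) and apply it to the
+// whole list, e.g. to generate an enumeration of all common property names.
+//
+//   #define DECLARE_PROPERTYNAME_ID(IDPART, ID, TEXT) PropName_##ID,
+//   enum CommonPropertyNameId {
+//     FOR_EACH_COMMON_PROPERTYNAME(DECLARE_PROPERTYNAME_ID)
+//     PropName_Limit
+//   };
+//   #undef DECLARE_PROPERTYNAME_ID
+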
+#endif /* vm_CommonPropertyNames_h */
diff --git a/js/src/vm/Compartment-inl.h b/js/src/vm/Compartment-inl.h
new file mode 100644
index 0000000000..43aff52750
--- /dev/null
+++ b/js/src/vm/Compartment-inl.h
@@ -0,0 +1,442 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Compartment_inl_h
+#define vm_Compartment_inl_h
+
+#include "vm/Compartment.h"
+
+#include <type_traits>
+
+#include "jsapi.h"
+#include "jsfriendapi.h"
+#include "jsnum.h"
+#include "js/CallArgs.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Wrapper.h"
+#include "vm/Iteration.h"
+#include "vm/JSObject.h"
+
+#include "vm/JSContext-inl.h"
+
+struct JSClass;
+
+inline js::StringWrapperMap::Ptr JS::Compartment::lookupWrapper(
+ JSString* str) const {
+ return zone()->crossZoneStringWrappers().lookup(str);
+}
+
+inline bool JS::Compartment::wrap(JSContext* cx, JS::MutableHandleValue vp) {
+ /* Only GC things have to be wrapped or copied. */
+ if (!vp.isGCThing()) {
+ return true;
+ }
+
+ /*
+ * Symbols are GC things, but never need to be wrapped or copied because
+ * they are always allocated in the atoms zone. They still need to be
+ * marked in the new compartment's zone, however.
+ */
+ if (vp.isSymbol()) {
+ cx->markAtomValue(vp);
+ return true;
+ }
+
+ /* Handle strings. */
+ if (vp.isString()) {
+ JS::RootedString str(cx, vp.toString());
+ if (!wrap(cx, &str)) {
+ return false;
+ }
+ vp.setString(str);
+ return true;
+ }
+
+ if (vp.isBigInt()) {
+ JS::RootedBigInt bi(cx, vp.toBigInt());
+ if (!wrap(cx, &bi)) {
+ return false;
+ }
+ vp.setBigInt(bi);
+ return true;
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ if (vp.isExtendedPrimitive()) {
+ JS::RootedObject extPrim(cx, &vp.toExtendedPrimitive());
+ if (!wrapExtendedPrimitive(cx, &extPrim)) {
+ return false;
+ }
+ vp.setExtendedPrimitive(*extPrim);
+ return true;
+ }
+#endif
+
+ MOZ_ASSERT(vp.isObject());
+
+  /*
+   * All that's left are objects.
+   *
+   * Object wrapping isn't the fastest thing in the world, in part because
+   * we have to unwrap and invoke the prewrap hook to find the identity
+   * object before we even start checking the cache. Neither of these
+   * operations is needed in the common case, where we're just wrapping
+   * a plain JS object from the wrappee's side of the membrane to the
+   * wrapper's side.
+   *
+   * To optimize this, we note that the cache should only ever contain
+   * identity objects - that is to say, objects that serve as the
+   * canonical representation for a unique object identity observable by
+   * script. Unwrap and prewrap are both steps that we take to get to the
+   * identity of an incoming object, and as such, they should never map
+   * one identity object to another object. This means that we can safely
+   * check the cache immediately, and only risk false negatives. Do this
+   * in opt builds, and do both in debug builds so that we can assert
+   * that we get the same answer.
+   */
+#ifdef DEBUG
+ JS::AssertValueIsNotGray(vp);
+ JS::RootedObject cacheResult(cx);
+#endif
+ if (js::ObjectWrapperMap::Ptr p = lookupWrapper(&vp.toObject())) {
+#ifdef DEBUG
+ cacheResult = p->value().get();
+#else
+ vp.setObject(*p->value().get());
+ return true;
+#endif
+ }
+
+ JS::RootedObject obj(cx, &vp.toObject());
+ if (!wrap(cx, &obj)) {
+ return false;
+ }
+ vp.setObject(*obj);
+ MOZ_ASSERT_IF(cacheResult, obj == cacheResult);
+ return true;
+}
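+
+/*
+ * Illustrative call pattern (a sketch of typical usage, not part of this
+ * header): after entering the destination realm, values crossing the
+ * compartment boundary are wrapped into the current compartment before use.
+ * |targetGlobal| and |valueFromElsewhere| are assumed names.
+ *
+ *   JSAutoRealm ar(cx, targetGlobal);
+ *   JS::RootedValue v(cx, valueFromElsewhere);
+ *   if (!cx->compartment()->wrap(cx, &v)) {
+ *     return false;  // wrapping failed (e.g. OOM or access denied)
+ *   }
+ *   // |v| is now safe to use from code running in |targetGlobal|'s realm.
+ */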
+
+inline bool JS::Compartment::wrap(JSContext* cx,
+ MutableHandle<mozilla::Maybe<Value>> vp) {
+ if (vp.get().isNothing()) {
+ return true;
+ }
+
+ return wrap(cx, MutableHandle<Value>::fromMarkedLocation(vp.get().ptr()));
+}
+
+namespace js {
+namespace detail {
+
+/**
+ * Return the name of class T as a static null-terminated ASCII string constant
+ * (for error messages).
+ */
+template <class T>
+const char* ClassName() {
+ return T::class_.name;
+}
+
+template <class T, class ErrorCallback>
+[[nodiscard]] T* UnwrapAndTypeCheckValueSlowPath(JSContext* cx,
+ HandleValue value,
+ ErrorCallback throwTypeError) {
+ JSObject* obj = nullptr;
+ if (value.isObject()) {
+ obj = &value.toObject();
+ if (IsWrapper(obj)) {
+ obj = CheckedUnwrapStatic(obj);
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+ }
+ }
+
+ if (!obj || !obj->is<T>()) {
+ throwTypeError();
+ return nullptr;
+ }
+
+ return &obj->as<T>();
+}
+
+template <class ErrorCallback>
+[[nodiscard]] JSObject* UnwrapAndTypeCheckValueSlowPath(
+ JSContext* cx, HandleValue value, const JSClass* clasp,
+ ErrorCallback throwTypeError) {
+ JSObject* obj = nullptr;
+ if (value.isObject()) {
+ obj = &value.toObject();
+ if (IsWrapper(obj)) {
+ obj = CheckedUnwrapStatic(obj);
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+ }
+ }
+
+ if (!obj || !obj->hasClass(clasp)) {
+ throwTypeError();
+ return nullptr;
+ }
+
+ return obj;
+}
+
+} // namespace detail
+
+/**
+ * Remove all wrappers from `val` and try to downcast the result to class `T`.
+ *
+ * DANGER: The result may not be same-compartment with `cx`.
+ *
+ * This calls `throwTypeError` if the value isn't an object, cannot be
+ * unwrapped, or isn't an instance of the expected type. `throwTypeError` must
+ * in fact throw a TypeError (or OOM trying).
+ */
+template <class T, class ErrorCallback>
+[[nodiscard]] inline T* UnwrapAndTypeCheckValue(JSContext* cx,
+ HandleValue value,
+ ErrorCallback throwTypeError) {
+ cx->check(value);
+
+ static_assert(!std::is_convertible_v<T*, Wrapper*>,
+ "T can't be a Wrapper type; this function discards wrappers");
+
+ if (value.isObject() && value.toObject().is<T>()) {
+ return &value.toObject().as<T>();
+ }
+
+ return detail::UnwrapAndTypeCheckValueSlowPath<T>(cx, value, throwTypeError);
+}
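+
+/*
+ * Example shape of a call (illustrative; |MyObject| is a placeholder class
+ * with a static |class_| member). The callback must report a TypeError, here
+ * mirroring the JSMSG_INCOMPATIBLE_PROTO pattern used elsewhere in this file:
+ *
+ *   HandleValue val = args.get(0);
+ *   MyObject* obj = UnwrapAndTypeCheckValue<MyObject>(cx, val, [cx, val] {
+ *     JS_ReportErrorNumberLatin1(cx, GetErrorMessage, nullptr,
+ *                                JSMSG_INCOMPATIBLE_PROTO, "MyObject",
+ *                                "myMethod", InformalValueTypeName(val));
+ *   });
+ *   if (!obj) {
+ *     return false;
+ *   }
+ */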
+
+/**
+ * Remove all wrappers from |val| and try to downcast the result to an object of
+ * the class |clasp|.
+ *
+ * DANGER: The result may not be same-compartment with |cx|.
+ *
+ * This calls |throwTypeError| if the value isn't an object, cannot be
+ * unwrapped, or isn't an instance of the expected type. |throwTypeError| must
+ * in fact throw a TypeError (or OOM trying).
+ */
+template <class ErrorCallback>
+[[nodiscard]] inline JSObject* UnwrapAndTypeCheckValue(
+ JSContext* cx, HandleValue value, const JSClass* clasp,
+ ErrorCallback throwTypeError) {
+ cx->check(value);
+
+ if (value.isObject() && value.toObject().hasClass(clasp)) {
+ return &value.toObject();
+ }
+
+ return detail::UnwrapAndTypeCheckValueSlowPath(cx, value, clasp,
+ throwTypeError);
+}
+
+/**
+ * Remove all wrappers from `args.thisv()` and try to downcast the result to
+ * class `T`.
+ *
+ * DANGER: The result may not be same-compartment with `cx`.
+ *
+ * This throws a TypeError if the value isn't an object, cannot be unwrapped,
+ * or isn't an instance of the expected type.
+ */
+template <class T>
+[[nodiscard]] inline T* UnwrapAndTypeCheckThis(JSContext* cx,
+ const CallArgs& args,
+ const char* methodName) {
+ HandleValue thisv = args.thisv();
+ return UnwrapAndTypeCheckValue<T>(cx, thisv, [cx, methodName, thisv] {
+ JS_ReportErrorNumberLatin1(cx, GetErrorMessage, nullptr,
+ JSMSG_INCOMPATIBLE_PROTO, detail::ClassName<T>(),
+ methodName, InformalValueTypeName(thisv));
+ });
+}
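+
+/*
+ * Sketch of a typical JSNative built on this helper (|MyObject| and
+ * |myMethod| are placeholder names):
+ *
+ *   static bool MyObject_myMethod(JSContext* cx, unsigned argc, Value* vp) {
+ *     CallArgs args = CallArgsFromVp(argc, vp);
+ *     MyObject* obj = UnwrapAndTypeCheckThis<MyObject>(cx, args, "myMethod");
+ *     if (!obj) {
+ *       return false;  // a TypeError has already been reported
+ *     }
+ *     // ... use |obj|, remembering it may be cross-compartment ...
+ *     args.rval().setUndefined();
+ *     return true;
+ *   }
+ */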
+
+/**
+ * Remove all wrappers from `args[argIndex]` and try to downcast the result to
+ * class `T`.
+ *
+ * DANGER: The result may not be same-compartment with `cx`.
+ *
+ * This throws a TypeError if the specified argument is missing, isn't an
+ * object, cannot be unwrapped, or isn't an instance of the expected type.
+ */
+template <class T>
+[[nodiscard]] inline T* UnwrapAndTypeCheckArgument(JSContext* cx,
+ CallArgs& args,
+ const char* methodName,
+ int argIndex) {
+ HandleValue val = args.get(argIndex);
+ return UnwrapAndTypeCheckValue<T>(cx, val, [cx, val, methodName, argIndex] {
+ Int32ToCStringBuf cbuf;
+ char* numStr = Int32ToCString(&cbuf, argIndex + 1);
+ MOZ_ASSERT(numStr);
+ JS_ReportErrorNumberLatin1(
+ cx, GetErrorMessage, nullptr, JSMSG_WRONG_TYPE_ARG, numStr, methodName,
+ detail::ClassName<T>(), InformalValueTypeName(val));
+ });
+}
+
+/**
+ * Unwrap an object of a known type.
+ *
+ * If `obj` is an object of class T, this returns a pointer to that object. If
+ * `obj` is a wrapper for such an object, this tries to unwrap the object and
+ * return a pointer to it. If access is denied, or `obj` was a wrapper but has
+ * been nuked, this reports an error and returns null.
+ *
+ * In all other cases, the behavior is undefined, so call this only if `obj` is
+ * known to have been an object of class T, or a wrapper to a T, at some point.
+ */
+template <class T>
+[[nodiscard]] inline T* UnwrapAndDowncastObject(JSContext* cx, JSObject* obj) {
+ static_assert(!std::is_convertible_v<T*, Wrapper*>,
+ "T can't be a Wrapper type; this function discards wrappers");
+
+ if (IsProxy(obj)) {
+ if (JS_IsDeadWrapper(obj)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEAD_OBJECT);
+ return nullptr;
+ }
+
+ // It would probably be OK to do an unchecked unwrap here, but we allow
+ // arbitrary security policies, so check anyway.
+ obj = obj->maybeUnwrapAs<T>();
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+ }
+
+ return &obj->as<T>();
+}
+
+/**
+ * Unwrap an object of a known (but not compile-time-known) class.
+ *
+ * If |obj| is an object with class |clasp|, this returns |obj|. If |obj| is a
+ * wrapper for such an object, this tries to unwrap the object and return a
+ * pointer to it. If access is denied, or |obj| was a wrapper but has been
+ * nuked, this reports an error and returns null.
+ *
+ * In all other cases, the behavior is undefined, so call this only if |obj| is
+ * known to have had class |clasp|, or been a wrapper to such an object, at some
+ * point.
+ */
+[[nodiscard]] inline JSObject* UnwrapAndDowncastObject(JSContext* cx,
+ JSObject* obj,
+ const JSClass* clasp) {
+ if (IsProxy(obj)) {
+ if (JS_IsDeadWrapper(obj)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEAD_OBJECT);
+ return nullptr;
+ }
+
+ // It would probably be OK to do an unchecked unwrap here, but we allow
+ // arbitrary security policies, so check anyway.
+ obj = obj->maybeUnwrapAs(clasp);
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+ }
+
+ MOZ_ASSERT(obj->hasClass(clasp));
+ return obj;
+}
+
+/**
+ * Unwrap a value of a known type. See UnwrapAndDowncastObject.
+ */
+template <class T>
+[[nodiscard]] inline T* UnwrapAndDowncastValue(JSContext* cx,
+ const Value& value) {
+ return UnwrapAndDowncastObject<T>(cx, &value.toObject());
+}
+
+/**
+ * Unwrap an object of a known (but not compile-time-known) class. See
+ * UnwrapAndDowncastObject.
+ */
+[[nodiscard]] inline JSObject* UnwrapAndDowncastValue(JSContext* cx,
+ const Value& value,
+ const JSClass* clasp) {
+ return UnwrapAndDowncastObject(cx, &value.toObject(), clasp);
+}
+
+/**
+ * Read a private slot that is known to point to a particular type of object.
+ *
+ * Some internal slots specified in various standards effectively have static
+ * types. For example, the [[ownerReadableStream]] slot of a stream reader is
+ * guaranteed to be a ReadableStream. However, because of compartments, we
+ * sometimes store a cross-compartment wrapper in that slot. And since wrappers
+ * can be nuked, that wrapper may become a dead object proxy.
+ *
+ * UnwrapInternalSlot() copes with the cross-compartment and dead object cases,
+ * but not plain bugs where the slot hasn't been initialized or doesn't contain
+ * the expected type of object. Call this only if the slot is certain to
+ * contain either an instance of T, a wrapper for a T, or a dead object.
+ *
+ * `cx` and `unwrappedObj` are not required to be same-compartment.
+ *
+ * DANGER: The result may not be same-compartment with either `cx` or `obj`.
+ */
+template <class T>
+[[nodiscard]] inline T* UnwrapInternalSlot(JSContext* cx,
+ Handle<NativeObject*> unwrappedObj,
+ uint32_t slot) {
+ static_assert(!std::is_convertible_v<T*, Wrapper*>,
+ "T can't be a Wrapper type; this function discards wrappers");
+
+ return UnwrapAndDowncastValue<T>(cx, unwrappedObj->getFixedSlot(slot));
+}
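+
+/*
+ * For example (hypothetical slot layout; |MyStream|, |READER_SLOT_STREAM|,
+ * and |someUnwrappedReader| are placeholder names):
+ *
+ *   Rooted<NativeObject*> unwrappedReader(cx, someUnwrappedReader);
+ *   MyStream* unwrappedStream = UnwrapInternalSlot<MyStream>(
+ *       cx, unwrappedReader, READER_SLOT_STREAM);
+ *   if (!unwrappedStream) {
+ *     return false;  // dead wrapper or access denied
+ *   }
+ */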
+
+/**
+ * Read a function slot that is known to point to a particular type of object.
+ *
+ * This is like UnwrapInternalSlot, but for extended function slots. Call this
+ * only if the specified slot is known to have been initialized with an object
+ * of class T or a wrapper for such an object.
+ *
+ * DANGER: The result may not be same-compartment with `cx`.
+ */
+template <class T>
+[[nodiscard]] T* UnwrapCalleeSlot(JSContext* cx, CallArgs& args,
+ size_t extendedSlot) {
+ JSFunction& func = args.callee().as<JSFunction>();
+ return UnwrapAndDowncastValue<T>(cx, func.getExtendedSlot(extendedSlot));
+}
+
+} // namespace js
+
+MOZ_ALWAYS_INLINE bool JS::Compartment::objectMaybeInIteration(JSObject* obj) {
+ MOZ_ASSERT(obj->compartment() == this);
+
+ js::NativeIteratorListIter iter(&enumerators_);
+
+ // If the list is empty, we're not iterating any objects.
+ if (iter.done()) {
+ return false;
+ }
+
+ // If the list contains a single object, check if it's |obj|.
+ js::NativeIterator* next = iter.next();
+ if (iter.done()) {
+ return next->objectBeingIterated() == obj;
+ }
+
+ return true;
+}
+
+#endif /* vm_Compartment_inl_h */
diff --git a/js/src/vm/Compartment.cpp b/js/src/vm/Compartment.cpp
new file mode 100644
index 0000000000..a99a9145c7
--- /dev/null
+++ b/js/src/vm/Compartment.cpp
@@ -0,0 +1,616 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Compartment-inl.h"
+
+#include "mozilla/MemoryReporting.h"
+
+#include <stddef.h>
+
+#include "jsfriendapi.h"
+
+#include "debugger/DebugAPI.h"
+#include "gc/GC.h"
+#include "gc/Memory.h"
+#include "gc/PublicIterators.h"
+#include "gc/Zone.h"
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/friend/WindowProxy.h" // js::IsWindow, js::IsWindowProxy, js::ToWindowProxyIfWindow
+#include "js/Proxy.h"
+#include "js/RootingAPI.h"
+#include "js/StableStringChars.h"
+#include "js/Wrapper.h"
+#include "js/WrapperCallbacks.h"
+#include "proxy/DeadObjectProxy.h"
+#include "proxy/DOMProxy.h"
+#include "vm/JSContext.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordTupleShared.h"
+#endif
+#include "vm/WrapperObject.h"
+
+#include "gc/Marking-inl.h"
+#include "gc/WeakMap-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+
+using JS::AutoStableStringChars;
+
+Compartment::Compartment(Zone* zone, bool invisibleToDebugger)
+ : zone_(zone),
+ runtime_(zone->runtimeFromAnyThread()),
+ invisibleToDebugger_(invisibleToDebugger),
+ crossCompartmentObjectWrappers(zone, 0),
+ realms_(zone) {}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
+void Compartment::checkObjectWrappersAfterMovingGC() {
+ for (ObjectWrapperEnum e(this); !e.empty(); e.popFront()) {
+ // Assert that the postbarriers have worked and that nothing is left in the
+ // wrapper map that points into the nursery, and that the hash table entries
+ // are discoverable.
+ auto key = e.front().key();
+ CheckGCThingAfterMovingGC(key.get());
+
+ auto ptr = crossCompartmentObjectWrappers.lookup(key);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &e.front());
+ }
+}
+
+#endif // JSGC_HASH_TABLE_CHECKS
+
+bool Compartment::putWrapper(JSContext* cx, JSObject* wrapped,
+ JSObject* wrapper) {
+ MOZ_ASSERT(!js::IsProxy(wrapper) || js::GetProxyHandler(wrapper)->family() !=
+ js::GetDOMRemoteProxyHandlerFamily());
+
+ if (!crossCompartmentObjectWrappers.put(wrapped, wrapper)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool Compartment::putWrapper(JSContext* cx, JSString* wrapped,
+ JSString* wrapper) {
+ if (!zone()->crossZoneStringWrappers().put(wrapped, wrapper)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+void Compartment::removeWrapper(js::ObjectWrapperMap::Ptr p) {
+ JSObject* key = p->key();
+ JSObject* value = p->value().unbarrieredGet();
+ if (js::gc::detail::GetDelegate(value) == key) {
+ key->zone()->beforeClearDelegate(value, key);
+ }
+
+ crossCompartmentObjectWrappers.remove(p);
+}
+
+JSString* js::CopyStringPure(JSContext* cx, JSString* str) {
+  /*
+   * Directly allocate the copy in the destination compartment, rather than
+   * first flattening it (and possibly allocating in the source compartment),
+   * because we don't know whether the flattening will pay off later.
+   */
+
+ size_t len = str->length();
+ JSString* copy;
+ if (str->isLinear()) {
+ /* Only use AutoStableStringChars if the NoGC allocation fails. */
+ if (str->hasLatin1Chars()) {
+ JS::AutoCheckCannotGC nogc;
+ copy = NewStringCopyN<NoGC>(cx, str->asLinear().latin1Chars(nogc), len);
+ } else {
+ JS::AutoCheckCannotGC nogc;
+ copy = NewStringCopyNDontDeflate<NoGC>(
+ cx, str->asLinear().twoByteChars(nogc), len);
+ }
+ if (copy) {
+ return copy;
+ }
+
+ AutoStableStringChars chars(cx);
+ if (!chars.init(cx, str)) {
+ return nullptr;
+ }
+
+ return chars.isLatin1() ? NewStringCopyN<CanGC>(
+ cx, chars.latin1Range().begin().get(), len)
+ : NewStringCopyNDontDeflate<CanGC>(
+ cx, chars.twoByteRange().begin().get(), len);
+ }
+
+ if (str->hasLatin1Chars()) {
+ UniquePtr<Latin1Char[], JS::FreePolicy> copiedChars =
+ str->asRope().copyLatin1Chars(cx, js::StringBufferArena);
+ if (!copiedChars) {
+ return nullptr;
+ }
+
+ return NewString<CanGC>(cx, std::move(copiedChars), len);
+ }
+
+ UniqueTwoByteChars copiedChars =
+ str->asRope().copyTwoByteChars(cx, js::StringBufferArena);
+ if (!copiedChars) {
+ return nullptr;
+ }
+
+ return NewStringDontDeflate<CanGC>(cx, std::move(copiedChars), len);
+}
+
+bool Compartment::wrap(JSContext* cx, MutableHandleString strp) {
+ MOZ_ASSERT(cx->compartment() == this);
+
+ /* If the string is already in this compartment, we are done. */
+ JSString* str = strp;
+ if (str->zoneFromAnyThread() == zone()) {
+ return true;
+ }
+
+ /*
+ * If the string is an atom, we don't have to copy, but we do need to mark
+ * the atom as being in use by the new zone.
+ */
+ if (str->isAtom()) {
+ cx->markAtom(&str->asAtom());
+ return true;
+ }
+
+ /* Check the cache. */
+ if (StringWrapperMap::Ptr p = lookupWrapper(str)) {
+ strp.set(p->value().get());
+ return true;
+ }
+
+ /* No dice. Make a copy, and cache it. */
+ JSString* copy = CopyStringPure(cx, str);
+ if (!copy) {
+ return false;
+ }
+ if (!putWrapper(cx, strp, copy)) {
+ return false;
+ }
+
+ strp.set(copy);
+ return true;
+}
+
+bool Compartment::wrap(JSContext* cx, MutableHandleBigInt bi) {
+ MOZ_ASSERT(cx->compartment() == this);
+
+ if (bi->zone() == cx->zone()) {
+ return true;
+ }
+
+ BigInt* copy = BigInt::copy(cx, bi);
+ if (!copy) {
+ return false;
+ }
+ bi.set(copy);
+ return true;
+}
+
+bool Compartment::getNonWrapperObjectForCurrentCompartment(
+ JSContext* cx, HandleObject origObj, MutableHandleObject obj) {
+ // Ensure that we have entered a realm.
+ MOZ_ASSERT(cx->global());
+
+  // The object is already in the right compartment. Normally same-
+  // compartment returns the object itself; however, windows are always
+  // wrapped by a proxy, so we have to check for that case here manually.
+ if (obj->compartment() == this) {
+ obj.set(ToWindowProxyIfWindow(obj));
+ return true;
+ }
+
+  // Note that if the object is same-compartment, but has been wrapped into a
+  // different compartment, we need to unwrap it and return the bare same-
+  // compartment object. Note again that windows are always wrapped by a
+  // WindowProxy even when same-compartment, so take care not to strip this
+  // particular wrapper.
+ RootedObject objectPassedToWrap(cx, obj);
+ obj.set(UncheckedUnwrap(obj, /* stopAtWindowProxy = */ true));
+ if (obj->compartment() == this) {
+ MOZ_ASSERT(!IsWindow(obj));
+ return true;
+ }
+
+ // Disallow creating new wrappers if we nuked the object's realm or the
+ // current compartment.
+ if (!AllowNewWrapper(this, obj)) {
+ obj.set(NewDeadProxyObject(cx, obj));
+ return !!obj;
+ }
+
+ // Use the WindowProxy instead of the Window here, so that we don't have to
+ // deal with this in the rest of the wrapping code.
+ if (IsWindow(obj)) {
+ obj.set(ToWindowProxyIfWindow(obj));
+
+ // ToWindowProxyIfWindow can return a CCW if |obj| was a navigated-away-from
+ // Window. Strip any CCWs.
+ obj.set(UncheckedUnwrap(obj));
+
+ if (JS_IsDeadWrapper(obj)) {
+ obj.set(NewDeadProxyObject(cx, obj));
+ return !!obj;
+ }
+
+ MOZ_ASSERT(IsWindowProxy(obj) || IsDOMRemoteProxyObject(obj));
+
+ // We crossed a compartment boundary there, so may now have a gray object.
+ // This function is not allowed to return gray objects, so don't do that.
+ ExposeObjectToActiveJS(obj);
+ }
+
+ // If the object is a dead wrapper, return a new dead wrapper rather than
+ // trying to wrap it for a different compartment.
+ if (JS_IsDeadWrapper(obj)) {
+ obj.set(NewDeadProxyObject(cx, obj));
+ return !!obj;
+ }
+
+  // Invoke the prewrap callback. The prewrap callback is responsible for
+  // doing reification similar to the above, but can account for any
+  // additional embedder requirements.
+ //
+ // We're a bit worried about infinite recursion here, so we do a check -
+ // see bug 809295.
+ auto preWrap = cx->runtime()->wrapObjectCallbacks->preWrap;
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.checkSystem(cx)) {
+ return false;
+ }
+ if (preWrap) {
+ preWrap(cx, cx->global(), origObj, obj, objectPassedToWrap, obj);
+ if (!obj) {
+ return false;
+ }
+ }
+ MOZ_ASSERT(!IsWindow(obj));
+
+ return true;
+}
+
+bool Compartment::getOrCreateWrapper(JSContext* cx, HandleObject existing,
+ MutableHandleObject obj) {
+ // ScriptSourceObject is an internal object that we never need to wrap.
+ MOZ_ASSERT(!obj->is<ScriptSourceObject>());
+
+ // If we already have a wrapper for this value, use it.
+ if (ObjectWrapperMap::Ptr p = lookupWrapper(obj)) {
+ obj.set(p->value().get());
+ MOZ_ASSERT(obj->is<CrossCompartmentWrapperObject>());
+ return true;
+ }
+
+ // Ensure that the wrappee is exposed in case we are creating a new wrapper
+ // for a gray object.
+ ExposeObjectToActiveJS(obj);
+
+ // Create a new wrapper for the object.
+ auto wrap = cx->runtime()->wrapObjectCallbacks->wrap;
+ RootedObject wrapper(cx, wrap(cx, existing, obj));
+ if (!wrapper) {
+ return false;
+ }
+
+ // We maintain the invariant that the key in the cross-compartment wrapper
+ // map is always directly wrapped by the value.
+ MOZ_ASSERT(Wrapper::wrappedObject(wrapper) == obj);
+
+ if (!putWrapper(cx, obj, wrapper)) {
+    // Enforce the invariant that all cross-compartment wrapper objects are
+    // in the map by nuking the wrapper if we couldn't add it.
+ // Unfortunately it's possible for the wrapper to still be marked if we
+ // took this path, for example if the object metadata callback stashes a
+ // reference to it.
+ if (wrapper->is<CrossCompartmentWrapperObject>()) {
+ NukeCrossCompartmentWrapper(cx, wrapper);
+ }
+ return false;
+ }
+
+ obj.set(wrapper);
+ return true;
+}
+
+#ifdef ENABLE_RECORD_TUPLE
+bool Compartment::wrapExtendedPrimitive(JSContext* cx,
+ MutableHandleObject obj) {
+ MOZ_ASSERT(IsExtendedPrimitive(*obj));
+ MOZ_ASSERT(cx->compartment() == this);
+
+ if (obj->compartment() == this) {
+ return true;
+ }
+
+ JSObject* copy = CopyExtendedPrimitive(cx, obj);
+ if (!copy) {
+ return false;
+ }
+
+ obj.set(copy);
+ return true;
+}
+#endif
+
+bool Compartment::wrap(JSContext* cx, MutableHandleObject obj) {
+ MOZ_ASSERT(cx->compartment() == this);
+
+ if (!obj) {
+ return true;
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ MOZ_ASSERT(!IsExtendedPrimitive(*obj));
+#endif
+
+ AutoDisableProxyCheck adpc;
+
+ // Anything we're wrapping has already escaped into script, so must have
+ // been unmarked-gray at some point in the past.
+ JS::AssertObjectIsNotGray(obj);
+
+ // The passed object may already be wrapped, or may fit a number of special
+ // cases that we need to check for and manually correct.
+ if (!getNonWrapperObjectForCurrentCompartment(cx, /* origObj = */ nullptr,
+ obj)) {
+ return false;
+ }
+
+ // If the reification above did not result in a same-compartment object,
+ // get or create a new wrapper object in this compartment for it.
+ if (obj->compartment() != this) {
+ if (!getOrCreateWrapper(cx, nullptr, obj)) {
+ return false;
+ }
+ }
+
+ // Ensure that the wrapper is also exposed.
+ ExposeObjectToActiveJS(obj);
+ return true;
+}
+
+bool Compartment::rewrap(JSContext* cx, MutableHandleObject obj,
+ HandleObject existingArg) {
+ MOZ_ASSERT(cx->compartment() == this);
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT(existingArg);
+ MOZ_ASSERT(existingArg->compartment() == cx->compartment());
+ MOZ_ASSERT(IsDeadProxyObject(existingArg));
+
+ AutoDisableProxyCheck adpc;
+
+  // It may not be possible to reuse |existing|; if so, clear it so that we
+  // are forced to create a new wrapper. Note that this cannot call out to
+  // |wrap| because of the different gray unmarking semantics.
+ RootedObject existing(cx, existingArg);
+ if (existing->hasStaticPrototype() ||
+ // Note: Class asserted above, so all that's left to check is callability
+ existing->isCallable() || obj->isCallable()) {
+ existing.set(nullptr);
+ }
+
+ // The passed object may already be wrapped, or may fit a number of special
+ // cases that we need to check for and manually correct. We pass in
+ // |existingArg| instead of |existing|, because the purpose is to get the
+ // address of the object we are transplanting onto, not to find a wrapper
+ // to reuse.
+ if (!getNonWrapperObjectForCurrentCompartment(cx, existingArg, obj)) {
+ return false;
+ }
+
+ // If the reification above resulted in a same-compartment object, we do
+ // not need to create or return an existing wrapper.
+ if (obj->compartment() == this) {
+ return true;
+ }
+
+ return getOrCreateWrapper(cx, existing, obj);
+}
+
+bool Compartment::wrap(JSContext* cx,
+ MutableHandle<JS::PropertyDescriptor> desc) {
+ if (desc.hasGetter()) {
+ if (!wrap(cx, desc.getter())) {
+ return false;
+ }
+ }
+ if (desc.hasSetter()) {
+ if (!wrap(cx, desc.setter())) {
+ return false;
+ }
+ }
+ if (desc.hasValue()) {
+ if (!wrap(cx, desc.value())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Compartment::wrap(JSContext* cx,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) {
+ if (desc.isNothing()) {
+ return true;
+ }
+
+ Rooted<PropertyDescriptor> desc_(cx, *desc);
+ if (!wrap(cx, &desc_)) {
+ return false;
+ }
+ desc.set(mozilla::Some(desc_.get()));
+ return true;
+}
+
+bool Compartment::wrap(JSContext* cx, MutableHandle<GCVector<Value>> vec) {
+ for (size_t i = 0; i < vec.length(); ++i) {
+ if (!wrap(cx, vec[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static inline bool ShouldTraceWrapper(JSObject* wrapper,
+ Compartment::EdgeSelector whichEdges) {
+ if (whichEdges == Compartment::AllEdges) {
+ return true;
+ }
+
+ bool isGray = wrapper->isMarkedGray();
+ return (whichEdges == Compartment::NonGrayEdges && !isGray) ||
+ (whichEdges == Compartment::GrayEdges && isGray);
+}
+
+void Compartment::traceWrapperTargetsInCollectedZones(JSTracer* trc,
+ EdgeSelector whichEdges) {
+ // Trace cross compartment wrapper private pointers into collected zones to
+ // either mark or update them. Wrapped object pointers are updated by
+ // sweepCrossCompartmentObjectWrappers().
+
+ MOZ_ASSERT(JS::RuntimeHeapIsMajorCollecting());
+ MOZ_ASSERT(!zone()->isCollectingFromAnyThread() ||
+ trc->runtime()->gc.isHeapCompacting());
+
+ for (WrappedObjectCompartmentEnum c(this); !c.empty(); c.popFront()) {
+ Zone* zone = c.front()->zone();
+ if (!zone->isCollectingFromAnyThread()) {
+ continue;
+ }
+
+ for (ObjectWrapperEnum e(this, c); !e.empty(); e.popFront()) {
+ JSObject* obj = e.front().value().unbarrieredGet();
+ ProxyObject* wrapper = &obj->as<ProxyObject>();
+ if (ShouldTraceWrapper(wrapper, whichEdges)) {
+ ProxyObject::traceEdgeToTarget(trc, wrapper);
+ }
+ }
+ }
+}
+
+/* static */
+void Compartment::traceIncomingCrossCompartmentEdgesForZoneGC(
+ JSTracer* trc, EdgeSelector whichEdges) {
+ MOZ_ASSERT(JS::RuntimeHeapIsMajorCollecting());
+
+ for (ZonesIter zone(trc->runtime(), SkipAtoms); !zone.done(); zone.next()) {
+ if (zone->isCollectingFromAnyThread()) {
+ continue;
+ }
+
+ for (CompartmentsInZoneIter c(zone); !c.done(); c.next()) {
+ c->traceWrapperTargetsInCollectedZones(trc, whichEdges);
+ }
+ }
+
+ // Currently we trace all debugger edges as black.
+ if (whichEdges != GrayEdges) {
+ DebugAPI::traceCrossCompartmentEdges(trc);
+ }
+}
+
+void Compartment::sweepAfterMinorGC(JSTracer* trc) {
+ crossCompartmentObjectWrappers.sweepAfterMinorGC(trc);
+
+ for (RealmsInCompartmentIter r(this); !r.done(); r.next()) {
+ r->sweepAfterMinorGC(trc);
+ }
+}
+
+// Remove dead wrappers from the table or update pointers to moved objects.
+void Compartment::traceCrossCompartmentObjectWrapperEdges(JSTracer* trc) {
+ crossCompartmentObjectWrappers.traceWeak(trc);
+}
+
+void Compartment::fixupCrossCompartmentObjectWrappersAfterMovingGC(
+ JSTracer* trc) {
+ MOZ_ASSERT(trc->runtime()->gc.isHeapCompacting());
+
+ // Sweep the wrapper map to update keys (wrapped values) in other
+ // compartments that may have been moved.
+ traceCrossCompartmentObjectWrapperEdges(trc);
+
+ // Trace the wrappers in the map to update their cross-compartment edges
+ // to wrapped values in other compartments that may have been moved.
+ traceWrapperTargetsInCollectedZones(trc, AllEdges);
+}
+
+void Compartment::fixupAfterMovingGC(JSTracer* trc) {
+ MOZ_ASSERT(zone()->isGCCompacting());
+
+ for (RealmsInCompartmentIter r(this); !r.done(); r.next()) {
+ r->fixupAfterMovingGC(trc);
+ }
+
+ // Sweep the wrapper map to update values (wrapper objects) in this
+ // compartment that may have been moved.
+ traceCrossCompartmentObjectWrapperEdges(trc);
+}
+
+void Compartment::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* compartmentObjects,
+ size_t* crossCompartmentWrappersTables,
+ size_t* compartmentsPrivateData) {
+ *compartmentObjects += mallocSizeOf(this);
+ *crossCompartmentWrappersTables +=
+ crossCompartmentObjectWrappers.sizeOfExcludingThis(mallocSizeOf);
+
+ if (auto callback = runtime_->sizeOfIncludingThisCompartmentCallback) {
+ *compartmentsPrivateData += callback(mallocSizeOf, this);
+ }
+}
+
+GlobalObject& Compartment::firstGlobal() const {
+ for (Realm* realm : realms_) {
+ if (!realm->hasInitializedGlobal()) {
+ continue;
+ }
+ GlobalObject* global = realm->maybeGlobal();
+ ExposeObjectToActiveJS(global);
+ return *global;
+ }
+ MOZ_CRASH("If all our globals are dead, why is someone expecting a global?");
+}
+
+JS_PUBLIC_API JSObject* js::GetFirstGlobalInCompartment(JS::Compartment* comp) {
+ return &comp->firstGlobal();
+}
+
+JS_PUBLIC_API bool js::CompartmentHasLiveGlobal(JS::Compartment* comp) {
+ MOZ_ASSERT(comp);
+ for (Realm* r : comp->realms()) {
+ if (r->hasLiveGlobal()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void Compartment::traceWeakNativeIterators(JSTracer* trc) {
+ /* Sweep list of native iterators. */
+ NativeIteratorListIter iter(&enumerators_);
+ while (!iter.done()) {
+ NativeIterator* ni = iter.next();
+ JSObject* iterObj = ni->iterObj();
+ if (!TraceManuallyBarrieredWeakEdge(trc, &iterObj,
+ "Compartment::enumerators_")) {
+ ni->unlink();
+ }
+ MOZ_ASSERT(ni->objectBeingIterated()->compartment() == this);
+ }
+}
diff --git a/js/src/vm/Compartment.h b/js/src/vm/Compartment.h
new file mode 100644
index 0000000000..beb883e87e
--- /dev/null
+++ b/js/src/vm/Compartment.h
@@ -0,0 +1,537 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Compartment_h
+#define vm_Compartment_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+
+#include <stddef.h>
+#include <utility>
+
+#include "gc/NurseryAwareHashMap.h"
+#include "gc/ZoneAllocator.h"
+#include "vm/Iteration.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+
+namespace js {
+
+JSString* CopyStringPure(JSContext* cx, JSString* str);
+
+// The data structure used to store JSObject CCWs for a given source
+// compartment. These are partitioned by target compartment so that we can
+// easily select wrappers by source and target compartment. String CCWs are
+// stored in a separate per-zone map.
+class ObjectWrapperMap {
+ static const size_t InitialInnerMapSize = 4;
+
+ using InnerMap = NurseryAwareHashMap<JSObject*, JSObject*, ZoneAllocPolicy>;
+ using OuterMap = GCHashMap<JS::Compartment*, InnerMap,
+ DefaultHasher<JS::Compartment*>, ZoneAllocPolicy>;
+
+ OuterMap map;
+ Zone* zone;
+
+ public:
+ class Enum {
+ Enum(const Enum&) = delete;
+ void operator=(const Enum&) = delete;
+
+ void goToNext() {
+ if (outer.isNothing()) {
+ return;
+ }
+ for (; !outer->empty(); outer->popFront()) {
+ JS::Compartment* c = outer->front().key();
+ MOZ_ASSERT(c);
+ if (filter && !filter->match(c)) {
+ continue;
+ }
+ InnerMap& m = outer->front().value();
+ if (!m.empty()) {
+ if (inner.isSome()) {
+ inner.reset();
+ }
+ inner.emplace(m);
+ outer->popFront();
+ return;
+ }
+ }
+ }
+
+ mozilla::Maybe<OuterMap::Enum> outer;
+ mozilla::Maybe<InnerMap::Enum> inner;
+ const CompartmentFilter* filter;
+
+ public:
+ explicit Enum(ObjectWrapperMap& m) : filter(nullptr) {
+ outer.emplace(m.map);
+ goToNext();
+ }
+
+ Enum(ObjectWrapperMap& m, const CompartmentFilter& f) : filter(&f) {
+ outer.emplace(m.map);
+ goToNext();
+ }
+
+ Enum(ObjectWrapperMap& m, JS::Compartment* target) {
+ // Leave the outer map as nothing and only iterate the inner map we
+ // find here.
+ auto p = m.map.lookup(target);
+ if (p) {
+ inner.emplace(p->value());
+ }
+ }
+
+ bool empty() const {
+ return (outer.isNothing() || outer->empty()) &&
+ (inner.isNothing() || inner->empty());
+ }
+
+ InnerMap::Entry& front() const {
+ MOZ_ASSERT(inner.isSome() && !inner->empty());
+ return inner->front();
+ }
+
+ void popFront() {
+ MOZ_ASSERT(!empty());
+ if (!inner->empty()) {
+ inner->popFront();
+ if (!inner->empty()) {
+ return;
+ }
+ }
+ goToNext();
+ }
+
+ void removeFront() {
+ MOZ_ASSERT(inner.isSome());
+ inner->removeFront();
+ }
+ };
+
+ class Ptr : public InnerMap::Ptr {
+ friend class ObjectWrapperMap;
+
+ InnerMap* map;
+
+ Ptr() : InnerMap::Ptr(), map(nullptr) {}
+ Ptr(const InnerMap::Ptr& p, InnerMap& m) : InnerMap::Ptr(p), map(&m) {}
+ };
+
+ // Iterator over compartments that the ObjectWrapperMap has wrappers for.
+ class WrappedCompartmentEnum {
+ OuterMap::Enum iter;
+
+ void settle() {
+ // It's possible for InnerMap to be empty after wrappers have been
+ // removed, e.g. by being nuked.
+ while (!iter.empty() && iter.front().value().empty()) {
+ iter.popFront();
+ }
+ }
+
+ public:
+ explicit WrappedCompartmentEnum(ObjectWrapperMap& map) : iter(map.map) {
+ settle();
+ }
+ bool empty() const { return iter.empty(); }
+ JS::Compartment* front() const { return iter.front().key(); }
+ operator JS::Compartment*() const { return front(); }
+ void popFront() {
+ iter.popFront();
+ settle();
+ }
+ };
+
+ explicit ObjectWrapperMap(Zone* zone) : map(zone), zone(zone) {}
+ ObjectWrapperMap(Zone* zone, size_t aLen) : map(zone, aLen), zone(zone) {}
+
+ bool empty() {
+ if (map.empty()) {
+ return true;
+ }
+ for (OuterMap::Enum e(map); !e.empty(); e.popFront()) {
+ if (!e.front().value().empty()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ Ptr lookup(JSObject* obj) const {
+ auto op = map.lookup(obj->compartment());
+ if (op) {
+ auto ip = op->value().lookup(obj);
+ if (ip) {
+ return Ptr(ip, op->value());
+ }
+ }
+ return Ptr();
+ }
+
+ void remove(Ptr p) {
+ if (p) {
+ p.map->remove(p);
+ }
+ }
+
+ [[nodiscard]] bool put(JSObject* key, JSObject* value) {
+ JS::Compartment* comp = key->compartment();
+ auto ptr = map.lookupForAdd(comp);
+ if (!ptr) {
+ InnerMap m(zone, InitialInnerMapSize);
+ if (!map.add(ptr, comp, std::move(m))) {
+ return false;
+ }
+ }
+ return ptr->value().put(key, value);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ size_t size = map.shallowSizeOfExcludingThis(mallocSizeOf);
+ for (OuterMap::Enum e(map); !e.empty(); e.popFront()) {
+ size += e.front().value().sizeOfExcludingThis(mallocSizeOf);
+ }
+ return size;
+ }
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ size_t size = map.shallowSizeOfIncludingThis(mallocSizeOf);
+ for (OuterMap::Enum e(map); !e.empty(); e.popFront()) {
+ size += e.front().value().sizeOfIncludingThis(mallocSizeOf);
+ }
+ return size;
+ }
+
+ bool hasNurseryAllocatedWrapperEntries(const CompartmentFilter& f) {
+ for (OuterMap::Enum e(map); !e.empty(); e.popFront()) {
+ JS::Compartment* c = e.front().key();
+ if (c && !f.match(c)) {
+ continue;
+ }
+ InnerMap& m = e.front().value();
+ if (m.hasNurseryEntries()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void sweepAfterMinorGC(JSTracer* trc) {
+ for (OuterMap::Enum e(map); !e.empty(); e.popFront()) {
+ InnerMap& m = e.front().value();
+ m.sweepAfterMinorGC(trc);
+ if (m.empty()) {
+ e.removeFront();
+ }
+ }
+ }
+
+ void traceWeak(JSTracer* trc) {
+ for (OuterMap::Enum e(map); !e.empty(); e.popFront()) {
+ InnerMap& m = e.front().value();
+ m.traceWeak(trc);
+ if (m.empty()) {
+ e.removeFront();
+ }
+ }
+ }
+};
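+
+// A hedged usage sketch (hypothetical helper, not part of this patch)
+// showing how the two-level structure above is consulted: lookup() first
+// selects the inner map for the wrapped object's compartment, then finds the
+// wrapper inside it, so per-target-compartment enumeration stays cheap.
+//
+//   JSObject* MaybeFindWrapper(ObjectWrapperMap& wrappers, JSObject* wrapped) {
+//     if (ObjectWrapperMap::Ptr p = wrappers.lookup(wrapped)) {
+//       return p->value().get();  // The CCW proxy for |wrapped|.
+//     }
+//     return nullptr;  // No wrapper yet; the caller would create one.
+//   }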
+
+using StringWrapperMap =
+ NurseryAwareHashMap<JSString*, JSString*, ZoneAllocPolicy,
+ DuplicatesPossible>;
+
+} // namespace js
+
+class JS::Compartment {
+ JS::Zone* zone_;
+ JSRuntime* runtime_;
+ bool invisibleToDebugger_;
+
+ js::ObjectWrapperMap crossCompartmentObjectWrappers;
+
+ using RealmVector = js::Vector<JS::Realm*, 1, js::ZoneAllocPolicy>;
+ RealmVector realms_;
+
+ public:
+ /*
+ * During GC, stores the head of a list of incoming pointers from gray cells.
+ *
+ * The objects in the list are either cross-compartment wrappers, or
+ * debugger wrapper objects. The list link is either in the second extra
+ * slot for the former, or a special slot for the latter.
+ */
+ JSObject* gcIncomingGrayPointers = nullptr;
+
+ void* data = nullptr;
+
+ // Fields set and used by the GC. Be careful, may be stale after we return
+ // to the mutator.
+ struct {
+ // These flags help us to discover if a compartment that shouldn't be
+ // alive manages to outlive a GC. Note that these flags have to be on
+ // the compartment, not the realm, because same-compartment realms can
+ // have cross-realm pointers without wrappers.
+ bool scheduledForDestruction = false;
+ bool hasMarkedCells = false;
+ bool maybeAlive = true;
+
+ // During GC, we may set this to |true| if we entered a realm in this
+ // compartment. Note that (without a stack walk) we don't know exactly
+ // *which* realms, because Realm::enterRealmDepthIgnoringJit_ does not
+ // account for cross-Realm calls in JIT code updating cx->realm_. See
+ // also the enterRealmDepthIgnoringJit_ comment.
+ bool hasEnteredRealm = false;
+ } gcState;
+
+ // True if all outgoing wrappers have been nuked. This happens when all realms
+ // have been nuked and NukeCrossCompartmentWrappers is called with the
+ // NukeAllReferences option. This prevents us from creating new wrappers for
+ // the compartment.
+ bool nukedOutgoingWrappers = false;
+
+ JS::Zone* zone() { return zone_; }
+ const JS::Zone* zone() const { return zone_; }
+
+ JSRuntime* runtimeFromMainThread() const {
+ MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtime_));
+ return runtime_;
+ }
+
+ // Note: Unrestricted access to the zone's runtime from an arbitrary
+ // thread can easily lead to races. Use this method very carefully.
+ JSRuntime* runtimeFromAnyThread() const { return runtime_; }
+
+ // Certain compartments are implementation details of the embedding, and
+ // references to them should never leak out to script. For realms belonging to
+ // this compartment, onNewGlobalObject does not fire, and addDebuggee is a
+ // no-op.
+ bool invisibleToDebugger() const { return invisibleToDebugger_; }
+
+ RealmVector& realms() { return realms_; }
+
+ // Cross-compartment wrappers are shared by all realms in the compartment, but
+ // they still have a per-realm ObjectGroup etc. To prevent us from having
+ // multiple realms, each with some cross-compartment wrappers potentially
+ // keeping the realm alive longer than necessary, we always allocate CCWs in
+ // the first realm.
+ js::GlobalObject& firstGlobal() const;
+ js::GlobalObject& globalForNewCCW() const { return firstGlobal(); }
+
+ void assertNoCrossCompartmentWrappers() {
+ MOZ_ASSERT(crossCompartmentObjectWrappers.empty());
+ }
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* compartmentObjects,
+ size_t* crossCompartmentWrappersTables,
+ size_t* compartmentsPrivateData);
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkObjectWrappersAfterMovingGC();
+#endif
+
+ private:
+ bool getNonWrapperObjectForCurrentCompartment(JSContext* cx,
+ js::HandleObject origObj,
+ js::MutableHandleObject obj);
+ bool getOrCreateWrapper(JSContext* cx, js::HandleObject existing,
+ js::MutableHandleObject obj);
+
+ public:
+ explicit Compartment(JS::Zone* zone, bool invisibleToDebugger);
+
+ void destroy(JS::GCContext* gcx);
+
+ [[nodiscard]] inline bool wrap(JSContext* cx, JS::MutableHandleValue vp);
+
+ [[nodiscard]] inline bool wrap(JSContext* cx,
+ MutableHandle<mozilla::Maybe<Value>> vp);
+
+ [[nodiscard]] bool wrap(JSContext* cx, js::MutableHandleString strp);
+ [[nodiscard]] bool wrap(JSContext* cx, js::MutableHandle<JS::BigInt*> bi);
+ [[nodiscard]] bool wrap(JSContext* cx, JS::MutableHandleObject obj);
+ [[nodiscard]] bool wrap(JSContext* cx,
+ JS::MutableHandle<JS::PropertyDescriptor> desc);
+ [[nodiscard]] bool wrap(
+ JSContext* cx,
+ JS::MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc);
+ [[nodiscard]] bool wrap(JSContext* cx,
+ JS::MutableHandle<JS::GCVector<JS::Value>> vec);
+#ifdef ENABLE_RECORD_TUPLE
+ [[nodiscard]] bool wrapExtendedPrimitive(JSContext* cx,
+ JS::MutableHandleObject obj);
+#endif
+ [[nodiscard]] bool rewrap(JSContext* cx, JS::MutableHandleObject obj,
+ JS::HandleObject existing);
+
+ [[nodiscard]] bool putWrapper(JSContext* cx, JSObject* wrapped,
+ JSObject* wrapper);
+
+ [[nodiscard]] bool putWrapper(JSContext* cx, JSString* wrapped,
+ JSString* wrapper);
+
+ js::ObjectWrapperMap::Ptr lookupWrapper(JSObject* obj) const {
+ return crossCompartmentObjectWrappers.lookup(obj);
+ }
+
+ inline js::StringWrapperMap::Ptr lookupWrapper(JSString* str) const;
+
+ void removeWrapper(js::ObjectWrapperMap::Ptr p);
+
+ bool hasNurseryAllocatedObjectWrapperEntries(const js::CompartmentFilter& f) {
+ return crossCompartmentObjectWrappers.hasNurseryAllocatedWrapperEntries(f);
+ }
+
+ // Iterator over |wrapped -> wrapper| entries for object CCWs in a given
+ // compartment. Can be optionally restricted by target compartment.
+ struct ObjectWrapperEnum : public js::ObjectWrapperMap::Enum {
+ explicit ObjectWrapperEnum(Compartment* c)
+ : js::ObjectWrapperMap::Enum(c->crossCompartmentObjectWrappers) {}
+ explicit ObjectWrapperEnum(Compartment* c, const js::CompartmentFilter& f)
+ : js::ObjectWrapperMap::Enum(c->crossCompartmentObjectWrappers, f) {}
+ explicit ObjectWrapperEnum(Compartment* c, Compartment* target)
+ : js::ObjectWrapperMap::Enum(c->crossCompartmentObjectWrappers,
+ target) {
+ MOZ_ASSERT(target);
+ }
+ };
+
+ // Iterator over compartments that this compartment has CCWs for.
+ struct WrappedObjectCompartmentEnum
+ : public js::ObjectWrapperMap::WrappedCompartmentEnum {
+ explicit WrappedObjectCompartmentEnum(Compartment* c)
+ : js::ObjectWrapperMap::WrappedCompartmentEnum(
+ c->crossCompartmentObjectWrappers) {}
+ };
+
+ /*
+ * These methods mark pointers that cross compartment boundaries. They are
+ * called in per-zone GCs to prevent the wrappers' outgoing edges from
+ * dangling (full GCs naturally follow pointers across compartments) and
+ * when compacting to update cross-compartment pointers.
+ */
+ enum EdgeSelector { AllEdges, NonGrayEdges, GrayEdges };
+ void traceWrapperTargetsInCollectedZones(JSTracer* trc,
+ EdgeSelector whichEdges);
+ static void traceIncomingCrossCompartmentEdgesForZoneGC(
+ JSTracer* trc, EdgeSelector whichEdges);
+
+ void sweepRealms(JS::GCContext* gcx, bool keepAtleastOne,
+ bool destroyingRuntime);
+ void sweepAfterMinorGC(JSTracer* trc);
+ void traceCrossCompartmentObjectWrapperEdges(JSTracer* trc);
+
+ void fixupCrossCompartmentObjectWrappersAfterMovingGC(JSTracer* trc);
+ void fixupAfterMovingGC(JSTracer* trc);
+
+ [[nodiscard]] bool findSweepGroupEdges();
+
+ private:
+ // Head node of list of active iterators that may need deleted property
+ // suppression.
+ js::NativeIteratorListHead enumerators_;
+
+ public:
+ js::NativeIteratorListHead* enumeratorsAddr() { return &enumerators_; }
+ MOZ_ALWAYS_INLINE bool objectMaybeInIteration(JSObject* obj);
+
+ void traceWeakNativeIterators(JSTracer* trc);
+};
+
+namespace js {
+
+// We only set the hasMarkedCells flag for objects and scripts. It's assumed
+// that, if a compartment is alive, then it will have at least some live object
+// or script in it. Even if we get this wrong, the worst that will happen is
+// that scheduledForDestruction will be set on the compartment, which will cause
+// some extra GC activity to try to free the compartment.
+template <typename T>
+inline void SetCompartmentHasMarkedCells(T* thing) {}
+
+template <>
+inline void SetCompartmentHasMarkedCells(JSObject* thing) {
+ thing->compartment()->gcState.hasMarkedCells = true;
+}
+
+template <>
+inline void SetCompartmentHasMarkedCells(JSScript* thing) {
+ thing->compartment()->gcState.hasMarkedCells = true;
+}
+
+/*
+ * AutoWrapperVector and AutoWrapperRooter can be used to store wrappers that
+ * are obtained from the cross-compartment map. However, these classes should
+ * not be used if the wrapper will escape. For example, it should not be stored
+ * in the heap.
+ *
+ * The AutoWrapper rooters are different from other autorooters because their
+ * wrappers are marked on every GC slice rather than just the first one. If
+ * there's some wrapper that we want to use temporarily without causing it to be
+ * marked, we can use these AutoWrapper classes. If we get unlucky and a GC
+ * slice runs during the code using the wrapper, the GC will mark the wrapper so
+ * that it doesn't get swept out from under us. Otherwise, the wrapper needn't
+ * be marked. This is useful in functions like JS_TransplantObject that
+ * manipulate wrappers in compartments that may no longer be alive.
+ */
+
+/*
+ * This class stores the data for AutoWrapperVector and AutoWrapperRooter. It
+ * should not be used in any other situations.
+ */
+struct WrapperValue {
+ /*
+ * We use unsafeGet() in the constructors to avoid invoking a read barrier
+ * on the wrapper, which may be dead (see the comment about bug 803376 in
+ * gc/GC.cpp regarding this). If there is an incremental GC while the
+ * wrapper is in use, the AutoWrapper rooter will ensure the wrapper gets
+ * marked.
+ */
+ explicit WrapperValue(const ObjectWrapperMap::Ptr& ptr)
+ : value(*ptr->value().unsafeGet()) {}
+
+ explicit WrapperValue(const ObjectWrapperMap::Enum& e)
+ : value(*e.front().value().unsafeGet()) {}
+
+ JSObject*& get() { return value; }
+ JSObject* get() const { return value; }
+ operator JSObject*() const { return value; }
+
+ private:
+ JSObject* value;
+};
+
+class MOZ_RAII AutoWrapperVector : public JS::GCVector<WrapperValue, 8>,
+ public JS::AutoGCRooter {
+ public:
+ explicit AutoWrapperVector(JSContext* cx)
+ : JS::GCVector<WrapperValue, 8>(cx),
+ JS::AutoGCRooter(cx, JS::AutoGCRooter::Kind::WrapperVector) {}
+
+ void trace(JSTracer* trc);
+};
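+
+// A minimal usage sketch (illustrative; |comp| is a hypothetical
+// Compartment*): collect wrappers from the map so they stay marked across
+// incremental GC slices without escaping to the heap.
+//
+//   AutoWrapperVector toNuke(cx);
+//   for (Compartment::ObjectWrapperEnum e(comp); !e.empty(); e.popFront()) {
+//     if (!toNuke.append(WrapperValue(e))) {
+//       return false;  // OOM; entries appended so far remain rooted.
+//     }
+//   }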
+
+class MOZ_RAII AutoWrapperRooter : public JS::AutoGCRooter {
+ public:
+ AutoWrapperRooter(JSContext* cx, const WrapperValue& v)
+ : JS::AutoGCRooter(cx, JS::AutoGCRooter::Kind::Wrapper), value(v) {}
+
+ operator JSObject*() const { return value; }
+
+ void trace(JSTracer* trc);
+
+ private:
+ WrapperValue value;
+};
+
+} /* namespace js */
+
+#endif /* vm_Compartment_h */
diff --git a/js/src/vm/CompilationAndEvaluation.cpp b/js/src/vm/CompilationAndEvaluation.cpp
new file mode 100644
index 0000000000..786002beb0
--- /dev/null
+++ b/js/src/vm/CompilationAndEvaluation.cpp
@@ -0,0 +1,613 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Same-thread compilation and evaluation APIs. */
+
+#include "js/CompilationAndEvaluation.h"
+
+#include "mozilla/Maybe.h" // mozilla::None, mozilla::Some
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+
+#include <utility> // std::move
+
+#include "jsapi.h" // JS_WrapValue
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "debugger/DebugAPI.h"
+#include "frontend/BytecodeCompilation.h" // frontend::CompileGlobalScript
+#include "frontend/BytecodeCompiler.h" // frontend::IsIdentifier
+#include "frontend/CompilationStencil.h" // for frontened::{CompilationStencil, BorrowingCompilationStencil, CompilationGCOutput}
+#include "frontend/FrontendContext.h" // js::AutoReportFrontendContext
+#include "frontend/Parser.h" // frontend::Parser, frontend::ParseGoal
+#include "js/CharacterEncoding.h" // JS::UTF8Chars, JS::UTF8CharsToNewTwoByteCharsZ
+#include "js/experimental/JSStencil.h" // JS::Stencil
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/RootingAPI.h" // JS::Rooted
+#include "js/SourceText.h" // JS::SourceText
+#include "js/TypeDecls.h" // JS::HandleObject, JS::MutableHandleScript
+#include "js/Utility.h" // js::MallocArena, JS::UniqueTwoByteChars
+#include "js/Value.h" // JS::Value
+#include "util/CompleteFile.h" // js::FileContents, js::ReadCompleteFile
+#include "util/StringBuffer.h" // js::StringBuffer
+#include "vm/EnvironmentObject.h" // js::CreateNonSyntacticEnvironmentChain
+#include "vm/ErrorReporting.h" // js::ErrorMetadata, js::ReportCompileErrorLatin1
+#include "vm/Interpreter.h" // js::Execute
+#include "vm/JSContext.h" // JSContext
+
+#include "vm/JSContext-inl.h" // JSContext::check
+
+using mozilla::Utf8Unit;
+
+using JS::CompileOptions;
+using JS::HandleObject;
+using JS::ReadOnlyCompileOptions;
+using JS::SourceOwnership;
+using JS::SourceText;
+using JS::UniqueTwoByteChars;
+using JS::UTF8Chars;
+using JS::UTF8CharsToNewTwoByteCharsZ;
+
+using namespace js;
+
+JS_PUBLIC_API void JS::detail::ReportSourceTooLong(JSContext* cx) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SOURCE_TOO_LONG);
+}
+
+static void ReportSourceTooLongImpl(JS::FrontendContext* fc, ...) {
+ va_list args;
+ va_start(args, fc);
+
+ js::ErrorMetadata metadata;
+ metadata.filename = "<unknown>";
+ metadata.lineNumber = 0;
+ metadata.columnNumber = 0;
+ metadata.lineLength = 0;
+ metadata.tokenOffset = 0;
+ metadata.isMuted = false;
+
+ js::ReportCompileErrorLatin1(fc, std::move(metadata), nullptr,
+ JSMSG_SOURCE_TOO_LONG, &args);
+
+ va_end(args);
+}
+
+JS_PUBLIC_API void JS::detail::ReportSourceTooLong(JS::FrontendContext* fc) {
+ ReportSourceTooLongImpl(fc);
+}
+
+template <typename Unit>
+static JSScript* CompileSourceBuffer(JSContext* cx,
+ const ReadOnlyCompileOptions& options,
+ SourceText<Unit>& srcBuf) {
+ ScopeKind scopeKind =
+ options.nonSyntacticScope ? ScopeKind::NonSyntactic : ScopeKind::Global;
+
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ JS::Rooted<JSScript*> script(cx);
+ {
+ AutoReportFrontendContext fc(cx);
+ script = frontend::CompileGlobalScript(cx, &fc, options, srcBuf, scopeKind);
+ }
+ return script;
+}
+
+JSScript* JS::Compile(JSContext* cx, const ReadOnlyCompileOptions& options,
+ SourceText<char16_t>& srcBuf) {
+ return CompileSourceBuffer(cx, options, srcBuf);
+}
+
+JSScript* JS::Compile(JSContext* cx, const ReadOnlyCompileOptions& options,
+ SourceText<Utf8Unit>& srcBuf) {
+ return CompileSourceBuffer(cx, options, srcBuf);
+}
+
+JS_PUBLIC_API bool JS::StartIncrementalEncoding(JSContext* cx,
+ RefPtr<JS::Stencil>&& stencil) {
+ MOZ_ASSERT(cx);
+ MOZ_ASSERT(!stencil->hasMultipleReference());
+
+ auto* source = stencil->source.get();
+
+ UniquePtr<frontend::ExtensibleCompilationStencil> initial;
+ if (stencil->hasOwnedBorrow()) {
+ initial.reset(stencil->takeOwnedBorrow());
+ stencil = nullptr;
+ } else {
+ initial = cx->make_unique<frontend::ExtensibleCompilationStencil>(
+ stencil->source);
+ if (!initial) {
+ return false;
+ }
+
+ AutoReportFrontendContext fc(cx);
+ if (!initial->steal(&fc, std::move(stencil))) {
+ return false;
+ }
+ }
+
+ return source->startIncrementalEncoding(cx, std::move(initial));
+}
+
+JSScript* JS::CompileUtf8File(JSContext* cx,
+ const ReadOnlyCompileOptions& options,
+ FILE* file) {
+ FileContents buffer(cx);
+ if (!ReadCompleteFile(cx, file, buffer)) {
+ return nullptr;
+ }
+
+ SourceText<Utf8Unit> srcBuf;
+ if (!srcBuf.init(cx, reinterpret_cast<const char*>(buffer.begin()),
+ buffer.length(), SourceOwnership::Borrowed)) {
+ return nullptr;
+ }
+
+ return CompileSourceBuffer(cx, options, srcBuf);
+}
+
+JSScript* JS::CompileUtf8Path(JSContext* cx,
+ const ReadOnlyCompileOptions& optionsArg,
+ const char* filename) {
+ AutoFile file;
+ if (!file.open(cx, filename)) {
+ return nullptr;
+ }
+
+ CompileOptions options(cx, optionsArg);
+ options.setFileAndLine(filename, 1);
+ return CompileUtf8File(cx, options, file.fp());
+}
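+
+// A hedged end-to-end sketch (hypothetical embedder code; assumes a realm is
+// entered on |cx| and "main.js" exists): compile a script from a path, then
+// run it against the global lexical environment.
+//
+//   JS::CompileOptions options(cx);
+//   JS::RootedScript script(cx, JS::CompileUtf8Path(cx, options, "main.js"));
+//   if (!script) {
+//     return false;  // A compile error is now pending on |cx|.
+//   }
+//   JS::RootedValue rval(cx);
+//   if (!JS_ExecuteScript(cx, script, &rval)) {
+//     return false;  // Ditto for a runtime error.
+//   }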
+
+JS_PUBLIC_API bool JS_Utf8BufferIsCompilableUnit(JSContext* cx,
+ HandleObject obj,
+ const char* utf8,
+ size_t length) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ cx->clearPendingException();
+
+ JS::UniqueTwoByteChars chars{
+ UTF8CharsToNewTwoByteCharsZ(cx, UTF8Chars(utf8, length), &length,
+ js::MallocArena)
+ .get()};
+ if (!chars) {
+ return true;
+ }
+
+ // Return true on any out-of-memory error or non-EOF-related syntax error, so
+ // our caller doesn't try to collect more buffered source.
+ bool result = true;
+
+ using frontend::FullParseHandler;
+ using frontend::ParseGoal;
+ using frontend::Parser;
+
+ AutoReportFrontendContext fc(cx,
+ AutoReportFrontendContext::Warning::Suppress);
+ CompileOptions options(cx);
+ Rooted<frontend::CompilationInput> input(cx,
+ frontend::CompilationInput(options));
+ if (!input.get().initForGlobal(&fc)) {
+ return false;
+ }
+
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+ js::frontend::NoScopeBindingCache scopeCache;
+ frontend::CompilationState compilationState(&fc, allocScope, input.get());
+ if (!compilationState.init(&fc, &scopeCache)) {
+ return false;
+ }
+
+ Parser<FullParseHandler, char16_t> parser(&fc, options, chars.get(), length,
+ /* foldConstants = */ true,
+ compilationState,
+ /* syntaxParser = */ nullptr);
+ if (!parser.checkOptions() || !parser.parse()) {
+ // We ran into an error. If it was because we ran out of source, we
+ // return false so our caller knows to try to collect more buffered
+ // source.
+ if (parser.isUnexpectedEOF()) {
+ result = false;
+ }
+
+ cx->clearPendingException();
+ }
+
+ return result;
+}
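+
+// A hedged usage sketch (hypothetical REPL loop; ReadMoreInput is assumed):
+// keep buffering input until it forms a compilable unit, then compile and
+// evaluate the buffer. A false return here specifically means the only
+// problem was an unexpected EOF, i.e. more input may complete the unit.
+//
+//   while (!JS_Utf8BufferIsCompilableUnit(cx, global, buffer.begin(),
+//                                         buffer.length())) {
+//     if (!ReadMoreInput(&buffer)) {
+//       break;  // EOF on input; compile what we have.
+//     }
+//   }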
+
+class FunctionCompiler {
+ private:
+ JSContext* const cx_;
+ Rooted<JSAtom*> nameAtom_;
+ StringBuffer funStr_;
+
+ uint32_t parameterListEnd_ = 0;
+ bool nameIsIdentifier_ = true;
+
+ public:
+ explicit FunctionCompiler(JSContext* cx, FrontendContext* fc)
+ : cx_(cx), nameAtom_(cx), funStr_(fc) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ }
+
+ [[nodiscard]] bool init(const char* name, unsigned nargs,
+ const char* const* argnames) {
+ if (!funStr_.ensureTwoByteChars()) {
+ return false;
+ }
+ if (!funStr_.append("function ")) {
+ return false;
+ }
+
+ if (name) {
+ size_t nameLen = strlen(name);
+
+ nameAtom_ = Atomize(cx_, name, nameLen);
+ if (!nameAtom_) {
+ return false;
+ }
+
+ // If the name is an identifier, we can just add it to source text.
+ // Otherwise we'll have to set it manually later.
+ nameIsIdentifier_ = js::frontend::IsIdentifier(
+ reinterpret_cast<const Latin1Char*>(name), nameLen);
+ if (nameIsIdentifier_) {
+ if (!funStr_.append(nameAtom_)) {
+ return false;
+ }
+ }
+ }
+
+ if (!funStr_.append("(")) {
+ return false;
+ }
+
+ for (unsigned i = 0; i < nargs; i++) {
+ if (i != 0) {
+ if (!funStr_.append(", ")) {
+ return false;
+ }
+ }
+ if (!funStr_.append(argnames[i], strlen(argnames[i]))) {
+ return false;
+ }
+ }
+
+ // Remember the position of ")".
+ parameterListEnd_ = funStr_.length();
+ static_assert(FunctionConstructorMedialSigils[0] == ')');
+
+ return funStr_.append(FunctionConstructorMedialSigils.data(),
+ FunctionConstructorMedialSigils.length());
+ }
+
+ template <typename Unit>
+ [[nodiscard]] inline bool addFunctionBody(const SourceText<Unit>& srcBuf) {
+ return funStr_.append(srcBuf.get(), srcBuf.length());
+ }
+
+ JSFunction* finish(HandleObjectVector envChain,
+ const ReadOnlyCompileOptions& optionsArg) {
+ using js::frontend::FunctionSyntaxKind;
+
+ if (!funStr_.append(FunctionConstructorFinalBrace.data(),
+ FunctionConstructorFinalBrace.length())) {
+ return nullptr;
+ }
+
+ size_t newLen = funStr_.length();
+ UniqueTwoByteChars stolen(funStr_.stealChars());
+ if (!stolen) {
+ return nullptr;
+ }
+
+ SourceText<char16_t> newSrcBuf;
+ if (!newSrcBuf.init(cx_, std::move(stolen), newLen)) {
+ return nullptr;
+ }
+
+ RootedObject enclosingEnv(cx_);
+ ScopeKind kind;
+ if (envChain.empty()) {
+ // A compiled function has a burned-in environment chain, so if no exotic
+ // environment was requested, we can use the global lexical environment
+      // directly and need not worry about any potential non-syntactic scope.
+ enclosingEnv.set(&cx_->global()->lexicalEnvironment());
+ kind = ScopeKind::Global;
+ } else {
+ if (!CreateNonSyntacticEnvironmentChain(cx_, envChain, &enclosingEnv)) {
+ return nullptr;
+ }
+ kind = ScopeKind::NonSyntactic;
+ }
+
+ cx_->check(enclosingEnv);
+
+ // Make sure the static scope chain matches up when we have a
+ // non-syntactic scope.
+ MOZ_ASSERT_IF(!IsGlobalLexicalEnvironment(enclosingEnv),
+ kind == ScopeKind::NonSyntactic);
+
+ CompileOptions options(cx_, optionsArg);
+ options.setNonSyntacticScope(kind == ScopeKind::NonSyntactic);
+
+ FunctionSyntaxKind syntaxKind = FunctionSyntaxKind::Statement;
+ RootedFunction fun(cx_);
+ if (kind == ScopeKind::NonSyntactic) {
+ Rooted<Scope*> enclosingScope(
+ cx_, GlobalScope::createEmpty(cx_, ScopeKind::NonSyntactic));
+ if (!enclosingScope) {
+ return nullptr;
+ }
+
+ fun = js::frontend::CompileStandaloneFunctionInNonSyntacticScope(
+ cx_, options, newSrcBuf, mozilla::Some(parameterListEnd_), syntaxKind,
+ enclosingScope);
+ } else {
+ fun = js::frontend::CompileStandaloneFunction(
+ cx_, options, newSrcBuf, mozilla::Some(parameterListEnd_),
+ syntaxKind);
+ }
+ if (!fun) {
+ return nullptr;
+ }
+
+ // When the function name isn't a valid identifier, the generated function
+ // source in srcBuf won't include the name, so name the function manually.
+ if (!nameIsIdentifier_) {
+ fun->setAtom(nameAtom_);
+ }
+
+ if (fun->isInterpreted()) {
+ fun->initEnvironment(enclosingEnv);
+ }
+
+ return fun;
+ }
+};
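+
+// For example (illustrative embedder call), compiling the body
+// "return a + b;" with
+//
+//   const char* argnames[] = {"a", "b"};
+//   JSFunction* fun =
+//       JS::CompileFunction(cx, envChain, options, "add", 2, argnames, srcBuf);
+//
+// makes FunctionCompiler assemble the source text
+//
+//   function add(a, b) {
+//   return a + b;
+//   }
+//
+// where |parameterListEnd_| records the offset of the ")" separating the
+// synthesized parameter list from the body.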
+
+JS_PUBLIC_API JSFunction* JS::CompileFunction(
+ JSContext* cx, HandleObjectVector envChain,
+ const ReadOnlyCompileOptions& options, const char* name, unsigned nargs,
+ const char* const* argnames, SourceText<char16_t>& srcBuf) {
+ ManualReportFrontendContext fc(cx);
+ FunctionCompiler compiler(cx, &fc);
+ if (!compiler.init(name, nargs, argnames) ||
+ !compiler.addFunctionBody(srcBuf)) {
+ fc.failure();
+ return nullptr;
+ }
+
+ fc.ok();
+ return compiler.finish(envChain, options);
+}
+
+JS_PUBLIC_API JSFunction* JS::CompileFunction(
+ JSContext* cx, HandleObjectVector envChain,
+ const ReadOnlyCompileOptions& options, const char* name, unsigned nargs,
+ const char* const* argnames, SourceText<Utf8Unit>& srcBuf) {
+ ManualReportFrontendContext fc(cx);
+ FunctionCompiler compiler(cx, &fc);
+ if (!compiler.init(name, nargs, argnames) ||
+ !compiler.addFunctionBody(srcBuf)) {
+ fc.failure();
+ return nullptr;
+ }
+
+ fc.ok();
+ return compiler.finish(envChain, options);
+}
+
+JS_PUBLIC_API JSFunction* JS::CompileFunctionUtf8(
+ JSContext* cx, HandleObjectVector envChain,
+ const ReadOnlyCompileOptions& options, const char* name, unsigned nargs,
+ const char* const* argnames, const char* bytes, size_t length) {
+ SourceText<Utf8Unit> srcBuf;
+ if (!srcBuf.init(cx, bytes, length, SourceOwnership::Borrowed)) {
+ return nullptr;
+ }
+
+ return CompileFunction(cx, envChain, options, name, nargs, argnames, srcBuf);
+}
+
+JS_PUBLIC_API void JS::ExposeScriptToDebugger(JSContext* cx,
+ HandleScript script) {
+ MOZ_ASSERT(cx);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+
+ DebugAPI::onNewScript(cx, script);
+}
+
+JS_PUBLIC_API bool JS::UpdateDebugMetadata(
+ JSContext* cx, Handle<JSScript*> script, const InstantiateOptions& options,
+ HandleValue privateValue, HandleString elementAttributeName,
+ HandleScript introScript, HandleScript scriptOrModule) {
+ Rooted<ScriptSourceObject*> sso(cx, script->sourceObject());
+
+ if (!ScriptSourceObject::initElementProperties(cx, sso,
+ elementAttributeName)) {
+ return false;
+ }
+
+ // There is no equivalent of cross-compartment wrappers for scripts. If the
+ // introduction script and ScriptSourceObject are in different compartments,
+ // we would be creating a cross-compartment script reference, which is
+ // forbidden. We can still store a CCW to the script source object though.
+ RootedValue introductionScript(cx);
+ if (introScript) {
+ if (introScript->compartment() == cx->compartment()) {
+ introductionScript.setPrivateGCThing(introScript);
+ }
+ }
+ sso->setIntroductionScript(introductionScript);
+
+ RootedValue privateValueStore(cx, UndefinedValue());
+ if (privateValue.isUndefined()) {
+ // Set the private value to that of the script or module that this source is
+ // part of, if any.
+ if (scriptOrModule) {
+ privateValueStore = scriptOrModule->sourceObject()->getPrivate();
+ }
+ } else {
+ privateValueStore = privateValue;
+ }
+
+ if (!privateValueStore.isUndefined()) {
+ if (!JS_WrapValue(cx, &privateValueStore)) {
+ return false;
+ }
+ }
+ sso->setPrivate(cx->runtime(), privateValueStore);
+
+ if (!options.hideScriptFromDebugger) {
+ JS::ExposeScriptToDebugger(cx, script);
+ }
+
+ return true;
+}
+
+MOZ_NEVER_INLINE static bool ExecuteScript(JSContext* cx, HandleObject envChain,
+ HandleScript script,
+ MutableHandleValue rval) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(envChain, script);
+
+ if (!IsGlobalLexicalEnvironment(envChain)) {
+ MOZ_RELEASE_ASSERT(script->hasNonSyntacticScope());
+ }
+
+ return Execute(cx, script, envChain, rval);
+}
+
+static bool ExecuteScript(JSContext* cx, HandleObjectVector envChain,
+ HandleScript script, MutableHandleValue rval) {
+ RootedObject env(cx);
+ if (!CreateNonSyntacticEnvironmentChain(cx, envChain, &env)) {
+ return false;
+ }
+
+ return ExecuteScript(cx, env, script, rval);
+}
+
+MOZ_NEVER_INLINE JS_PUBLIC_API bool JS_ExecuteScript(JSContext* cx,
+ HandleScript scriptArg,
+ MutableHandleValue rval) {
+ RootedObject globalLexical(cx, &cx->global()->lexicalEnvironment());
+ return ExecuteScript(cx, globalLexical, scriptArg, rval);
+}
+
+MOZ_NEVER_INLINE JS_PUBLIC_API bool JS_ExecuteScript(JSContext* cx,
+ HandleScript scriptArg) {
+ RootedObject globalLexical(cx, &cx->global()->lexicalEnvironment());
+ RootedValue rval(cx);
+ return ExecuteScript(cx, globalLexical, scriptArg, &rval);
+}
+
+MOZ_NEVER_INLINE JS_PUBLIC_API bool JS_ExecuteScript(
+ JSContext* cx, HandleObjectVector envChain, HandleScript scriptArg,
+ MutableHandleValue rval) {
+ return ExecuteScript(cx, envChain, scriptArg, rval);
+}
+
+MOZ_NEVER_INLINE JS_PUBLIC_API bool JS_ExecuteScript(
+ JSContext* cx, HandleObjectVector envChain, HandleScript scriptArg) {
+ RootedValue rval(cx);
+ return ExecuteScript(cx, envChain, scriptArg, &rval);
+}
+
+template <typename Unit>
+static bool EvaluateSourceBuffer(JSContext* cx, ScopeKind scopeKind,
+ Handle<JSObject*> env,
+ const ReadOnlyCompileOptions& optionsArg,
+ SourceText<Unit>& srcBuf,
+ MutableHandle<Value> rval) {
+ CompileOptions options(cx, optionsArg);
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(env);
+ MOZ_ASSERT_IF(!IsGlobalLexicalEnvironment(env),
+ scopeKind == ScopeKind::NonSyntactic);
+
+ options.setNonSyntacticScope(scopeKind == ScopeKind::NonSyntactic);
+ options.setIsRunOnce(true);
+
+ AutoReportFrontendContext fc(cx);
+ RootedScript script(
+ cx, frontend::CompileGlobalScript(cx, &fc, options, srcBuf, scopeKind));
+ if (!script) {
+ return false;
+ }
+
+ return Execute(cx, script, env, rval);
+}
+
+JS_PUBLIC_API bool JS::Evaluate(JSContext* cx,
+ const ReadOnlyCompileOptions& options,
+ SourceText<Utf8Unit>& srcBuf,
+ MutableHandle<Value> rval) {
+ RootedObject globalLexical(cx, &cx->global()->lexicalEnvironment());
+ return EvaluateSourceBuffer(cx, ScopeKind::Global, globalLexical, options,
+ srcBuf, rval);
+}
+
+JS_PUBLIC_API bool JS::Evaluate(JSContext* cx,
+ const ReadOnlyCompileOptions& optionsArg,
+ SourceText<char16_t>& srcBuf,
+ MutableHandleValue rval) {
+ RootedObject globalLexical(cx, &cx->global()->lexicalEnvironment());
+ return EvaluateSourceBuffer(cx, ScopeKind::Global, globalLexical, optionsArg,
+ srcBuf, rval);
+}
+
+JS_PUBLIC_API bool JS::Evaluate(JSContext* cx, HandleObjectVector envChain,
+ const ReadOnlyCompileOptions& options,
+ SourceText<char16_t>& srcBuf,
+ MutableHandleValue rval) {
+ RootedObject env(cx);
+ if (!CreateNonSyntacticEnvironmentChain(cx, envChain, &env)) {
+ return false;
+ }
+
+ return EvaluateSourceBuffer(cx, ScopeKind::NonSyntactic, env, options, srcBuf,
+ rval);
+}
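+
+// A minimal usage sketch of the overloads above (hypothetical embedder code;
+// assumes a realm is entered on |cx|):
+//
+//   JS::CompileOptions opts(cx);
+//   opts.setFileAndLine("inline.js", 1);
+//
+//   JS::SourceText<mozilla::Utf8Unit> src;
+//   if (!src.init(cx, "6 * 7", 5, JS::SourceOwnership::Borrowed)) {
+//     return false;
+//   }
+//
+//   JS::RootedValue rval(cx);
+//   if (!JS::Evaluate(cx, opts, src, &rval)) {
+//     return false;  // Exception pending on |cx|.
+//   }
+//   MOZ_ASSERT(rval.isInt32() && rval.toInt32() == 42);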
+
+JS_PUBLIC_API bool JS::EvaluateUtf8Path(
+ JSContext* cx, const ReadOnlyCompileOptions& optionsArg,
+ const char* filename, MutableHandleValue rval) {
+ FileContents buffer(cx);
+ {
+ AutoFile file;
+ if (!file.open(cx, filename) || !file.readAll(cx, buffer)) {
+ return false;
+ }
+ }
+
+ CompileOptions options(cx, optionsArg);
+ options.setFileAndLine(filename, 1);
+
+ auto contents = reinterpret_cast<const char*>(buffer.begin());
+ size_t length = buffer.length();
+
+ JS::SourceText<Utf8Unit> srcBuf;
+ if (!srcBuf.init(cx, contents, length, JS::SourceOwnership::Borrowed)) {
+ return false;
+ }
+
+ return Evaluate(cx, options, srcBuf, rval);
+}
diff --git a/js/src/vm/CompletionKind.h b/js/src/vm/CompletionKind.h
new file mode 100644
index 0000000000..0f9168ce83
--- /dev/null
+++ b/js/src/vm/CompletionKind.h
@@ -0,0 +1,16 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_CompletionKind_h
+#define vm_CompletionKind_h
+
+#include <stdint.h>  // uint8_t
+
+namespace js {
+
+enum class CompletionKind : uint8_t { Normal, Return, Throw };
+
+} // namespace js
+
+#endif // vm_CompletionKind_h
diff --git a/js/src/vm/Compression.cpp b/js/src/vm/Compression.cpp
new file mode 100644
index 0000000000..eda3c9a522
--- /dev/null
+++ b/js/src/vm/Compression.cpp
@@ -0,0 +1,262 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Compression.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/ScopeExit.h"
+
+#include "js/Utility.h"
+#include "util/Memory.h"
+
+using namespace js;
+
+static void* zlib_alloc(void* cx, uInt items, uInt size) {
+ return js_calloc(items, size);
+}
+
+static void zlib_free(void* cx, void* addr) { js_free(addr); }
+
+Compressor::Compressor(const unsigned char* inp, size_t inplen)
+ : inp(inp),
+ inplen(inplen),
+ initialized(false),
+ finished(false),
+ currentChunkSize(0),
+ chunkOffsets() {
+ MOZ_ASSERT(inplen > 0, "data to compress can't be empty");
+
+ zs.opaque = nullptr;
+ zs.next_in = (Bytef*)inp;
+ zs.avail_in = 0;
+ zs.next_out = nullptr;
+ zs.avail_out = 0;
+ zs.zalloc = zlib_alloc;
+ zs.zfree = zlib_free;
+ zs.total_in = 0;
+ zs.total_out = 0;
+ zs.msg = nullptr;
+ zs.state = nullptr;
+ zs.data_type = 0;
+ zs.adler = 0;
+ zs.reserved = 0;
+
+ // Reserve space for the CompressedDataHeader.
+ outbytes = sizeof(CompressedDataHeader);
+}
+
+Compressor::~Compressor() {
+ if (initialized) {
+ int ret = deflateEnd(&zs);
+ if (ret != Z_OK) {
+ // If we finished early, we can get a Z_DATA_ERROR.
+ MOZ_ASSERT(ret == Z_DATA_ERROR);
+ MOZ_ASSERT(!finished);
+ }
+ }
+}
+
+// According to the zlib docs, the default value for windowBits is 15. Passing
+// -15 is treated the same, but it also forces 'raw deflate' (no zlib header or
+// trailer). Raw deflate is necessary for chunked decompression.
+static const int WindowBits = -15;
+
+bool Compressor::init() {
+ if (inplen >= UINT32_MAX) {
+ return false;
+ }
+  // zlib is slow, and we'd rather finish compressing sooner even if that
+  // makes decompression slower, which penalizes Function.toString().
+ int ret = deflateInit2(&zs, Z_BEST_SPEED, Z_DEFLATED, WindowBits, 8,
+ Z_DEFAULT_STRATEGY);
+ if (ret != Z_OK) {
+ MOZ_ASSERT(ret == Z_MEM_ERROR);
+ return false;
+ }
+ initialized = true;
+ return true;
+}
+
+void Compressor::setOutput(unsigned char* out, size_t outlen) {
+ MOZ_ASSERT(outlen > outbytes);
+ zs.next_out = out + outbytes;
+ zs.avail_out = outlen - outbytes;
+}
+
+Compressor::Status Compressor::compressMore() {
+ MOZ_ASSERT(zs.next_out);
+ uInt left = inplen - (zs.next_in - inp);
+ if (left <= MAX_INPUT_SIZE) {
+ zs.avail_in = left;
+ } else if (zs.avail_in == 0) {
+ zs.avail_in = MAX_INPUT_SIZE;
+ }
+
+ // Finish the current chunk if needed.
+ bool flush = false;
+ MOZ_ASSERT(currentChunkSize <= CHUNK_SIZE);
+ if (currentChunkSize + zs.avail_in >= CHUNK_SIZE) {
+ // Adjust avail_in, so we don't get chunks that are larger than
+ // CHUNK_SIZE.
+ zs.avail_in = CHUNK_SIZE - currentChunkSize;
+ MOZ_ASSERT(currentChunkSize + zs.avail_in == CHUNK_SIZE);
+ flush = true;
+ }
+
+ MOZ_ASSERT(zs.avail_in <= left);
+ bool done = zs.avail_in == left;
+
+ Bytef* oldin = zs.next_in;
+ Bytef* oldout = zs.next_out;
+ int ret = deflate(&zs, done ? Z_FINISH : (flush ? Z_FULL_FLUSH : Z_NO_FLUSH));
+ outbytes += zs.next_out - oldout;
+ currentChunkSize += zs.next_in - oldin;
+ MOZ_ASSERT(currentChunkSize <= CHUNK_SIZE);
+
+ if (ret == Z_MEM_ERROR) {
+ zs.avail_out = 0;
+ return OOM;
+ }
+ if (ret == Z_BUF_ERROR || (ret == Z_OK && zs.avail_out == 0)) {
+ // We have to resize the output buffer. Note that we're not done yet
+ // because ret != Z_STREAM_END.
+ MOZ_ASSERT(zs.avail_out == 0);
+ return MOREOUTPUT;
+ }
+
+ if (done || currentChunkSize == CHUNK_SIZE) {
+ MOZ_ASSERT_IF(!done, flush);
+ MOZ_ASSERT(chunkSize(inplen, chunkOffsets.length()) == currentChunkSize);
+ if (!chunkOffsets.append(outbytes)) {
+ return OOM;
+ }
+ currentChunkSize = 0;
+ MOZ_ASSERT_IF(done, chunkOffsets.length() == (inplen - 1) / CHUNK_SIZE + 1);
+ }
+
+ MOZ_ASSERT_IF(!done, ret == Z_OK);
+ MOZ_ASSERT_IF(done, ret == Z_STREAM_END);
+ return done ? DONE : CONTINUE;
+}
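+
+// A sketch of the intended driver loop (hypothetical caller; GrowOutput is
+// assumed): pump compressMore(), growing the buffer on MOREOUTPUT, until the
+// stream reports DONE or OOM.
+//
+//   comp.setOutput(out, outlen);
+//   for (;;) {
+//     switch (comp.compressMore()) {
+//       case Compressor::CONTINUE:
+//         break;
+//       case Compressor::MOREOUTPUT:
+//         if (!GrowOutput(&out, &outlen)) {
+//           return false;
+//         }
+//         comp.setOutput(out, outlen);  // Resume where we left off.
+//         break;
+//       case Compressor::DONE:
+//         return true;  // Caller then sizes |dest| and calls finish().
+//       case Compressor::OOM:
+//         return false;
+//     }
+//   }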
+
+size_t Compressor::totalBytesNeeded() const {
+ return AlignBytes(outbytes, sizeof(uint32_t)) + sizeOfChunkOffsets();
+}
+
+void Compressor::finish(char* dest, size_t destBytes) {
+ MOZ_ASSERT(!chunkOffsets.empty());
+
+ CompressedDataHeader* compressedHeader =
+ reinterpret_cast<CompressedDataHeader*>(dest);
+ compressedHeader->compressedBytes = outbytes;
+
+ size_t outbytesAligned = AlignBytes(outbytes, sizeof(uint32_t));
+
+  // Zero the padding bytes; the ImmutableStringsCache will hash them.
+ mozilla::PodZero(dest + outbytes, outbytesAligned - outbytes);
+
+ uint32_t* destArr = reinterpret_cast<uint32_t*>(dest + outbytesAligned);
+
+ MOZ_ASSERT(uintptr_t(dest + destBytes) ==
+ uintptr_t(destArr + chunkOffsets.length()));
+ mozilla::PodCopy(destArr, chunkOffsets.begin(), chunkOffsets.length());
+
+ finished = true;
+}
+
+bool js::DecompressString(const unsigned char* inp, size_t inplen,
+ unsigned char* out, size_t outlen) {
+ MOZ_ASSERT(inplen <= UINT32_MAX);
+
+ // Mark the memory we pass to zlib as initialized for MSan.
+ MOZ_MAKE_MEM_DEFINED(out, outlen);
+
+ z_stream zs;
+ zs.zalloc = zlib_alloc;
+ zs.zfree = zlib_free;
+ zs.opaque = nullptr;
+ zs.next_in = (Bytef*)inp;
+ zs.avail_in = inplen;
+ zs.next_out = out;
+ MOZ_ASSERT(outlen);
+ zs.avail_out = outlen;
+ int ret = inflateInit(&zs);
+ if (ret != Z_OK) {
+ MOZ_ASSERT(ret == Z_MEM_ERROR);
+ return false;
+ }
+ ret = inflate(&zs, Z_FINISH);
+ MOZ_ASSERT(ret == Z_STREAM_END);
+ ret = inflateEnd(&zs);
+ MOZ_ASSERT(ret == Z_OK);
+ return true;
+}
+
+bool js::DecompressStringChunk(const unsigned char* inp, size_t chunk,
+ unsigned char* out, size_t outlen) {
+ MOZ_ASSERT(outlen <= Compressor::CHUNK_SIZE);
+
+ const CompressedDataHeader* header =
+ reinterpret_cast<const CompressedDataHeader*>(inp);
+
+ size_t compressedBytes = header->compressedBytes;
+ size_t compressedBytesAligned = AlignBytes(compressedBytes, sizeof(uint32_t));
+
+ const unsigned char* offsetBytes = inp + compressedBytesAligned;
+ const uint32_t* offsets = reinterpret_cast<const uint32_t*>(offsetBytes);
+
+ uint32_t compressedStart =
+ chunk > 0 ? offsets[chunk - 1] : sizeof(CompressedDataHeader);
+ uint32_t compressedEnd = offsets[chunk];
+
+ MOZ_ASSERT(compressedStart < compressedEnd);
+ MOZ_ASSERT(compressedEnd <= compressedBytes);
+
+ bool lastChunk = compressedEnd == compressedBytes;
+
+ // Mark the memory we pass to zlib as initialized for MSan.
+ MOZ_MAKE_MEM_DEFINED(out, outlen);
+
+ z_stream zs;
+ zs.zalloc = zlib_alloc;
+ zs.zfree = zlib_free;
+ zs.opaque = nullptr;
+ zs.next_in = (Bytef*)(inp + compressedStart);
+ zs.avail_in = compressedEnd - compressedStart;
+ zs.next_out = out;
+ MOZ_ASSERT(outlen);
+ zs.avail_out = outlen;
+
+  // Bug 1505857 - Use 'volatile' so the variable is preserved in crash dumps
+  // when the release asserts below are tripped.
+ volatile int ret = inflateInit2(&zs, WindowBits);
+ if (ret != Z_OK) {
+ MOZ_ASSERT(ret == Z_MEM_ERROR);
+ return false;
+ }
+
+ auto autoCleanup = mozilla::MakeScopeExit([&] {
+ mozilla::DebugOnly<int> ret = inflateEnd(&zs);
+ MOZ_ASSERT(ret == Z_OK);
+ });
+
+ if (lastChunk) {
+ ret = inflate(&zs, Z_FINISH);
+ MOZ_RELEASE_ASSERT(ret == Z_STREAM_END);
+ } else {
+ ret = inflate(&zs, Z_NO_FLUSH);
+ if (ret == Z_MEM_ERROR) {
+ return false;
+ }
+ MOZ_RELEASE_ASSERT(ret == Z_OK);
+ }
+ MOZ_ASSERT(zs.avail_in == 0);
+ MOZ_ASSERT(zs.avail_out == 0);
+ return true;
+}
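+
+// For reference, the layout consumed above (as produced by Compressor) is:
+//
+//   inp
+//   |
+//   v
+//   [ CompressedDataHeader | deflate data, chunks 0..N | zero padding |
+//     uint32_t offsets[] ]
+//
+// where the padding aligns the offsets table to sizeof(uint32_t) and
+// offsets[i] is the position just past chunk i's compressed data.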
diff --git a/js/src/vm/Compression.h b/js/src/vm/Compression.h
new file mode 100644
index 0000000000..9566431307
--- /dev/null
+++ b/js/src/vm/Compression.h
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Compression_h
+#define vm_Compression_h
+
+#include <zlib.h>
+
+#include "jstypes.h"
+
+#include "js/AllocPolicy.h"
+#include "js/Vector.h"
+
+namespace js {
+
+struct CompressedDataHeader {
+ uint32_t compressedBytes;
+};
+
+class Compressor {
+ public:
+ // After compressing CHUNK_SIZE bytes, we will do a full flush so we can
+ // start decompression at that point.
+ static constexpr size_t CHUNK_SIZE = 64 * 1024;
+
+ private:
+  // Number of bytes to hand to zlib on each compressMore() call.
+ static constexpr size_t MAX_INPUT_SIZE = 2 * 1024;
+
+ z_stream zs;
+ const unsigned char* inp;
+ size_t inplen;
+ size_t outbytes;
+ bool initialized;
+ bool finished;
+
+ // The number of uncompressed bytes written for the current chunk. When this
+ // reaches CHUNK_SIZE, we finish the current chunk and start a new chunk.
+ uint32_t currentChunkSize;
+
+ // At the end of each chunk (and the end of the uncompressed data if it's
+ // not a chunk boundary), we record the offset in the compressed data.
+ js::Vector<uint32_t, 8, SystemAllocPolicy> chunkOffsets;
+
+ public:
+ enum Status { MOREOUTPUT, DONE, CONTINUE, OOM };
+
+ Compressor(const unsigned char* inp, size_t inplen);
+ ~Compressor();
+ bool init();
+ void setOutput(unsigned char* out, size_t outlen);
+ /* Compress some of the input. Return true if it should be called again. */
+ Status compressMore();
+ size_t sizeOfChunkOffsets() const {
+ return chunkOffsets.length() * sizeof(chunkOffsets[0]);
+ }
+
+ // Returns the number of bytes needed to store the data currently written +
+ // the chunk offsets.
+ size_t totalBytesNeeded() const;
+
+  // Write the header and append the chunk offsets to |dest|.
+ void finish(char* dest, size_t destBytes);
+
+ static void rangeToChunkAndOffset(size_t uncompressedStart,
+ size_t uncompressedLimit,
+ size_t* firstChunk,
+ size_t* firstChunkOffset,
+ size_t* firstChunkSize, size_t* lastChunk,
+ size_t* lastChunkSize) {
+ *firstChunk = uncompressedStart / CHUNK_SIZE;
+ *firstChunkOffset = uncompressedStart % CHUNK_SIZE;
+ *firstChunkSize = CHUNK_SIZE - *firstChunkOffset;
+
+ MOZ_ASSERT(uncompressedStart < uncompressedLimit,
+ "subtraction below requires a non-empty range");
+
+ *lastChunk = (uncompressedLimit - 1) / CHUNK_SIZE;
+ *lastChunkSize = ((uncompressedLimit - 1) % CHUNK_SIZE) + 1;
+ }
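+
+  // Worked example (illustrative): with CHUNK_SIZE = 64 KiB = 65536, the
+  // range [100000, 200000) maps to:
+  //   firstChunk       = 100000 / 65536        = 1
+  //   firstChunkOffset = 100000 % 65536        = 34464
+  //   firstChunkSize   = 65536 - 34464         = 31072
+  //   lastChunk        = 199999 / 65536        = 3
+  //   lastChunkSize    = (199999 % 65536) + 1  = 3392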
+
+ static size_t chunkSize(size_t uncompressedBytes, size_t chunk) {
+ MOZ_ASSERT(uncompressedBytes > 0, "must have uncompressed data to chunk");
+
+ size_t startOfChunkBytes = chunk * CHUNK_SIZE;
+ MOZ_ASSERT(startOfChunkBytes < uncompressedBytes,
+ "chunk must refer to bytes not exceeding "
+ "|uncompressedBytes|");
+
+ size_t remaining = uncompressedBytes - startOfChunkBytes;
+ return remaining < CHUNK_SIZE ? remaining : CHUNK_SIZE;
+ }
+};
+
+/*
+ * Decompress a string. The caller must know the length of the output and
+ * allocate |out| to a string of that length.
+ */
+bool DecompressString(const unsigned char* inp, size_t inplen,
+ unsigned char* out, size_t outlen);
+
+/*
+ * Decompress a single chunk of at most Compressor::CHUNK_SIZE bytes.
+ * |chunk| is the chunk index. The caller must know the length of the output
+ * (the uncompressed chunk) and allocate |out| to a string of that length.
+ */
+bool DecompressStringChunk(const unsigned char* inp, size_t chunk,
+ unsigned char* out, size_t outlen);
+
+} /* namespace js */
+
+#endif /* vm_Compression_h */
diff --git a/js/src/vm/DateObject.h b/js/src/vm/DateObject.h
new file mode 100644
index 0000000000..48ee8934a0
--- /dev/null
+++ b/js/src/vm/DateObject.h
@@ -0,0 +1,101 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_DateObject_h_
+#define vm_DateObject_h_
+
+#include "js/Date.h"
+#include "js/Value.h"
+#include "vm/DateTime.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class DateObject : public NativeObject {
+ // Time in milliseconds since the (Unix) epoch.
+ static const uint32_t UTC_TIME_SLOT = 0;
+
+  // Raw time zone offset (in seconds, without daylight saving adjustment) of
+  // the current system time zone.
+ //
+ // This value is exclusively used to verify the cached slots are still valid.
+ //
+ // It is NOT the return value of Date.prototype.getTimezoneOffset()!
+ static const uint32_t UTC_TIME_ZONE_OFFSET_SLOT = 1;
+
+ /*
+ * Cached slots holding local properties of the date.
+ * These are undefined until the first actual lookup occurs
+ * and are reset to undefined whenever the date's time is modified.
+ */
+ static const uint32_t COMPONENTS_START_SLOT = 2;
+
+ static const uint32_t LOCAL_TIME_SLOT = COMPONENTS_START_SLOT + 0;
+ static const uint32_t LOCAL_YEAR_SLOT = COMPONENTS_START_SLOT + 1;
+ static const uint32_t LOCAL_MONTH_SLOT = COMPONENTS_START_SLOT + 2;
+ static const uint32_t LOCAL_DATE_SLOT = COMPONENTS_START_SLOT + 3;
+ static const uint32_t LOCAL_DAY_SLOT = COMPONENTS_START_SLOT + 4;
+
+ /*
+ * Unlike the above slots that hold LocalTZA-adjusted component values,
+ * LOCAL_SECONDS_INTO_YEAR_SLOT holds a composite value that can be used
+ * to compute LocalTZA-adjusted hours, minutes, and seconds values.
+ * Specifically, LOCAL_SECONDS_INTO_YEAR_SLOT holds the number of
+ * LocalTZA-adjusted seconds into the year. Unix timestamps ignore leap
+ * seconds, so recovering hours/minutes/seconds requires only trivial
+ * division/modulus operations.
+ */
+ static const uint32_t LOCAL_SECONDS_INTO_YEAR_SLOT =
+ COMPONENTS_START_SLOT + 5;
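+
+  // For example (illustrative): if the slot holds s = 86461 -- one day plus
+  // 61 seconds into the year -- then
+  //   hours   = (s / 3600) % 24 = 0
+  //   minutes = (s / 60) % 60   = 1
+  //   seconds = s % 60          = 1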
+
+ static const uint32_t RESERVED_SLOTS = LOCAL_SECONDS_INTO_YEAR_SLOT + 1;
+
+ public:
+ static const JSClass class_;
+ static const JSClass protoClass_;
+
+ js::DateTimeInfo::ShouldRFP shouldRFP() const;
+
+ JS::ClippedTime clippedTime() const {
+ double t = getFixedSlot(UTC_TIME_SLOT).toDouble();
+ JS::ClippedTime clipped = JS::TimeClip(t);
+ MOZ_ASSERT(mozilla::NumbersAreIdentical(clipped.toDouble(), t));
+ return clipped;
+ }
+
+ const js::Value& UTCTime() const { return getFixedSlot(UTC_TIME_SLOT); }
+ const js::Value& localTime() const {
+ return getReservedSlot(LOCAL_TIME_SLOT);
+ }
+
+ // Set UTC time to a given time and invalidate cached local time.
+ void setUTCTime(JS::ClippedTime t);
+ void setUTCTime(JS::ClippedTime t, MutableHandleValue vp);
+
+ // Cache the local time, year, month, and so forth of the object.
+ // If UTC time is not finite (e.g., NaN), the local time
+ // slots will be set to the UTC time without conversion.
+ void fillLocalTimeSlots();
+
+ const js::Value& localYear() const {
+ return getReservedSlot(LOCAL_YEAR_SLOT);
+ }
+ const js::Value& localMonth() const {
+ return getReservedSlot(LOCAL_MONTH_SLOT);
+ }
+ const js::Value& localDate() const {
+ return getReservedSlot(LOCAL_DATE_SLOT);
+ }
+ const js::Value& localDay() const { return getReservedSlot(LOCAL_DAY_SLOT); }
+
+ const js::Value& localSecondsIntoYear() const {
+ return getReservedSlot(LOCAL_SECONDS_INTO_YEAR_SLOT);
+ }
+};
+
+} // namespace js
+
+#endif // vm_DateObject_h_
diff --git a/js/src/vm/DateTime.cpp b/js/src/vm/DateTime.cpp
new file mode 100644
index 0000000000..0dd93e00a1
--- /dev/null
+++ b/js/src/vm/DateTime.cpp
@@ -0,0 +1,824 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/DateTime.h"
+
+#if JS_HAS_INTL_API
+# include "mozilla/intl/TimeZone.h"
+#endif
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TextUtils.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <cstring>
+#include <iterator>
+#include <string_view>
+#include <time.h>
+
+#if !defined(XP_WIN)
+# include <limits.h>
+# include <unistd.h>
+#endif /* !defined(XP_WIN) */
+
+#if JS_HAS_INTL_API
+# include "builtin/intl/FormatBuffer.h"
+#endif
+#include "js/AllocPolicy.h"
+#include "js/Date.h"
+#include "js/GCAPI.h"
+#include "js/Vector.h"
+#include "threading/ExclusiveData.h"
+
+#include "util/Text.h"
+#include "vm/MutexIDs.h"
+#include "vm/Realm.h"
+
+/* static */
+js::DateTimeInfo::ShouldRFP js::DateTimeInfo::shouldRFP(JS::Realm* realm) {
+ return realm->behaviors().shouldResistFingerprinting()
+ ? DateTimeInfo::ShouldRFP::Yes
+ : DateTimeInfo::ShouldRFP::No;
+}
+
+static bool ComputeLocalTime(time_t local, struct tm* ptm) {
+  // Neither localtime_s nor localtime_r is required to act as if tzset has
+  // been called, so we must call it explicitly to ensure any time zone
+  // changes are picked up correctly.
+
+#if defined(_WIN32)
+ _tzset();
+ return localtime_s(ptm, &local) == 0;
+#elif defined(HAVE_LOCALTIME_R)
+# ifndef __wasi__
+ tzset();
+# endif
+ return localtime_r(&local, ptm);
+#else
+ struct tm* otm = localtime(&local);
+ if (!otm) {
+ return false;
+ }
+ *ptm = *otm;
+ return true;
+#endif
+}
+
+static bool ComputeUTCTime(time_t t, struct tm* ptm) {
+#if defined(_WIN32)
+ return gmtime_s(ptm, &t) == 0;
+#elif defined(HAVE_GMTIME_R)
+ return gmtime_r(&t, ptm);
+#else
+ struct tm* otm = gmtime(&t);
+ if (!otm) {
+ return false;
+ }
+ *ptm = *otm;
+ return true;
+#endif
+}
+
+/*
+ * Compute the offset in seconds from the current UTC time to the current local
+ * standard time (i.e. not including any offset due to DST).
+ *
+ * Examples:
+ *
+ * Suppose we are in California, USA on January 1, 2013 at 04:00 PST (UTC-8, no
+ * DST in effect), corresponding to 12:00 UTC. This function would then return
+ * -8 * SecondsPerHour, or -28800.
+ *
+ * Or suppose we are in Berlin, Germany on July 1, 2013 at 17:00 CEST (UTC+2,
+ * DST in effect), corresponding to 15:00 UTC. This function would then return
+ * +1 * SecondsPerHour, or +3600.
+ */
+static int32_t UTCToLocalStandardOffsetSeconds() {
+ using js::SecondsPerDay;
+ using js::SecondsPerHour;
+ using js::SecondsPerMinute;
+
+ // Get the current time.
+ time_t currentMaybeWithDST = time(nullptr);
+ if (currentMaybeWithDST == time_t(-1)) {
+ return 0;
+ }
+
+ // Break down the current time into its (locally-valued, maybe with DST)
+ // components.
+ struct tm local;
+ if (!ComputeLocalTime(currentMaybeWithDST, &local)) {
+ return 0;
+ }
+
+ // Compute a |time_t| corresponding to |local| interpreted without DST.
+ time_t currentNoDST;
+ if (local.tm_isdst == 0) {
+ // If |local| wasn't DST, we can use the same time.
+ currentNoDST = currentMaybeWithDST;
+ } else {
+ // If |local| respected DST, we need a time broken down into components
+ // ignoring DST. Turn off DST in the broken-down time. Create a fresh
+ // copy of |local|, because mktime() will reset tm_isdst = 1 and will
+    // adjust tm_hour and tm_min accordingly.
+ struct tm localNoDST = local;
+ localNoDST.tm_isdst = 0;
+
+ // Compute a |time_t t| corresponding to the broken-down time with DST
+ // off. This has boundary-condition issues (for about the duration of
+ // a DST offset) near the time a location moves to a different time
+ // zone. But 1) errors will be transient; 2) locations rarely change
+ // time zone; and 3) in the absence of an API that provides the time
+ // zone offset directly, this may be the best we can do.
+ currentNoDST = mktime(&localNoDST);
+ if (currentNoDST == time_t(-1)) {
+ return 0;
+ }
+ }
+
+ // Break down the time corresponding to the no-DST |local| into UTC-based
+ // components.
+ struct tm utc;
+ if (!ComputeUTCTime(currentNoDST, &utc)) {
+ return 0;
+ }
+
+ // Finally, compare the seconds-based components of the local non-DST
+ // representation and the UTC representation to determine the actual
+ // difference.
+ int utc_secs =
+ utc.tm_hour * SecondsPerHour + utc.tm_min * int(SecondsPerMinute);
+ int local_secs =
+ local.tm_hour * SecondsPerHour + local.tm_min * int(SecondsPerMinute);
+
+ // Same-day? Just subtract the seconds counts.
+ if (utc.tm_mday == local.tm_mday) {
+ return local_secs - utc_secs;
+ }
+
+ // If we have more UTC seconds, move local seconds into the UTC seconds'
+ // frame of reference and then subtract.
+ if (utc_secs > local_secs) {
+ return (SecondsPerDay + local_secs) - utc_secs;
+ }
+
+ // Otherwise we have more local seconds, so move the UTC seconds into the
+ // local seconds' frame of reference and then subtract.
+ return local_secs - (utc_secs + SecondsPerDay);
+}
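+
+// Worked example for the cross-midnight case above (illustrative only, not
+// part of the original algorithm description): at 00:30 local time in a
+// UTC+1 zone without DST, UTC is still 23:30 on the previous day. The
+// tm_mday fields differ and utc_secs (84600) exceeds local_secs (1800), so
+// the result is (SecondsPerDay + 1800) - 84600 == 3600, i.e.
+// +1 * SecondsPerHour.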
+
+void js::DateTimeInfo::internalResetTimeZone(ResetTimeZoneMode mode) {
+ // Nothing to do when an update request is already enqueued.
+ if (timeZoneStatus_ == TimeZoneStatus::NeedsUpdate) {
+ return;
+ }
+
+  // Mark the state as needing an update, but defer the actual update until
+  // it's needed, so that any system calls happen at the last possible moment.
+  // This is beneficial when this method is called during start-up, because it
+  // avoids blocking the process on main-thread I/O.
+ if (mode == ResetTimeZoneMode::ResetEvenIfOffsetUnchanged) {
+ timeZoneStatus_ = TimeZoneStatus::NeedsUpdate;
+ } else {
+ timeZoneStatus_ = TimeZoneStatus::UpdateIfChanged;
+ }
+}
+
+void js::DateTimeInfo::updateTimeZone() {
+ MOZ_ASSERT(timeZoneStatus_ != TimeZoneStatus::Valid);
+
+ bool updateIfChanged = timeZoneStatus_ == TimeZoneStatus::UpdateIfChanged;
+
+ timeZoneStatus_ = TimeZoneStatus::Valid;
+
+ /*
+ * The difference between local standard time and UTC will never change for
+ * a given time zone.
+ */
+ int32_t newOffset = UTCToLocalStandardOffsetSeconds();
+
+ if (updateIfChanged && newOffset == utcToLocalStandardOffsetSeconds_) {
+ return;
+ }
+
+ utcToLocalStandardOffsetSeconds_ = newOffset;
+
+ dstRange_.reset();
+
+#if JS_HAS_INTL_API
+ utcRange_.reset();
+ localRange_.reset();
+
+ {
+ // Tell the analysis the |pFree| function pointer called by uprv_free
+ // cannot GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ timeZone_ = nullptr;
+ }
+
+ standardName_ = nullptr;
+ daylightSavingsName_ = nullptr;
+#endif /* JS_HAS_INTL_API */
+
+ // Propagate the time zone change to ICU, too.
+ {
+ // Tell the analysis calling into ICU cannot GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ internalResyncICUDefaultTimeZone();
+ }
+}
+
+js::DateTimeInfo::DateTimeInfo(bool shouldResistFingerprinting)
+ : shouldResistFingerprinting_(shouldResistFingerprinting) {
+  // Set the time zone status to the invalid state, so we compute the actual
+  // defaults on first access. We don't yet want to initialize either <ctime>
+  // or ICU's time zone classes, because that may trigger I/O operations that
+  // slow down the JS engine initialization, which we're currently in the
+  // middle of.
+ timeZoneStatus_ = TimeZoneStatus::NeedsUpdate;
+}
+
+js::DateTimeInfo::~DateTimeInfo() = default;
+
+int64_t js::DateTimeInfo::toClampedSeconds(int64_t milliseconds) {
+ int64_t seconds = milliseconds / int64_t(msPerSecond);
+ int64_t millis = milliseconds % int64_t(msPerSecond);
+
+ // Round towards the start of time.
+ if (millis < 0) {
+ seconds -= 1;
+ }
+
+ if (seconds > MaxTimeT) {
+ seconds = MaxTimeT;
+ } else if (seconds < MinTimeT) {
+ /* Go ahead a day to make localtime work (does not work with 0). */
+ seconds = SecondsPerDay;
+ }
+ return seconds;
+}
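+
+// Worked example of the rounding above (illustrative only): for an input of
+// -1500 milliseconds, C++ truncating division yields seconds == -1 and
+// millis == -500; the adjustment then produces seconds == -2, so -1500 ms is
+// mapped onto the second starting at -2000 ms, i.e. rounded towards the
+// start of time.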
+
+int32_t js::DateTimeInfo::computeDSTOffsetMilliseconds(int64_t utcSeconds) {
+ MOZ_ASSERT(utcSeconds >= MinTimeT);
+ MOZ_ASSERT(utcSeconds <= MaxTimeT);
+
+#if JS_HAS_INTL_API
+ int64_t utcMilliseconds = utcSeconds * int64_t(msPerSecond);
+
+ return timeZone()->GetDSTOffsetMs(utcMilliseconds).unwrapOr(0);
+#else
+ struct tm tm;
+ if (!ComputeLocalTime(static_cast<time_t>(utcSeconds), &tm)) {
+ return 0;
+ }
+
+ // NB: The offset isn't computed correctly when the standard local offset
+ // at |utcSeconds| is different from |utcToLocalStandardOffsetSeconds|.
+ int32_t dayoff =
+ int32_t((utcSeconds + utcToLocalStandardOffsetSeconds_) % SecondsPerDay);
+ int32_t tmoff = tm.tm_sec + (tm.tm_min * SecondsPerMinute) +
+ (tm.tm_hour * SecondsPerHour);
+
+ int32_t diff = tmoff - dayoff;
+
+ if (diff < 0) {
+ diff += SecondsPerDay;
+ } else if (uint32_t(diff) >= SecondsPerDay) {
+ diff -= SecondsPerDay;
+ }
+
+ return diff * int32_t(msPerSecond);
+#endif /* JS_HAS_INTL_API */
+}
+
+int32_t js::DateTimeInfo::internalGetDSTOffsetMilliseconds(
+ int64_t utcMilliseconds) {
+ int64_t utcSeconds = toClampedSeconds(utcMilliseconds);
+ return getOrComputeValue(dstRange_, utcSeconds,
+ &DateTimeInfo::computeDSTOffsetMilliseconds);
+}
+
+int32_t js::DateTimeInfo::getOrComputeValue(RangeCache& range, int64_t seconds,
+ ComputeFn compute) {
+ range.sanityCheck();
+
+ auto checkSanity =
+ mozilla::MakeScopeExit([&range]() { range.sanityCheck(); });
+
+ // NB: Be aware of the initial range values when making changes to this
+ // code: the first call to this method, with those initial range
+ // values, must result in a cache miss.
+ MOZ_ASSERT(seconds != INT64_MIN);
+
+ if (range.startSeconds <= seconds && seconds <= range.endSeconds) {
+ return range.offsetMilliseconds;
+ }
+
+ if (range.oldStartSeconds <= seconds && seconds <= range.oldEndSeconds) {
+ return range.oldOffsetMilliseconds;
+ }
+
+ range.oldOffsetMilliseconds = range.offsetMilliseconds;
+ range.oldStartSeconds = range.startSeconds;
+ range.oldEndSeconds = range.endSeconds;
+
+ if (range.startSeconds <= seconds) {
+ int64_t newEndSeconds =
+ std::min({range.endSeconds + RangeExpansionAmount, MaxTimeT});
+ if (newEndSeconds >= seconds) {
+ int32_t endOffsetMilliseconds = (this->*compute)(newEndSeconds);
+ if (endOffsetMilliseconds == range.offsetMilliseconds) {
+ range.endSeconds = newEndSeconds;
+ return range.offsetMilliseconds;
+ }
+
+ range.offsetMilliseconds = (this->*compute)(seconds);
+ if (range.offsetMilliseconds == endOffsetMilliseconds) {
+ range.startSeconds = seconds;
+ range.endSeconds = newEndSeconds;
+ } else {
+ range.endSeconds = seconds;
+ }
+ return range.offsetMilliseconds;
+ }
+
+ range.offsetMilliseconds = (this->*compute)(seconds);
+ range.startSeconds = range.endSeconds = seconds;
+ return range.offsetMilliseconds;
+ }
+
+ int64_t newStartSeconds =
+ std::max<int64_t>({range.startSeconds - RangeExpansionAmount, MinTimeT});
+ if (newStartSeconds <= seconds) {
+ int32_t startOffsetMilliseconds = (this->*compute)(newStartSeconds);
+ if (startOffsetMilliseconds == range.offsetMilliseconds) {
+ range.startSeconds = newStartSeconds;
+ return range.offsetMilliseconds;
+ }
+
+ range.offsetMilliseconds = (this->*compute)(seconds);
+ if (range.offsetMilliseconds == startOffsetMilliseconds) {
+ range.startSeconds = newStartSeconds;
+ range.endSeconds = seconds;
+ } else {
+ range.startSeconds = seconds;
+ }
+ return range.offsetMilliseconds;
+ }
+
+ range.startSeconds = range.endSeconds = seconds;
+ range.offsetMilliseconds = (this->*compute)(seconds);
+ return range.offsetMilliseconds;
+}
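+
+// Illustrative call sequence (a sketch, not part of this file): immediately
+// after RangeCache::reset(), the guard ranges are [INT64_MIN, INT64_MIN], so
+// the first call below misses and seeds a single-second range; the second
+// call, within RangeExpansionAmount of the first, typically extends that
+// range instead of recomputing from scratch:
+//
+//   int32_t o1 = getOrComputeValue(dstRange_, t,
+//                                  &DateTimeInfo::computeDSTOffsetMilliseconds);
+//   int32_t o2 = getOrComputeValue(dstRange_, t + SecondsPerDay,
+//                                  &DateTimeInfo::computeDSTOffsetMilliseconds);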
+
+void js::DateTimeInfo::RangeCache::reset() {
+ // The initial range values are carefully chosen to result in a cache miss
+ // on first use given the range of possible values. Be careful to keep
+ // these values and the caching algorithm in sync!
+ offsetMilliseconds = 0;
+ startSeconds = endSeconds = INT64_MIN;
+ oldOffsetMilliseconds = 0;
+ oldStartSeconds = oldEndSeconds = INT64_MIN;
+
+ sanityCheck();
+}
+
+void js::DateTimeInfo::RangeCache::sanityCheck() {
+ auto assertRange = [](int64_t start, int64_t end) {
+ MOZ_ASSERT(start <= end);
+ MOZ_ASSERT_IF(start == INT64_MIN, end == INT64_MIN);
+ MOZ_ASSERT_IF(end == INT64_MIN, start == INT64_MIN);
+ MOZ_ASSERT_IF(start != INT64_MIN, start >= MinTimeT && end >= MinTimeT);
+ MOZ_ASSERT_IF(start != INT64_MIN, start <= MaxTimeT && end <= MaxTimeT);
+ };
+
+ assertRange(startSeconds, endSeconds);
+ assertRange(oldStartSeconds, oldEndSeconds);
+}
+
+#if JS_HAS_INTL_API
+int32_t js::DateTimeInfo::computeUTCOffsetMilliseconds(int64_t localSeconds) {
+ MOZ_ASSERT(localSeconds >= MinTimeT);
+ MOZ_ASSERT(localSeconds <= MaxTimeT);
+
+ int64_t localMilliseconds = localSeconds * int64_t(msPerSecond);
+
+ return timeZone()->GetUTCOffsetMs(localMilliseconds).unwrapOr(0);
+}
+
+int32_t js::DateTimeInfo::computeLocalOffsetMilliseconds(int64_t utcSeconds) {
+ MOZ_ASSERT(utcSeconds >= MinTimeT);
+ MOZ_ASSERT(utcSeconds <= MaxTimeT);
+
+ UDate utcMilliseconds = UDate(utcSeconds * int64_t(msPerSecond));
+
+ return timeZone()->GetOffsetMs(utcMilliseconds).unwrapOr(0);
+}
+
+int32_t js::DateTimeInfo::internalGetOffsetMilliseconds(int64_t milliseconds,
+ TimeZoneOffset offset) {
+ int64_t seconds = toClampedSeconds(milliseconds);
+ return offset == TimeZoneOffset::UTC
+ ? getOrComputeValue(localRange_, seconds,
+ &DateTimeInfo::computeLocalOffsetMilliseconds)
+ : getOrComputeValue(utcRange_, seconds,
+ &DateTimeInfo::computeUTCOffsetMilliseconds);
+}
+
+bool js::DateTimeInfo::internalTimeZoneDisplayName(char16_t* buf, size_t buflen,
+ int64_t utcMilliseconds,
+ const char* locale) {
+ MOZ_ASSERT(buf != nullptr);
+ MOZ_ASSERT(buflen > 0);
+ MOZ_ASSERT(locale != nullptr);
+
+  // Clear any previously cached names if the default locale has changed.
+ if (!locale_ || std::strcmp(locale_.get(), locale) != 0) {
+ locale_ = DuplicateString(locale);
+ if (!locale_) {
+ return false;
+ }
+
+ standardName_.reset();
+ daylightSavingsName_.reset();
+ }
+
+ using DaylightSavings = mozilla::intl::TimeZone::DaylightSavings;
+
+ auto daylightSavings = internalGetDSTOffsetMilliseconds(utcMilliseconds) != 0
+ ? DaylightSavings::Yes
+ : DaylightSavings::No;
+
+ JS::UniqueTwoByteChars& cachedName = (daylightSavings == DaylightSavings::Yes)
+ ? daylightSavingsName_
+ : standardName_;
+ if (!cachedName) {
+ // Retrieve the display name for the given locale.
+
+ intl::FormatBuffer<char16_t, 0, js::SystemAllocPolicy> buffer;
+ if (timeZone()->GetDisplayName(locale, daylightSavings, buffer).isErr()) {
+ return false;
+ }
+
+ cachedName = buffer.extractStringZ();
+ if (!cachedName) {
+ return false;
+ }
+ }
+
+ // Return an empty string if the display name doesn't fit into the buffer.
+ size_t length = js_strlen(cachedName.get());
+ if (length < buflen) {
+ std::copy(cachedName.get(), cachedName.get() + length, buf);
+ } else {
+ length = 0;
+ }
+
+ buf[length] = '\0';
+ return true;
+}
+
+mozilla::intl::TimeZone* js::DateTimeInfo::timeZone() {
+ if (!timeZone_) {
+    // In resist-fingerprinting mode we always use the UTC time zone.
+ mozilla::Maybe<mozilla::Span<const char16_t>> timeZoneOverride;
+ if (shouldResistFingerprinting_) {
+ timeZoneOverride = mozilla::Some(mozilla::MakeStringSpan(u"UTC"));
+ }
+
+ auto timeZone = mozilla::intl::TimeZone::TryCreate(timeZoneOverride);
+
+    // Creating the default or UTC time zone should never fail. If it
+    // nonetheless fails for some reason, just crash, because we don't have a
+    // way to propagate any errors.
+ MOZ_RELEASE_ASSERT(timeZone.isOk());
+
+ timeZone_ = timeZone.unwrap();
+ MOZ_ASSERT(timeZone_);
+ }
+
+ return timeZone_.get();
+}
+#endif /* JS_HAS_INTL_API */
+
+/* static */ js::ExclusiveData<js::DateTimeInfo>* js::DateTimeInfo::instance;
+/* static */ js::ExclusiveData<js::DateTimeInfo>* js::DateTimeInfo::instanceRFP;
+
+bool js::InitDateTimeState() {
+ MOZ_ASSERT(!DateTimeInfo::instance && !DateTimeInfo::instanceRFP,
+ "we should be initializing only once");
+
+ DateTimeInfo::instance =
+ js_new<ExclusiveData<DateTimeInfo>>(mutexid::DateTimeInfoMutex, false);
+ DateTimeInfo::instanceRFP =
+ js_new<ExclusiveData<DateTimeInfo>>(mutexid::DateTimeInfoMutex, true);
+ return DateTimeInfo::instance && DateTimeInfo::instanceRFP;
+}
+
+/* static */
+void js::FinishDateTimeState() {
+  js_delete(DateTimeInfo::instance);
+  DateTimeInfo::instance = nullptr;
+
+  js_delete(DateTimeInfo::instanceRFP);
+  DateTimeInfo::instanceRFP = nullptr;
+}
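+
+// Usage note (illustrative): InitDateTimeState() is expected to be called
+// once during engine start-up and FinishDateTimeState() once during
+// shutdown; both instances above are only ever accessed through the static
+// DateTimeInfo methods, which lock them internally.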
+
+void js::ResetTimeZoneInternal(ResetTimeZoneMode mode) {
+ js::DateTimeInfo::resetTimeZone(mode);
+}
+
+JS_PUBLIC_API void JS::ResetTimeZone() {
+ js::ResetTimeZoneInternal(js::ResetTimeZoneMode::ResetEvenIfOffsetUnchanged);
+}
+
+#if JS_HAS_INTL_API
+# if defined(XP_WIN)
+static bool IsOlsonCompatibleWindowsTimeZoneId(std::string_view tz) {
+ // ICU ignores the TZ environment variable on Windows and instead directly
+ // invokes Win API functions to retrieve the current time zone. But since
+ // we're still using the POSIX-derived localtime_s() function on Windows
+ // and localtime_s() does return a time zone adjusted value based on the
+ // TZ environment variable, we need to manually adjust the default ICU
+ // time zone if TZ is set.
+ //
+ // Windows supports the following format for TZ: tzn[+|-]hh[:mm[:ss]][dzn]
+ // where "tzn" is the time zone name for standard time, the time zone
+ // offset is positive for time zones west of GMT, and "dzn" is the
+  // optional time zone name when daylight saving time is observed. Daylight
+  // saving time is always based on the U.S. daylight saving rules, which
+  // means, for example, it's not possible to use "TZ=CET-1CEST" to select the
+  // IANA
+ // time zone "CET".
+ //
+ // When comparing this restricted format for TZ to all IANA time zone
+ // names, the following time zones are in the intersection of what's
+ // supported by Windows and is also a valid IANA time zone identifier.
+ //
+ // Even though the time zone offset is marked as mandatory on MSDN, it
+ // appears it defaults to zero when omitted. This in turn means we can
+ // also allow the time zone identifiers "UCT", "UTC", and "GMT".
+
+ static const char* const allowedIds[] = {
+ // From tzdata's "northamerica" file:
+ "EST5EDT",
+ "CST6CDT",
+ "MST7MDT",
+ "PST8PDT",
+
+ // From tzdata's "backward" file:
+ "GMT+0",
+ "GMT-0",
+ "GMT0",
+ "UCT",
+ "UTC",
+
+ // From tzdata's "etcetera" file:
+ "GMT",
+ };
+ for (const auto& allowedId : allowedIds) {
+ if (tz == allowedId) {
+ return true;
+ }
+ }
+ return false;
+}
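+
+// Examples (illustrative): "EST5EDT" and "UTC" are accepted by the check
+// above, whereas "CET-1CEST" is rejected because it isn't in the
+// intersection of Windows TZ syntax and IANA time zone identifiers.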
+# else
+static std::string_view TZContainsAbsolutePath(std::string_view tzVar) {
+ // A TZ environment variable may be an absolute path. The path
+ // format of TZ may begin with a colon. (ICU handles relative paths.)
+ if (tzVar.length() > 1 && tzVar[0] == ':' && tzVar[1] == '/') {
+ return tzVar.substr(1);
+ }
+ if (tzVar.length() > 0 && tzVar[0] == '/') {
+ return tzVar;
+ }
+ return {};
+}
+
+/**
+ * Reject the input if it doesn't match the time zone id pattern or legacy time
+ * zone names.
+ *
+ * See <https://github.com/eggert/tz/blob/master/theory.html>.
+ */
+static bool IsTimeZoneId(std::string_view timeZone) {
+ size_t timeZoneLen = timeZone.length();
+
+ if (timeZoneLen == 0) {
+ return false;
+ }
+
+ for (size_t i = 0; i < timeZoneLen; i++) {
+ char c = timeZone[i];
+
+ // According to theory.html, '.' is allowed in time zone ids, but the
+ // accompanying zic.c file doesn't allow it. Assume the source file is
+ // correct and disallow '.' here, too.
+ if (mozilla::IsAsciiAlphanumeric(c) || c == '_' || c == '-' || c == '+') {
+ continue;
+ }
+
+ // Reject leading, trailing, or consecutive '/' characters.
+ if (c == '/' && i > 0 && i + 1 < timeZoneLen && timeZone[i + 1] != '/') {
+ continue;
+ }
+
+ return false;
+ }
+
+ return true;
+}
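+
+// Examples (illustrative): "America/New_York", "UTC", and "Etc/GMT+1" pass
+// the check above; "", "/etc/localtime" (leading '/'), "America//New_York"
+// (consecutive '/'), and "America/New_York/" (trailing '/') are rejected.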
+
+using TimeZoneIdentifierVector =
+ js::Vector<char, mozilla::intl::TimeZone::TimeZoneIdentifierLength,
+ js::SystemAllocPolicy>;
+
+/**
+ * Given a presumptive path |tz| to a zoneinfo time zone file
+ * (e.g. /etc/localtime), attempt to compute the time zone encoded by that
+ * path by repeatedly resolving symlinks until a path containing "/zoneinfo/"
+ * followed by time zone looking components is found. If a symlink is broken,
+ * symlink-following recurs too deeply, non time zone looking components are
+ * encountered, or some other error is encountered, then the |result| buffer is
+ * left empty.
+ *
+ * If |result| is set to a non-empty string, it's only guaranteed to have
+ * certain syntactic validity. It might not actually *be* a time zone name.
+ *
+ * If there's an (OOM) error, |false| is returned.
+ */
+static bool ReadTimeZoneLink(std::string_view tz,
+ TimeZoneIdentifierVector& result) {
+ MOZ_ASSERT(!tz.empty());
+ MOZ_ASSERT(result.empty());
+
+ // The resolved link name can have different paths depending on the OS.
+ // Follow ICU and only search for "/zoneinfo/"; see $ICU/common/putil.cpp.
+ static constexpr char ZoneInfoPath[] = "/zoneinfo/";
+ constexpr size_t ZoneInfoPathLength = js_strlen(ZoneInfoPath);
+
+ // Stop following symlinks after a fixed depth, because some common time
+ // zones are stored in files whose name doesn't match an Olson time zone
+ // name. For example on Ubuntu, "/usr/share/zoneinfo/America/New_York" is a
+ // symlink to "/usr/share/zoneinfo/posixrules" and "posixrules" is not an
+ // Olson time zone name.
+ // Four hops should be a reasonable limit for most use cases.
+ constexpr uint32_t FollowDepthLimit = 4;
+
+# ifdef PATH_MAX
+ constexpr size_t PathMax = PATH_MAX;
+# else
+ constexpr size_t PathMax = 4096;
+# endif
+ static_assert(PathMax > 0, "PathMax should be larger than zero");
+
+ char linkName[PathMax];
+ constexpr size_t linkNameLen =
+ std::size(linkName) - 1; // -1 to null-terminate.
+
+ // Return if the TZ value is too large.
+ if (tz.length() > linkNameLen) {
+ return true;
+ }
+
+ tz.copy(linkName, tz.length());
+ linkName[tz.length()] = '\0';
+
+ char linkTarget[PathMax];
+ constexpr size_t linkTargetLen =
+ std::size(linkTarget) - 1; // -1 to null-terminate.
+
+ uint32_t depth = 0;
+
+ // Search until we find "/zoneinfo/" in the link name.
+ const char* timeZoneWithZoneInfo;
+ while (!(timeZoneWithZoneInfo = std::strstr(linkName, ZoneInfoPath))) {
+ // Return if the symlink nesting is too deep.
+ if (++depth > FollowDepthLimit) {
+ return true;
+ }
+
+ // Return on error or if the result was truncated.
+ ssize_t slen = readlink(linkName, linkTarget, linkTargetLen);
+ if (slen < 0 || size_t(slen) >= linkTargetLen) {
+ return true;
+ }
+
+ // Ensure linkTarget is null-terminated. (readlink may not necessarily
+ // null-terminate the string.)
+ size_t len = size_t(slen);
+ linkTarget[len] = '\0';
+
+ // If the target is absolute, continue with that.
+ if (linkTarget[0] == '/') {
+ std::strcpy(linkName, linkTarget);
+ continue;
+ }
+
+ // If the target is relative, it must be resolved against either the
+ // directory the link was in, or against the current working directory.
+ char* separator = std::strrchr(linkName, '/');
+
+ // If the link name is just something like "foo", resolve linkTarget
+ // against the current working directory.
+ if (!separator) {
+ std::strcpy(linkName, linkTarget);
+ continue;
+ }
+
+ // Remove everything after the final path separator in linkName.
+ separator[1] = '\0';
+
+ // Return if the concatenated path name is too large.
+ if (std::strlen(linkName) + len > linkNameLen) {
+ return true;
+ }
+
+ // Keep it simple and just concatenate the path names.
+ std::strcat(linkName, linkTarget);
+ }
+
+ std::string_view timeZone(timeZoneWithZoneInfo + ZoneInfoPathLength);
+ if (!IsTimeZoneId(timeZone)) {
+ return true;
+ }
+ return result.append(timeZone.data(), timeZone.length());
+}
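+
+// Example (illustrative): with |tz| == "/etc/localtime" and /etc/localtime
+// being a symlink to /usr/share/zoneinfo/Europe/Berlin, the loop above finds
+// "/zoneinfo/" after one readlink() hop and appends "Europe/Berlin" to
+// |result|.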
+# endif /* defined(XP_WIN) */
+#endif /* JS_HAS_INTL_API */
+
+void js::DateTimeInfo::internalResyncICUDefaultTimeZone() {
+#if JS_HAS_INTL_API
+  // In the future we should not be setting a default ICU time zone at all;
+  // instead, all accesses should go through the appropriate DateTimeInfo
+  // instance, depending on the resist-fingerprinting status. For now we
+  // return early to avoid overwriting the default time zone with the UTC
+  // time zone used by RFP.
+ if (shouldResistFingerprinting_) {
+ return;
+ }
+
+ if (const char* tzenv = std::getenv("TZ")) {
+ std::string_view tz(tzenv);
+
+ mozilla::Span<const char> tzid;
+
+# if defined(XP_WIN)
+ // If TZ is set and its value is valid under Windows' and IANA's time zone
+ // identifier rules, update the ICU default time zone to use this value.
+ if (IsOlsonCompatibleWindowsTimeZoneId(tz)) {
+ tzid = mozilla::Span(tz.data(), tz.length());
+ } else {
+ // If |tz| isn't a supported time zone identifier, use the default Windows
+ // time zone for ICU.
+ // TODO: Handle invalid time zone identifiers (bug 342068).
+ }
+# else
+ // The TZ environment variable allows both absolute and relative paths,
+ // optionally beginning with a colon (':'). (Relative paths, without the
+ // colon, are just Olson time zone names.) We need to handle absolute paths
+ // ourselves, including handling that they might be symlinks.
+ // <https://unicode-org.atlassian.net/browse/ICU-13694>
+ TimeZoneIdentifierVector tzidVector;
+ std::string_view tzlink = TZContainsAbsolutePath(tz);
+ if (!tzlink.empty()) {
+ if (!ReadTimeZoneLink(tzlink, tzidVector)) {
+ // Ignore OOM.
+ return;
+ }
+ tzid = tzidVector;
+ }
+
+# ifdef ANDROID
+  // ICU ignores the TZ environment variable on Android. If TZ doesn't
+  // contain an absolute path, try to parse it as a time zone name.
+ else if (IsTimeZoneId(tz)) {
+ tzid = mozilla::Span(tz.data(), tz.length());
+ }
+# endif
+# endif /* defined(XP_WIN) */
+
+ if (!tzid.empty()) {
+ auto result = mozilla::intl::TimeZone::SetDefaultTimeZone(tzid);
+ if (result.isErr()) {
+ // Intentionally ignore any errors, because we don't have a good way to
+ // report errors from this function.
+ return;
+ }
+
+ // Return if the default time zone was successfully updated.
+ if (result.unwrap()) {
+ return;
+ }
+
+ // If SetDefaultTimeZone() succeeded, but the default time zone wasn't
+ // changed, proceed to set the default time zone from the host time zone.
+ }
+ }
+
+ // Intentionally ignore any errors, because we don't have a good way to report
+ // errors from this function.
+ (void)mozilla::intl::TimeZone::SetDefaultTimeZoneFromHostTimeZone();
+#endif
+}
diff --git a/js/src/vm/DateTime.h b/js/src/vm/DateTime.h
new file mode 100644
index 0000000000..20feae33a8
--- /dev/null
+++ b/js/src/vm/DateTime.h
@@ -0,0 +1,388 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_DateTime_h
+#define vm_DateTime_h
+
+#include "mozilla/UniquePtr.h"
+
+#include <stdint.h>
+
+#include "js/Utility.h"
+#include "threading/ExclusiveData.h"
+
+#if JS_HAS_INTL_API
+# include "mozilla/intl/ICU4CGlue.h"
+# include "mozilla/intl/TimeZone.h"
+#endif
+
+namespace JS {
+class Realm;
+}
+
+namespace js {
+
+/* Constants defined by ES5 15.9.1.10. */
+constexpr double HoursPerDay = 24;
+constexpr double MinutesPerHour = 60;
+constexpr double SecondsPerMinute = 60;
+constexpr double msPerSecond = 1000;
+constexpr double msPerMinute = msPerSecond * SecondsPerMinute;
+constexpr double msPerHour = msPerMinute * MinutesPerHour;
+
+/* ES5 15.9.1.2. */
+constexpr double msPerDay = msPerHour * HoursPerDay;
+
+/*
+ * Additional quantities not mentioned in the spec. Be careful using these!
+ * They aren't doubles and aren't defined in terms of all the other constants.
+ * If you need constants that trigger floating point semantics, you'll have to
+ * manually cast to get it.
+ */
+constexpr unsigned SecondsPerHour = 60 * 60;
+constexpr unsigned SecondsPerDay = SecondsPerHour * 24;
+
+constexpr double StartOfTime = -8.64e15;
+constexpr double EndOfTime = 8.64e15;
+
+extern bool InitDateTimeState();
+
+extern void FinishDateTimeState();
+
+enum class ResetTimeZoneMode : bool {
+ DontResetIfOffsetUnchanged,
+ ResetEvenIfOffsetUnchanged,
+};
+
+/**
+ * Engine-internal variant of JS::ResetTimeZone with an additional flag to
+ * control whether to forcibly reset all time zone data (this is the default
+ * behavior when calling JS::ResetTimeZone) or to try to reuse the previous
+ * time zone data.
+ */
+extern void ResetTimeZoneInternal(ResetTimeZoneMode mode);
+
+/**
+ * Stores date/time information, particularly concerning the current local
+ * time zone, and implements a small cache for daylight saving time offset
+ * computation.
+ *
+ * The basic idea is premised upon this fact: the DST offset never changes more
+ * than once in any thirty-day period. If we know the offset at t_0 is o_0,
+ * the offset at [t_1, t_2] is also o_0, where t_1 + 3_0 days == t_2,
+ * t_1 <= t_0, and t0 <= t2. (In other words, t_0 is always somewhere within a
+ * thirty-day range where the DST offset is constant: DST changes never occur
+ * more than once in any thirty-day period.) Therefore, if we intelligently
+ * retain knowledge of the offset for a range of dates (which may vary over
+ * time), and if requests are usually for dates within that range, we can often
+ * provide a response without repeated offset calculation.
+ *
+ * Our caching strategy is as follows: on the first request at date t_0 compute
+ * the requested offset o_0. Save { start: t_0, end: t_0, offset: o_0 } as the
+ * cache's state. Subsequent requests within that range are straightforwardly
+ * handled. If a request for t_i is far outside the range (more than thirty
+ * days), compute o_i = dstOffset(t_i) and save { start: t_i, end: t_i,
+ * offset: o_i }. Otherwise attempt to *overextend* the range to either
+ * [start - 30d, end] or [start, end + 30d] as appropriate to encompass
+ * t_i. If the offset o_i30 is the same as the cached offset, extend the
+ * range. Otherwise the over-guess crossed a DST change -- compute
+ * o_i = dstOffset(t_i) and either extend the original range (if o_i == offset)
+ * or start a new one beneath/above the current one with o_i30 as the offset.
+ *
+ * This cache strategy results in 0 to 2 DST offset computations. The naive
+ * always-compute strategy is 1 computation, and since cache maintenance is a
+ * handful of integer arithmetic instructions the speed difference between
+ * always-1 and 1-with-cache is negligible. Caching loses if two computations
+ * happen: when the date is within 30 days of the cached range and when that
+ * 30-day range crosses a DST change. This is relatively uncommon. Further,
+ * instances of such are often dominated by in-range hits, so caching is an
+ * overall slight win.
+ *
+ * Why 30 days? For correctness the duration must be smaller than any possible
+ * duration between DST changes. Past that, note that 1) a large duration
+ * increases the likelihood of crossing a DST change while reducing the number
+ * of cache misses, and 2) a small duration decreases the size of the cached
+ * range while producing more misses. Using a month as the interval change is
+ * a balance between these two that tries to optimize for the calendar month at
+ * a time that a site might display. (One could imagine an adaptive duration
+ * that accommodates near-DST-change dates better; we don't believe the
+ * potential win from better caching offsets the loss from extra complexity.)
+ */
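+//
+// Worked example of the strategy above (illustrative): suppose the first
+// request is for t_0 == June 15 and dstOffset(t_0) == 1h; the cache then
+// holds { start: June 15, end: June 15, offset: 1h }. A request for June 20
+// over-extends the range to [June 15, July 15]; since dstOffset(July 15) is
+// also 1h, the whole range is cached after one computation, and any further
+// request in [June 15, July 15] is answered with no computation at all.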
+class DateTimeInfo {
+ public:
+ // Whether we should resist fingerprinting. For realms in RFP mode a separate
+ // DateTimeInfo instance is used that is always in the UTC time zone.
+ enum class ShouldRFP { No, Yes };
+
+ private:
+ static ExclusiveData<DateTimeInfo>* instance;
+ static ExclusiveData<DateTimeInfo>* instanceRFP;
+
+ friend class ExclusiveData<DateTimeInfo>;
+
+ friend bool InitDateTimeState();
+ friend void FinishDateTimeState();
+
+ explicit DateTimeInfo(bool shouldResistFingerprinting);
+ ~DateTimeInfo();
+
+ static auto acquireLockWithValidTimeZone(ShouldRFP shouldRFP) {
+ auto guard =
+ shouldRFP == ShouldRFP::Yes ? instanceRFP->lock() : instance->lock();
+ if (guard->timeZoneStatus_ != TimeZoneStatus::Valid) {
+ guard->updateTimeZone();
+ }
+ return guard;
+ }
+
+ public:
+ static ShouldRFP shouldRFP(JS::Realm* realm);
+
+ // The spec implicitly assumes DST and time zone adjustment information
+ // never change in the course of a function -- sometimes even across
+ // reentrancy. So make critical sections as narrow as possible.
+
+ /**
+ * Get the DST offset in milliseconds at a UTC time. This is usually
+ * either 0 or |msPerSecond * SecondsPerHour|, but at least one exotic time
+ * zone (Lord Howe Island, Australia) has a fractional-hour offset, just to
+ * keep things interesting.
+ */
+ static int32_t getDSTOffsetMilliseconds(ShouldRFP shouldRFP,
+ int64_t utcMilliseconds) {
+ auto guard = acquireLockWithValidTimeZone(shouldRFP);
+ return guard->internalGetDSTOffsetMilliseconds(utcMilliseconds);
+ }
+
+ /**
+ * The offset in seconds from the current UTC time to the current local
+ * standard time (i.e. not including any offset due to DST) as computed by the
+ * operating system.
+ */
+ static int32_t utcToLocalStandardOffsetSeconds(ShouldRFP shouldRFP) {
+ auto guard = acquireLockWithValidTimeZone(shouldRFP);
+ return guard->utcToLocalStandardOffsetSeconds_;
+ }
+
+#if JS_HAS_INTL_API
+ enum class TimeZoneOffset { UTC, Local };
+
+ /**
+ * Return the time zone offset, including DST, in milliseconds at the
+ * given time. The input time can be either at UTC or at local time.
+ */
+ static int32_t getOffsetMilliseconds(ShouldRFP shouldRFP,
+ int64_t milliseconds,
+ TimeZoneOffset offset) {
+ auto guard = acquireLockWithValidTimeZone(shouldRFP);
+ return guard->internalGetOffsetMilliseconds(milliseconds, offset);
+ }
+
+ /**
+ * Copy the display name for the current time zone at the given time,
+ * localized for the specified locale, into the supplied buffer. If the
+ * buffer is too small, an empty string is stored. The stored display name
+ * is null-terminated in any case.
+ */
+ static bool timeZoneDisplayName(ShouldRFP shouldRFP, char16_t* buf,
+ size_t buflen, int64_t utcMilliseconds,
+ const char* locale) {
+ auto guard = acquireLockWithValidTimeZone(shouldRFP);
+ return guard->internalTimeZoneDisplayName(buf, buflen, utcMilliseconds,
+ locale);
+ }
+
+ /**
+ * Copy the identifier for the current time zone to the provided resizable
+ * buffer.
+ */
+ template <typename B>
+ static mozilla::intl::ICUResult timeZoneId(ShouldRFP shouldRFP, B& buffer) {
+ auto guard = acquireLockWithValidTimeZone(shouldRFP);
+ return guard->timeZone()->GetId(buffer);
+ }
+
+ /**
+ * A number indicating the raw offset from GMT in milliseconds.
+ */
+ static mozilla::Result<int32_t, mozilla::intl::ICUError> getRawOffsetMs(
+ ShouldRFP shouldRFP) {
+ auto guard = acquireLockWithValidTimeZone(shouldRFP);
+ return guard->timeZone()->GetRawOffsetMs();
+ }
+#else
+ /**
+ * Return the local time zone adjustment (ES2019 20.3.1.7) as computed by
+ * the operating system.
+ */
+ static int32_t localTZA(ShouldRFP shouldRFP) {
+ return utcToLocalStandardOffsetSeconds(shouldRFP) * msPerSecond;
+ }
+#endif /* JS_HAS_INTL_API */
+
+ private:
+ // The method below should only be called via js::ResetTimeZoneInternal().
+ friend void js::ResetTimeZoneInternal(ResetTimeZoneMode);
+
+ static void resetTimeZone(ResetTimeZoneMode mode) {
+ {
+ auto guard = instance->lock();
+ guard->internalResetTimeZone(mode);
+ }
+ {
+      // This is only needed to initialize the default state; any later call
+      // will perform an unnecessary reset.
+ auto guard = instanceRFP->lock();
+ guard->internalResetTimeZone(mode);
+ }
+ }
+
+ struct RangeCache {
+ // Start and end offsets in seconds describing the current and the
+ // last cached range.
+ int64_t startSeconds, endSeconds;
+ int64_t oldStartSeconds, oldEndSeconds;
+
+ // The current and the last cached offset in milliseconds.
+ int32_t offsetMilliseconds;
+ int32_t oldOffsetMilliseconds;
+
+ void reset();
+
+ void sanityCheck();
+ };
+
+ bool shouldResistFingerprinting_;
+
+ enum class TimeZoneStatus : uint8_t { Valid, NeedsUpdate, UpdateIfChanged };
+
+ TimeZoneStatus timeZoneStatus_;
+
+ /**
+ * The offset in seconds from the current UTC time to the current local
+ * standard time (i.e. not including any offset due to DST) as computed by the
+ * operating system.
+ *
+ * Cached because retrieving this dynamically is Slow, and a certain venerable
+ * benchmark which shall not be named depends on it being fast.
+ *
+ * SpiderMonkey occasionally and arbitrarily updates this value from the
+ * system time zone to attempt to keep this reasonably up-to-date. If
+ * temporary inaccuracy can't be tolerated, JSAPI clients may call
+ * JS::ResetTimeZone to forcibly sync this with the system time zone.
+ *
+ * In most cases this value is consistent with the raw time zone offset as
+ * returned by the ICU default time zone (`icu::TimeZone::getRawOffset()`),
+ * but it is possible to create cases where the operating system default time
+ * zone differs from the ICU default time zone. For example ICU doesn't
+ * support the full range of TZ environment variable settings, which can
+ * result in <ctime> returning a different time zone than what's returned by
+ * ICU. One example is "TZ=WGT3WGST,M3.5.0/-2,M10.5.0/-1", where <ctime>
+ * returns -3 hours as the local offset, but ICU flat out rejects the TZ value
+ * and instead infers the default time zone via "/etc/localtime" (on Unix).
+ * This offset can also differ from ICU when the operating system and ICU use
+ * different tzdata versions and the time zone rules of the current system
+ * time zone have changed. Or, on Windows, when the Windows default time zone
+   * can't be mapped to an IANA time zone; see for example
+ * <https://unicode-org.atlassian.net/browse/ICU-13845>.
+ *
+ * When ICU is exclusively used for time zone computations, that means when
+ * |JS_HAS_INTL_API| is true, this field is only used to detect system default
+ * time zone changes. It must not be used to convert between local and UTC
+ * time, because, as outlined above, this could lead to different results when
+ * compared to ICU.
+ */
+ int32_t utcToLocalStandardOffsetSeconds_;
+
+ RangeCache dstRange_; // UTC-based ranges
+
+#if JS_HAS_INTL_API
+ // Use the full date-time range when we can use mozilla::intl::TimeZone.
+ static constexpr int64_t MinTimeT =
+ static_cast<int64_t>(StartOfTime / msPerSecond);
+ static constexpr int64_t MaxTimeT =
+ static_cast<int64_t>(EndOfTime / msPerSecond);
+
+ RangeCache utcRange_; // localtime-based ranges
+ RangeCache localRange_; // UTC-based ranges
+
+ /**
+ * The current time zone. Lazily constructed to avoid potential I/O access
+ * when initializing this class.
+ */
+ mozilla::UniquePtr<mozilla::intl::TimeZone> timeZone_;
+
+ /**
+   * Cached standard and daylight saving display names of the current time
+   * zone for the default locale.
+ */
+ JS::UniqueChars locale_;
+ JS::UniqueTwoByteChars standardName_;
+ JS::UniqueTwoByteChars daylightSavingsName_;
+#else
+  // Restrict the date-time range to the minimum required time_t range as
+  // specified in POSIX. Most operating systems support 64-bit time_t
+  // values, but we currently still have some configurations which use
+  // 32-bit time_t, e.g. the ARM simulator on 32-bit Linux (bug 1406993).
+  // Bug 1406992 explores using 64-bit time_t when supported by the
+  // underlying operating system.
+ static constexpr int64_t MinTimeT = 0; /* time_t 01/01/1970 */
+ static constexpr int64_t MaxTimeT = 2145830400; /* time_t 12/31/2037 */
+#endif /* JS_HAS_INTL_API */
+
+ static constexpr int64_t RangeExpansionAmount = 30 * SecondsPerDay;
+
+ void internalResetTimeZone(ResetTimeZoneMode mode);
+
+ void updateTimeZone();
+
+ void internalResyncICUDefaultTimeZone();
+
+ int64_t toClampedSeconds(int64_t milliseconds);
+
+ using ComputeFn = int32_t (DateTimeInfo::*)(int64_t);
+
+ /**
+ * Get or compute an offset value for the requested seconds value.
+ */
+ int32_t getOrComputeValue(RangeCache& range, int64_t seconds,
+ ComputeFn compute);
+
+ /**
+ * Compute the DST offset at the given UTC time in seconds from the epoch.
+ * (getDSTOffsetMilliseconds attempts to return a cached value from the
+ * dstRange_ member, but in case of a cache miss it calls this method.)
+ */
+ int32_t computeDSTOffsetMilliseconds(int64_t utcSeconds);
+
+ int32_t internalGetDSTOffsetMilliseconds(int64_t utcMilliseconds);
+
+#if JS_HAS_INTL_API
+ /**
+ * Compute the UTC offset in milliseconds for the given local time. Called
+ * by internalGetOffsetMilliseconds on a cache miss.
+ */
+ int32_t computeUTCOffsetMilliseconds(int64_t localSeconds);
+
+ /**
+ * Compute the local time offset in milliseconds for the given UTC time.
+ * Called by internalGetOffsetMilliseconds on a cache miss.
+ */
+ int32_t computeLocalOffsetMilliseconds(int64_t utcSeconds);
+
+ int32_t internalGetOffsetMilliseconds(int64_t milliseconds,
+ TimeZoneOffset offset);
+
+ bool internalTimeZoneDisplayName(char16_t* buf, size_t buflen,
+ int64_t utcMilliseconds, const char* locale);
+
+ mozilla::intl::TimeZone* timeZone();
+#endif /* JS_HAS_INTL_API */
+};
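+
+// Usage sketch (illustrative, not part of the original header): given a
+// JS::Realm* |realm| and a UTC time in milliseconds |utcMilliseconds|, all
+// callers go through the static methods, which lock the appropriate
+// instance and validate the time zone before delegating:
+//
+//   auto rfp = js::DateTimeInfo::shouldRFP(realm);
+//   int32_t dstMs =
+//       js::DateTimeInfo::getDSTOffsetMilliseconds(rfp, utcMilliseconds);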
+
+} /* namespace js */
+
+#endif /* vm_DateTime_h */
diff --git a/js/src/vm/EnvironmentObject-inl.h b/js/src/vm/EnvironmentObject-inl.h
new file mode 100644
index 0000000000..bab7570a70
--- /dev/null
+++ b/js/src/vm/EnvironmentObject-inl.h
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_EnvironmentObject_inl_h
+#define vm_EnvironmentObject_inl_h
+
+#include "vm/EnvironmentObject.h"
+
+#include "vm/JSObject-inl.h"
+
+namespace js {
+
+inline ExtensibleLexicalEnvironmentObject&
+NearestEnclosingExtensibleLexicalEnvironment(JSObject* env) {
+ MOZ_ASSERT(env);
+ while (!env->is<ExtensibleLexicalEnvironmentObject>()) {
+ env = env->enclosingEnvironment();
+ MOZ_ASSERT(env);
+ }
+ return env->as<ExtensibleLexicalEnvironmentObject>();
+}
+
+// Returns the innermost "qualified var object" on the environment chain.
+// See the JSObject::isQualifiedVarObj comment for more info.
+inline JSObject& GetVariablesObject(JSObject* envChain) {
+ while (!envChain->isQualifiedVarObj()) {
+ envChain = envChain->enclosingEnvironment();
+ }
+ MOZ_ASSERT(envChain);
+ return *envChain;
+}
+
+inline const Value& EnvironmentObject::aliasedBinding(
+ EnvironmentCoordinate ec) {
+ MOZ_ASSERT(!IsExtensibleLexicalEnvironment(this));
+ MOZ_ASSERT(nonExtensibleIsFixedSlot(ec) ==
+ NativeObject::isFixedSlot(ec.slot()));
+ return getSlot(ec.slot());
+}
+
+inline void EnvironmentObject::setAliasedBinding(uint32_t slot,
+ const Value& v) {
+ setSlot(slot, v);
+}
+
+inline void EnvironmentObject::setAliasedBinding(EnvironmentCoordinate ec,
+ const Value& v) {
+ MOZ_ASSERT(!IsExtensibleLexicalEnvironment(this));
+ MOZ_ASSERT(nonExtensibleIsFixedSlot(ec) ==
+ NativeObject::isFixedSlot(ec.slot()));
+ setAliasedBinding(ec.slot(), v);
+}
+
+inline void EnvironmentObject::setAliasedBinding(const BindingIter& bi,
+ const Value& v) {
+ MOZ_ASSERT(bi.location().kind() == BindingLocation::Kind::Environment);
+ setAliasedBinding(bi.location().slot(), v);
+}
+
+inline void CallObject::setAliasedFormalFromArguments(const Value& argsValue,
+ const Value& v) {
+ setSlot(ArgumentsObject::SlotFromMagicScopeSlotValue(argsValue), v);
+}
+
+} /* namespace js */
+
+inline JSObject* JSObject::enclosingEnvironment() const {
+ if (is<js::EnvironmentObject>()) {
+ return &as<js::EnvironmentObject>().enclosingEnvironment();
+ }
+
+ if (is<js::DebugEnvironmentProxy>()) {
+ return &as<js::DebugEnvironmentProxy>().enclosingEnvironment();
+ }
+
+ if (is<js::GlobalObject>()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT_IF(is<JSFunction>(), as<JSFunction>().isInterpreted());
+ return &nonCCWGlobal();
+}
+
+#endif /* vm_EnvironmentObject_inl_h */
diff --git a/js/src/vm/EnvironmentObject.cpp b/js/src/vm/EnvironmentObject.cpp
new file mode 100644
index 0000000000..664eb39bd6
--- /dev/null
+++ b/js/src/vm/EnvironmentObject.cpp
@@ -0,0 +1,4399 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/EnvironmentObject-inl.h"
+
+#include "mozilla/Maybe.h"
+
+#include "builtin/ModuleObject.h"
+#include "js/Exception.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/friend/WindowProxy.h" // js::IsWindow, js::IsWindowProxy
+#include "js/PropertyAndElement.h" // JS_DefineProperty, JS_DefinePropertyById, JS_HasProperty, JS_HasPropertyById
+#include "vm/ArgumentsObject.h"
+#include "vm/BytecodeIterator.h"
+#include "vm/BytecodeLocation.h"
+#include "vm/GeneratorObject.h" // js::GetGeneratorObjectForEnvironment
+#include "vm/GlobalObject.h"
+#include "vm/JSObject.h"
+#include "vm/ProxyObject.h"
+#include "vm/Realm.h"
+#include "vm/Scope.h"
+#include "vm/Shape.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmDebugFrame.h"
+#include "wasm/WasmInstance.h"
+
+#include "gc/Marking-inl.h"
+#include "gc/StableCellHasher-inl.h"
+#include "vm/BytecodeIterator-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+
+using RootedArgumentsObject = Rooted<ArgumentsObject*>;
+using MutableHandleArgumentsObject = MutableHandle<ArgumentsObject*>;
+
+/*****************************************************************************/
+
+SharedShape* js::EnvironmentCoordinateToEnvironmentShape(JSScript* script,
+ jsbytecode* pc) {
+ MOZ_ASSERT(JOF_OPTYPE(JSOp(*pc)) == JOF_ENVCOORD);
+ ScopeIter si(script->innermostScope(pc));
+ uint32_t hops = EnvironmentCoordinate(pc).hops();
+ while (true) {
+ MOZ_ASSERT(!si.done());
+ if (si.hasSyntacticEnvironment()) {
+ if (!hops) {
+ break;
+ }
+ hops--;
+ }
+ si++;
+ }
+ return si.environmentShape();
+}
+
+PropertyName* js::EnvironmentCoordinateNameSlow(JSScript* script,
+ jsbytecode* pc) {
+ SharedShape* shape = EnvironmentCoordinateToEnvironmentShape(script, pc);
+ EnvironmentCoordinate ec(pc);
+
+ SharedShapePropertyIter<NoGC> iter(shape);
+ while (iter->slot() != ec.slot()) {
+ iter++;
+ }
+ jsid id = iter->key();
+
+ /* Beware nameless destructuring formal. */
+ if (!id.isAtom()) {
+ return script->runtimeFromAnyThread()->commonNames->empty;
+ }
+ return id.toAtom()->asPropertyName();
+}
+
+/*****************************************************************************/
+
+template <typename T>
+static T* CreateEnvironmentObject(JSContext* cx, Handle<SharedShape*> shape,
+ gc::Heap heap) {
+ static_assert(std::is_base_of_v<EnvironmentObject, T>,
+ "T must be an EnvironmentObject");
+
+ // All environment objects can be background-finalized.
+ gc::AllocKind allocKind = gc::GetGCObjectKind(shape->numFixedSlots());
+ MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, &T::class_));
+ allocKind = gc::ForegroundToBackgroundAllocKind(allocKind);
+
+ JSObject* obj = NativeObject::create(cx, allocKind, heap, shape);
+ if (!obj) {
+ return nullptr;
+ }
+
+ return &obj->as<T>();
+}
+
+// Helper function for simple environment objects that don't need the overloads
+// above.
+template <typename T>
+static T* CreateEnvironmentObject(JSContext* cx, Handle<SharedShape*> shape,
+ NewObjectKind newKind = GenericObject) {
+ gc::Heap heap = GetInitialHeap(newKind, &T::class_);
+ return CreateEnvironmentObject<T>(cx, shape, heap);
+}
+
+CallObject* CallObject::createWithShape(JSContext* cx,
+ Handle<SharedShape*> shape) {
+ return CreateEnvironmentObject<CallObject>(cx, shape);
+}
+
+/*
+ * Create a CallObject for a JSScript that is not initialized to any particular
+ * callsite. This object can either be initialized (with an enclosing scope and
+ * callee) or used as a template for jit compilation.
+ */
+CallObject* CallObject::create(JSContext* cx, HandleScript script,
+ HandleObject enclosing, gc::Heap heap) {
+ Rooted<SharedShape*> shape(
+ cx, script->bodyScope()->as<FunctionScope>().environmentShape());
+ MOZ_ASSERT(shape->getObjectClass() == &class_);
+
+ // The JITs assume the result is nursery allocated unless we collected the
+ // nursery, so don't change |heap| here.
+
+ auto* callObj = CreateEnvironmentObject<CallObject>(cx, shape, heap);
+ if (!callObj) {
+ return nullptr;
+ }
+
+ if (enclosing) {
+ callObj->initEnclosingEnvironment(enclosing);
+ }
+
+ return callObj;
+}
+
+CallObject* CallObject::createTemplateObject(JSContext* cx, HandleScript script,
+ HandleObject enclosing) {
+ return create(cx, script, enclosing, gc::Heap::Tenured);
+}
+
+CallObject* CallObject::create(JSContext* cx, AbstractFramePtr frame) {
+ MOZ_ASSERT(frame.isFunctionFrame());
+ cx->check(frame);
+
+ RootedObject envChain(cx, frame.environmentChain());
+ RootedFunction callee(cx, frame.callee());
+ RootedScript script(cx, callee->nonLazyScript());
+
+ gc::Heap heap = gc::Heap::Default;
+ CallObject* callobj = create(cx, script, envChain, heap);
+ if (!callobj) {
+ return nullptr;
+ }
+
+ callobj->initFixedSlot(CALLEE_SLOT, ObjectValue(*callee));
+
+ return callobj;
+}
+
+template <class EnvT>
+EnvT* FindEnclosingEnv(JSObject* env) {
+ for (;;) {
+ if (env->is<EnvT>()) {
+ break;
+ } else if (env->is<EnvironmentObject>()) {
+ env = &env->as<EnvironmentObject>().enclosingEnvironment();
+ } else if (env->is<DebugEnvironmentProxy>()) {
+ EnvironmentObject& unwrapped =
+ env->as<DebugEnvironmentProxy>().environment();
+ if (unwrapped.is<EnvT>()) {
+ env = &unwrapped;
+ break;
+ }
+ env = &env->as<DebugEnvironmentProxy>().enclosingEnvironment();
+ } else {
+ MOZ_ASSERT(env->is<GlobalObject>());
+ return nullptr;
+ }
+ }
+ return &env->as<EnvT>();
+}
+
+CallObject* CallObject::find(JSObject* env) {
+ return FindEnclosingEnv<CallObject>(env);
+}
+
+ModuleEnvironmentObject* ModuleEnvironmentObject::find(JSObject* env) {
+ return FindEnclosingEnv<ModuleEnvironmentObject>(env);
+}
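+
+// Illustrative note: CallObject::find(env) returns the innermost CallObject
+// reachable from |env| by walking enclosing environments (unwrapping any
+// DebugEnvironmentProxy along the way), or nullptr once the global is
+// reached without a match; ModuleEnvironmentObject::find behaves likewise.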
+
+CallObject* CallObject::createHollowForDebug(JSContext* cx,
+ HandleFunction callee) {
+ MOZ_ASSERT(!callee->needsCallObject());
+
+ RootedScript script(cx, callee->nonLazyScript());
+ Rooted<FunctionScope*> scope(cx, &script->bodyScope()->as<FunctionScope>());
+ Rooted<SharedShape*> shape(cx, EmptyEnvironmentShape<CallObject>(cx));
+ if (!shape) {
+ return nullptr;
+ }
+ Rooted<CallObject*> callobj(cx, createWithShape(cx, shape));
+ if (!callobj) {
+ return nullptr;
+ }
+
+ // This environment's enclosing link is never used: the
+ // DebugEnvironmentProxy that refers to this scope carries its own
+ // enclosing link, which is what Debugger uses to construct the tree of
+ // Debugger.Environment objects.
+ callobj->initEnclosingEnvironment(&cx->global()->lexicalEnvironment());
+ callobj->initFixedSlot(CALLEE_SLOT, ObjectValue(*callee));
+
+ RootedValue optimizedOut(cx, MagicValue(JS_OPTIMIZED_OUT));
+ RootedId id(cx);
+ for (Rooted<BindingIter> bi(cx, BindingIter(script)); bi; bi++) {
+ id = NameToId(bi.name()->asPropertyName());
+ if (!SetProperty(cx, callobj, id, optimizedOut)) {
+ return nullptr;
+ }
+ }
+
+ return callobj;
+}
+
+const JSClass CallObject::class_ = {
+ "Call", JSCLASS_HAS_RESERVED_SLOTS(CallObject::RESERVED_SLOTS)};
+
+/*****************************************************************************/
+
+/* static */
+VarEnvironmentObject* VarEnvironmentObject::createInternal(
+ JSContext* cx, Handle<SharedShape*> shape, HandleObject enclosing,
+ gc::Heap heap) {
+ MOZ_ASSERT(shape->getObjectClass() == &class_);
+
+ auto* env = CreateEnvironmentObject<VarEnvironmentObject>(cx, shape, heap);
+ if (!env) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!env->inDictionaryMode());
+
+ if (enclosing) {
+ env->initEnclosingEnvironment(enclosing);
+ }
+
+ return env;
+}
+
+/* static */
+VarEnvironmentObject* VarEnvironmentObject::create(JSContext* cx,
+ Handle<Scope*> scope,
+ HandleObject enclosing,
+ gc::Heap heap) {
+ MOZ_ASSERT(scope->is<EvalScope>() || scope->is<VarScope>());
+
+ Rooted<SharedShape*> shape(cx, scope->environmentShape());
+ auto* env = createInternal(cx, shape, enclosing, heap);
+ if (!env) {
+ return nullptr;
+ }
+ env->initScope(scope);
+ return env;
+}
+
+/* static */
+VarEnvironmentObject* VarEnvironmentObject::createForFrame(
+ JSContext* cx, Handle<Scope*> scope, AbstractFramePtr frame) {
+#ifdef DEBUG
+ if (frame.isEvalFrame()) {
+ MOZ_ASSERT(scope->is<EvalScope>() && scope == frame.script()->bodyScope());
+ MOZ_ASSERT_IF(frame.isInterpreterFrame(),
+ cx->interpreterFrame() == frame.asInterpreterFrame());
+ MOZ_ASSERT_IF(frame.isInterpreterFrame(),
+ cx->interpreterRegs().pc == frame.script()->code());
+ } else {
+ MOZ_ASSERT(frame.environmentChain());
+ MOZ_ASSERT_IF(
+ frame.callee()->needsCallObject(),
+ &frame.environmentChain()->as<CallObject>().callee() == frame.callee());
+ }
+#endif
+
+ RootedObject envChain(cx, frame.environmentChain());
+ return create(cx, scope, envChain, gc::Heap::Default);
+}
+
+/* static */
+VarEnvironmentObject* VarEnvironmentObject::createHollowForDebug(
+ JSContext* cx, Handle<Scope*> scope) {
+ MOZ_ASSERT(scope->is<VarScope>() || scope->kind() == ScopeKind::StrictEval);
+ MOZ_ASSERT(!scope->hasEnvironment());
+
+ Rooted<SharedShape*> shape(cx,
+ EmptyEnvironmentShape<VarEnvironmentObject>(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ // This environment's enclosing link is never used: the
+ // DebugEnvironmentProxy that refers to this scope carries its own
+ // enclosing link, which is what Debugger uses to construct the tree of
+ // Debugger.Environment objects.
+ RootedObject enclosingEnv(cx, &cx->global()->lexicalEnvironment());
+ Rooted<VarEnvironmentObject*> env(
+ cx, createInternal(cx, shape, enclosingEnv, gc::Heap::Default));
+ if (!env) {
+ return nullptr;
+ }
+
+ RootedValue optimizedOut(cx, MagicValue(JS_OPTIMIZED_OUT));
+ RootedId id(cx);
+ for (Rooted<BindingIter> bi(cx, BindingIter(scope)); bi; bi++) {
+ id = NameToId(bi.name()->asPropertyName());
+ if (!SetProperty(cx, env, id, optimizedOut)) {
+ return nullptr;
+ }
+ }
+
+ env->initScope(scope);
+ return env;
+}
+
+/* static */
+VarEnvironmentObject* VarEnvironmentObject::createTemplateObject(
+ JSContext* cx, Handle<VarScope*> scope) {
+ return create(cx, scope, nullptr, gc::Heap::Tenured);
+}
+
+/* static */
+VarEnvironmentObject* VarEnvironmentObject::createWithoutEnclosing(
+ JSContext* cx, Handle<VarScope*> scope) {
+ return create(cx, scope, nullptr, gc::Heap::Default);
+}
+
+const JSClass VarEnvironmentObject::class_ = {
+ "Var", JSCLASS_HAS_RESERVED_SLOTS(VarEnvironmentObject::RESERVED_SLOTS)};
+
+/*****************************************************************************/
+
+const ObjectOps ModuleEnvironmentObject::objectOps_ = {
+ ModuleEnvironmentObject::lookupProperty, // lookupProperty
+ nullptr, // defineProperty
+ ModuleEnvironmentObject::hasProperty, // hasProperty
+ ModuleEnvironmentObject::getProperty, // getProperty
+ ModuleEnvironmentObject::setProperty, // setProperty
+ ModuleEnvironmentObject::
+ getOwnPropertyDescriptor, // getOwnPropertyDescriptor
+ ModuleEnvironmentObject::deleteProperty, // deleteProperty
+ nullptr, // getElements
+ nullptr, // funToString
+};
+
+const JSClassOps ModuleEnvironmentObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ ModuleEnvironmentObject::newEnumerate, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ nullptr, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass ModuleEnvironmentObject::class_ = {
+ "ModuleEnvironmentObject",
+ JSCLASS_HAS_RESERVED_SLOTS(ModuleEnvironmentObject::RESERVED_SLOTS),
+ &ModuleEnvironmentObject::classOps_,
+ JS_NULL_CLASS_SPEC,
+ JS_NULL_CLASS_EXT,
+ &ModuleEnvironmentObject::objectOps_};
+
+/* static */
+ModuleEnvironmentObject* ModuleEnvironmentObject::create(
+ JSContext* cx, Handle<ModuleObject*> module) {
+ RootedScript script(cx, module->script());
+ Rooted<SharedShape*> shape(
+ cx, script->bodyScope()->as<ModuleScope>().environmentShape());
+ MOZ_ASSERT(shape->getObjectClass() == &class_);
+
+ Rooted<ModuleEnvironmentObject*> env(
+ cx, CreateEnvironmentObject<ModuleEnvironmentObject>(cx, shape,
+ TenuredObject));
+ if (!env) {
+ return nullptr;
+ }
+
+ env->initReservedSlot(MODULE_SLOT, ObjectValue(*module));
+
+ // Initialize this early so that we can manipulate the env object without
+ // causing assertions.
+ env->initEnclosingEnvironment(&cx->global()->lexicalEnvironment());
+
+  // Initialize all lexical bindings and imports as uninitialized. Imports
+  // are included because they have a special TDZ for cyclic imports.
+ for (BindingIter bi(script); bi; bi++) {
+ BindingLocation loc = bi.location();
+ if (loc.kind() == BindingLocation::Kind::Environment &&
+ BindingKindIsLexical(bi.kind())) {
+ env->initSlot(loc.slot(), MagicValue(JS_UNINITIALIZED_LEXICAL));
+ }
+ }
+
+  // It is not possible to add or remove bindings from a module environment
+  // after this point, as module code is always strict.
+#ifdef DEBUG
+ for (ShapePropertyIter<NoGC> iter(env->shape()); !iter.done(); iter++) {
+ MOZ_ASSERT(!iter->configurable());
+ }
+ MOZ_ASSERT(env->hasFlag(ObjectFlag::NotExtensible));
+ MOZ_ASSERT(!env->inDictionaryMode());
+#endif
+
+ return env;
+}
+
+ModuleObject& ModuleEnvironmentObject::module() const {
+ return getReservedSlot(MODULE_SLOT).toObject().as<ModuleObject>();
+}
+
+IndirectBindingMap& ModuleEnvironmentObject::importBindings() const {
+ return module().importBindings();
+}
+
+bool ModuleEnvironmentObject::createImportBinding(JSContext* cx,
+ Handle<JSAtom*> importName,
+ Handle<ModuleObject*> module,
+ Handle<JSAtom*> localName) {
+ RootedId importNameId(cx, AtomToId(importName));
+ RootedId localNameId(cx, AtomToId(localName));
+ Rooted<ModuleEnvironmentObject*> env(cx, &module->initialEnvironment());
+ if (!importBindings().put(cx, importNameId, env, localNameId)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool ModuleEnvironmentObject::hasImportBinding(Handle<PropertyName*> name) {
+ return importBindings().has(NameToId(name));
+}
+
+bool ModuleEnvironmentObject::lookupImport(
+ jsid name, ModuleEnvironmentObject** envOut,
+ mozilla::Maybe<PropertyInfo>* propOut) {
+ return importBindings().lookup(name, envOut, propOut);
+}
+
+/* static */
+bool ModuleEnvironmentObject::lookupProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ MutableHandleObject objp,
+ PropertyResult* propp) {
+ const IndirectBindingMap& bindings =
+ obj->as<ModuleEnvironmentObject>().importBindings();
+ mozilla::Maybe<PropertyInfo> propInfo;
+ ModuleEnvironmentObject* env;
+ if (bindings.lookup(id, &env, &propInfo)) {
+ objp.set(env);
+ propp->setNativeProperty(*propInfo);
+ return true;
+ }
+
+ Rooted<NativeObject*> target(cx, &obj->as<NativeObject>());
+ if (!NativeLookupOwnProperty<CanGC>(cx, target, id, propp)) {
+ return false;
+ }
+
+ objp.set(obj);
+ return true;
+}
+
+/* static */
+bool ModuleEnvironmentObject::hasProperty(JSContext* cx, HandleObject obj,
+ HandleId id, bool* foundp) {
+ if (obj->as<ModuleEnvironmentObject>().importBindings().has(id)) {
+ *foundp = true;
+ return true;
+ }
+
+ Rooted<NativeObject*> self(cx, &obj->as<NativeObject>());
+ return NativeHasProperty(cx, self, id, foundp);
+}
+
+/* static */
+bool ModuleEnvironmentObject::getProperty(JSContext* cx, HandleObject obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp) {
+ const IndirectBindingMap& bindings =
+ obj->as<ModuleEnvironmentObject>().importBindings();
+ mozilla::Maybe<PropertyInfo> prop;
+ ModuleEnvironmentObject* env;
+ if (bindings.lookup(id, &env, &prop)) {
+ vp.set(env->getSlot(prop->slot()));
+ return true;
+ }
+
+ Rooted<NativeObject*> self(cx, &obj->as<NativeObject>());
+ return NativeGetProperty(cx, self, receiver, id, vp);
+}
+
+/* static */
+bool ModuleEnvironmentObject::setProperty(JSContext* cx, HandleObject obj,
+ HandleId id, HandleValue v,
+ HandleValue receiver,
+ JS::ObjectOpResult& result) {
+ Rooted<ModuleEnvironmentObject*> self(cx,
+ &obj->as<ModuleEnvironmentObject>());
+ if (self->importBindings().has(id)) {
+ return result.failReadOnly();
+ }
+
+ return NativeSetProperty<Qualified>(cx, self, id, v, receiver, result);
+}
+
+/* static */
+bool ModuleEnvironmentObject::getOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) {
+ const IndirectBindingMap& bindings =
+ obj->as<ModuleEnvironmentObject>().importBindings();
+ mozilla::Maybe<PropertyInfo> prop;
+ ModuleEnvironmentObject* env;
+ if (bindings.lookup(id, &env, &prop)) {
+ desc.set(mozilla::Some(PropertyDescriptor::Data(
+ env->getSlot(prop->slot()),
+ {JS::PropertyAttribute::Enumerable, JS::PropertyAttribute::Writable})));
+ return true;
+ }
+
+ Rooted<NativeObject*> self(cx, &obj->as<NativeObject>());
+ return NativeGetOwnPropertyDescriptor(cx, self, id, desc);
+}
+
+/* static */
+bool ModuleEnvironmentObject::deleteProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ ObjectOpResult& result) {
+ return result.failCantDelete();
+}
+
+/* static */
+bool ModuleEnvironmentObject::newEnumerate(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector properties,
+ bool enumerableOnly) {
+ Rooted<ModuleEnvironmentObject*> self(cx,
+ &obj->as<ModuleEnvironmentObject>());
+ const IndirectBindingMap& bs(self->importBindings());
+
+ MOZ_ASSERT(properties.length() == 0);
+ size_t count = bs.count() + self->slotSpan() - RESERVED_SLOTS;
+ if (!properties.reserve(count)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ bs.forEachExportedName([&](jsid name) { properties.infallibleAppend(name); });
+
+ for (ShapePropertyIter<NoGC> iter(self->shape()); !iter.done(); iter++) {
+ properties.infallibleAppend(iter->key());
+ }
+
+ MOZ_ASSERT(properties.length() == count);
+ return true;
+}
+
+/*****************************************************************************/
+
+const JSClass WasmInstanceEnvironmentObject::class_ = {
+ "WasmInstance",
+ JSCLASS_HAS_RESERVED_SLOTS(WasmInstanceEnvironmentObject::RESERVED_SLOTS)};
+
+/* static */
+WasmInstanceEnvironmentObject*
+WasmInstanceEnvironmentObject::createHollowForDebug(
+ JSContext* cx, Handle<WasmInstanceScope*> scope) {
+ Rooted<SharedShape*> shape(
+ cx, EmptyEnvironmentShape<WasmInstanceEnvironmentObject>(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ auto* env = CreateEnvironmentObject<WasmInstanceEnvironmentObject>(cx, shape);
+ if (!env) {
+ return nullptr;
+ }
+
+ env->initEnclosingEnvironment(&cx->global()->lexicalEnvironment());
+ env->initReservedSlot(SCOPE_SLOT, PrivateGCThingValue(scope));
+
+ return env;
+}
+
+/*****************************************************************************/
+
+const JSClass WasmFunctionCallObject::class_ = {
+ "WasmCall",
+ JSCLASS_HAS_RESERVED_SLOTS(WasmFunctionCallObject::RESERVED_SLOTS)};
+
+/* static */
+WasmFunctionCallObject* WasmFunctionCallObject::createHollowForDebug(
+ JSContext* cx, HandleObject enclosing, Handle<WasmFunctionScope*> scope) {
+ Rooted<SharedShape*> shape(cx,
+ EmptyEnvironmentShape<WasmFunctionCallObject>(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ auto* callobj = CreateEnvironmentObject<WasmFunctionCallObject>(cx, shape);
+ if (!callobj) {
+ return nullptr;
+ }
+
+ callobj->initEnclosingEnvironment(enclosing);
+ callobj->initReservedSlot(SCOPE_SLOT, PrivateGCThingValue(scope));
+
+ return callobj;
+}
+
+/*****************************************************************************/
+
+WithEnvironmentObject* WithEnvironmentObject::create(JSContext* cx,
+ HandleObject object,
+ HandleObject enclosing,
+ Handle<WithScope*> scope) {
+ Rooted<SharedShape*> shape(cx,
+ EmptyEnvironmentShape<WithEnvironmentObject>(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ auto* obj = CreateEnvironmentObject<WithEnvironmentObject>(cx, shape);
+ if (!obj) {
+ return nullptr;
+ }
+
+ JSObject* thisObj = GetThisObject(object);
+
+ obj->initEnclosingEnvironment(enclosing);
+ obj->initReservedSlot(OBJECT_SLOT, ObjectValue(*object));
+ obj->initReservedSlot(THIS_SLOT, ObjectValue(*thisObj));
+ if (scope) {
+ obj->initReservedSlot(SCOPE_SLOT, PrivateGCThingValue(scope));
+ } else {
+ obj->initReservedSlot(SCOPE_SLOT, NullValue());
+ }
+
+ return obj;
+}
+
+WithEnvironmentObject* WithEnvironmentObject::createNonSyntactic(
+ JSContext* cx, HandleObject object, HandleObject enclosing) {
+ return create(cx, object, enclosing, nullptr);
+}
+
+static inline bool IsUnscopableDotName(JSContext* cx, HandleId id) {
+ return id.isAtom(cx->names().dotThis) || id.isAtom(cx->names().dotNewTarget);
+}
+
+#ifdef DEBUG
+static bool IsInternalDotName(JSContext* cx, HandleId id) {
+ return id.isAtom(cx->names().dotThis) ||
+ id.isAtom(cx->names().dotGenerator) ||
+ id.isAtom(cx->names().dotInitializers) ||
+ id.isAtom(cx->names().dotFieldKeys) ||
+ id.isAtom(cx->names().dotStaticInitializers) ||
+ id.isAtom(cx->names().dotStaticFieldKeys) ||
+ id.isAtom(cx->names().dotArgs) ||
+ id.isAtom(cx->names().dotNewTarget) ||
+ id.isAtom(cx->names().starNamespaceStar);
+}
+#endif
+
+/* Implements ES6 8.1.1.2.1 HasBinding steps 7-9. */
+static bool CheckUnscopables(JSContext* cx, HandleObject obj, HandleId id,
+ bool* scopable) {
+ RootedId unscopablesId(
+ cx, PropertyKey::Symbol(cx->wellKnownSymbols().unscopables));
+ RootedValue v(cx);
+ if (!GetProperty(cx, obj, obj, unscopablesId, &v)) {
+ return false;
+ }
+ if (v.isObject()) {
+ RootedObject unscopablesObj(cx, &v.toObject());
+ if (!GetProperty(cx, unscopablesObj, unscopablesObj, id, &v)) {
+ return false;
+ }
+ *scopable = !ToBoolean(v);
+ } else {
+ *scopable = true;
+ }
+ return true;
+}
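+
+// An illustrative sketch of the filtering above (names are hypothetical):
+//
+//   const obj = { x: 1, [Symbol.unscopables]: { x: true } };
+//   with (obj) {
+//     x;  // ReferenceError: |x| is found on |obj| but marked unscopable,
+//   }     // so the lookup continues up the scope chain and fails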
+
+static bool with_LookupProperty(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleObject objp,
+ PropertyResult* propp) {
+ // SpiderMonkey-specific: consider the internal '.this' and '.newTarget' names
+ // to be unscopable.
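+  //
+  // This matters because a with-target can carry a property literally named
+  // ".this". A hedged illustration:
+  //
+  //   function f() {
+  //     with ({ ".this": "bogus" }) {
+  //       return (() => this)();  // the arrow resolves |this| via the
+  //     }                         // internal '.this' name, which must not
+  //   }                           // find the with-target's property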
+ if (IsUnscopableDotName(cx, id)) {
+ objp.set(nullptr);
+ propp->setNotFound();
+ return true;
+ }
+
+ // Other internal dot-names shouldn't even end up in with-environments.
+ MOZ_ASSERT(!IsInternalDotName(cx, id));
+
+ RootedObject actual(cx, &obj->as<WithEnvironmentObject>().object());
+ if (!LookupProperty(cx, actual, id, objp, propp)) {
+ return false;
+ }
+
+ if (propp->isFound()) {
+ bool scopable;
+ if (!CheckUnscopables(cx, actual, id, &scopable)) {
+ return false;
+ }
+ if (!scopable) {
+ objp.set(nullptr);
+ propp->setNotFound();
+ }
+ }
+ return true;
+}
+
+static bool with_DefineProperty(JSContext* cx, HandleObject obj, HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result) {
+ MOZ_ASSERT(!IsInternalDotName(cx, id));
+ RootedObject actual(cx, &obj->as<WithEnvironmentObject>().object());
+ return DefineProperty(cx, actual, id, desc, result);
+}
+
+static bool with_HasProperty(JSContext* cx, HandleObject obj, HandleId id,
+ bool* foundp) {
+ MOZ_ASSERT(!IsInternalDotName(cx, id));
+ RootedObject actual(cx, &obj->as<WithEnvironmentObject>().object());
+
+  // ES 8.1.1.2.1 steps 3-5.
+ if (!HasProperty(cx, actual, id, foundp)) {
+ return false;
+ }
+ if (!*foundp) {
+ return true;
+ }
+
+ // Steps 7-10. (Step 6 is a no-op.)
+ return CheckUnscopables(cx, actual, id, foundp);
+}
+
+static bool with_GetProperty(JSContext* cx, HandleObject obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp) {
+ MOZ_ASSERT(!IsInternalDotName(cx, id));
+ RootedObject actual(cx, &obj->as<WithEnvironmentObject>().object());
+ RootedValue actualReceiver(cx, receiver);
+ if (receiver.isObject() && &receiver.toObject() == obj) {
+ actualReceiver.setObject(*actual);
+ }
+ return GetProperty(cx, actual, actualReceiver, id, vp);
+}
+
+static bool with_SetProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, HandleValue receiver,
+ ObjectOpResult& result) {
+ MOZ_ASSERT(!IsInternalDotName(cx, id));
+ RootedObject actual(cx, &obj->as<WithEnvironmentObject>().object());
+ RootedValue actualReceiver(cx, receiver);
+ if (receiver.isObject() && &receiver.toObject() == obj) {
+ actualReceiver.setObject(*actual);
+ }
+ return SetProperty(cx, actual, id, v, actualReceiver, result);
+}
+
+static bool with_GetOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) {
+ MOZ_ASSERT(!IsInternalDotName(cx, id));
+ RootedObject actual(cx, &obj->as<WithEnvironmentObject>().object());
+ return GetOwnPropertyDescriptor(cx, actual, id, desc);
+}
+
+static bool with_DeleteProperty(JSContext* cx, HandleObject obj, HandleId id,
+ ObjectOpResult& result) {
+ MOZ_ASSERT(!IsInternalDotName(cx, id));
+ RootedObject actual(cx, &obj->as<WithEnvironmentObject>().object());
+ return DeleteProperty(cx, actual, id, result);
+}
+
+static const ObjectOps WithEnvironmentObjectOps = {
+ with_LookupProperty, // lookupProperty
+ with_DefineProperty, // defineProperty
+ with_HasProperty, // hasProperty
+ with_GetProperty, // getProperty
+ with_SetProperty, // setProperty
+ with_GetOwnPropertyDescriptor, // getOwnPropertyDescriptor
+ with_DeleteProperty, // deleteProperty
+ nullptr, // getElements
+ nullptr, // funToString
+};
+
+const JSClass WithEnvironmentObject::class_ = {
+ "With",
+ JSCLASS_HAS_RESERVED_SLOTS(WithEnvironmentObject::RESERVED_SLOTS),
+ JS_NULL_CLASS_OPS,
+ JS_NULL_CLASS_SPEC,
+ JS_NULL_CLASS_EXT,
+ &WithEnvironmentObjectOps};
+
+/* static */
+NonSyntacticVariablesObject* NonSyntacticVariablesObject::create(
+ JSContext* cx) {
+ Rooted<SharedShape*> shape(
+ cx, EmptyEnvironmentShape<NonSyntacticVariablesObject>(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ Rooted<NonSyntacticVariablesObject*> obj(
+ cx, CreateEnvironmentObject<NonSyntacticVariablesObject>(cx, shape,
+ TenuredObject));
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isUnqualifiedVarObj());
+ if (!JSObject::setQualifiedVarObj(cx, obj)) {
+ return nullptr;
+ }
+
+ obj->initEnclosingEnvironment(&cx->global()->lexicalEnvironment());
+ return obj;
+}
+
+const JSClass NonSyntacticVariablesObject::class_ = {
+ "NonSyntacticVariablesObject",
+ JSCLASS_HAS_RESERVED_SLOTS(NonSyntacticVariablesObject::RESERVED_SLOTS)};
+
+bool js::CreateNonSyntacticEnvironmentChain(JSContext* cx,
+ HandleObjectVector envChain,
+ MutableHandleObject env) {
+ // Callers are responsible for segregating the NonSyntactic case from simple
+ // compilation cases.
+ MOZ_RELEASE_ASSERT(!envChain.empty());
+
+ RootedObject globalLexical(cx, &cx->global()->lexicalEnvironment());
+ if (!CreateObjectsForEnvironmentChain(cx, envChain, globalLexical, env)) {
+ return false;
+ }
+
+ // The XPConnect subscript loader, which may pass in its own
+ // environments to load scripts in, expects the environment chain to
+ // be the holder of "var" declarations. In SpiderMonkey, such objects
+ // are called "qualified varobjs", the "qualified" part meaning the
+ // declaration was qualified by "var". There is only sadness.
+ //
+ // See JSObject::isQualifiedVarObj.
+ if (!JSObject::setQualifiedVarObj(cx, env)) {
+ return false;
+ }
+
+ // Also get a non-syntactic lexical environment to capture 'let' and
+ // 'const' bindings. To persist lexical bindings, we have a 1-1
+ // mapping with the final unwrapped environment object (the
+ // environment that stores the 'var' bindings) and the lexical
+ // environment.
+ //
+ // TODOshu: disallow the subscript loader from using non-distinguished
+ // objects as dynamic scopes.
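+  //
+  // A hedged sketch of where declarations land for a script run over
+  // envChain = [obj]:
+  //
+  //   var x = 1;  // lands on the qualified varobj wrapping |obj|
+  //   let y = 2;  // lands on the non-syntactic lexical environment
+  //               // created below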
+ env.set(
+ ObjectRealm::get(env).getOrCreateNonSyntacticLexicalEnvironment(cx, env));
+ return !!env;
+}
+
+/*****************************************************************************/
+
+const JSClass LexicalEnvironmentObject::class_ = {
+ "LexicalEnvironment",
+ JSCLASS_HAS_RESERVED_SLOTS(LexicalEnvironmentObject::RESERVED_SLOTS),
+ JS_NULL_CLASS_OPS,
+ JS_NULL_CLASS_SPEC,
+ JS_NULL_CLASS_EXT,
+ JS_NULL_OBJECT_OPS};
+
+/* static */
+LexicalEnvironmentObject* LexicalEnvironmentObject::create(
+ JSContext* cx, Handle<SharedShape*> shape, HandleObject enclosing,
+ gc::Heap heap) {
+ MOZ_ASSERT(shape->getObjectClass() == &LexicalEnvironmentObject::class_);
+
+ // The JITs assume the result is nursery allocated unless we collected the
+ // nursery, so don't change |heap| here.
+
+ auto* env =
+ CreateEnvironmentObject<LexicalEnvironmentObject>(cx, shape, heap);
+ if (!env) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!env->inDictionaryMode());
+
+ if (enclosing) {
+ env->initEnclosingEnvironment(enclosing);
+ }
+
+ return env;
+}
+
+bool LexicalEnvironmentObject::isExtensible() const {
+ return NativeObject::isExtensible();
+}
+
+/* static */
+BlockLexicalEnvironmentObject* BlockLexicalEnvironmentObject::create(
+ JSContext* cx, Handle<LexicalScope*> scope, HandleObject enclosing,
+ gc::Heap heap) {
+ cx->check(enclosing);
+ MOZ_ASSERT(scope->hasEnvironment());
+
+ Rooted<SharedShape*> shape(cx, scope->environmentShape());
+ auto* env = static_cast<BlockLexicalEnvironmentObject*>(
+ LexicalEnvironmentObject::create(cx, shape, enclosing, heap));
+ if (!env) {
+ return nullptr;
+ }
+
+ // All lexical bindings start off uninitialized for TDZ.
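+  //
+  // An illustrative sketch (|x| is closed over, so it lives in this
+  // environment):
+  //
+  //   {
+  //     f();  // ReferenceError: |x| is read before |let x| initializes it
+  //     let x = 1;
+  //     function f() { return x; }
+  //   }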
+ uint32_t lastSlot = env->getLastProperty().slot();
+ for (uint32_t slot = JSSLOT_FREE(&class_); slot <= lastSlot; slot++) {
+ env->initSlot(slot, MagicValue(JS_UNINITIALIZED_LEXICAL));
+ }
+
+ env->initScope(scope);
+ return env;
+}
+
+/* static */
+BlockLexicalEnvironmentObject* BlockLexicalEnvironmentObject::createForFrame(
+ JSContext* cx, Handle<LexicalScope*> scope, AbstractFramePtr frame) {
+ RootedObject enclosing(cx, frame.environmentChain());
+ return create(cx, scope, enclosing, gc::Heap::Default);
+}
+
+/* static */
+BlockLexicalEnvironmentObject*
+BlockLexicalEnvironmentObject::createHollowForDebug(
+ JSContext* cx, Handle<LexicalScope*> scope) {
+ MOZ_ASSERT(!scope->hasEnvironment());
+
+ Rooted<SharedShape*> shape(
+ cx, LexicalScope::getEmptyExtensibleEnvironmentShape(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ // This environment's enclosing link is never used: the
+ // DebugEnvironmentProxy that refers to this scope carries its own
+ // enclosing link, which is what Debugger uses to construct the tree of
+ // Debugger.Environment objects.
+ RootedObject enclosingEnv(cx, &cx->global()->lexicalEnvironment());
+ Rooted<LexicalEnvironmentObject*> env(
+ cx, LexicalEnvironmentObject::create(cx, shape, enclosingEnv,
+ gc::Heap::Tenured));
+ if (!env) {
+ return nullptr;
+ }
+
+ RootedValue optimizedOut(cx, MagicValue(JS_OPTIMIZED_OUT));
+ RootedId id(cx);
+ for (Rooted<BindingIter> bi(cx, BindingIter(scope)); bi; bi++) {
+ id = NameToId(bi.name()->asPropertyName());
+ if (!SetProperty(cx, env, id, optimizedOut)) {
+ return nullptr;
+ }
+ }
+
+ if (!JSObject::setFlag(cx, env, ObjectFlag::NotExtensible)) {
+ return nullptr;
+ }
+
+ env->as<ScopedLexicalEnvironmentObject>().initScope(scope);
+ return &env->as<BlockLexicalEnvironmentObject>();
+}
+
+/* static */
+BlockLexicalEnvironmentObject*
+BlockLexicalEnvironmentObject::createTemplateObject(
+ JSContext* cx, Handle<LexicalScope*> scope) {
+ return create(cx, scope, nullptr, gc::Heap::Tenured);
+}
+
+/* static */
+BlockLexicalEnvironmentObject*
+BlockLexicalEnvironmentObject::createWithoutEnclosing(
+ JSContext* cx, Handle<LexicalScope*> scope) {
+ return create(cx, scope, nullptr, gc::Heap::Default);
+}
+
+/* static */
+BlockLexicalEnvironmentObject* BlockLexicalEnvironmentObject::clone(
+ JSContext* cx, Handle<BlockLexicalEnvironmentObject*> env) {
+ Rooted<LexicalScope*> scope(cx, &env->scope());
+ RootedObject enclosing(cx, &env->enclosingEnvironment());
+ Rooted<BlockLexicalEnvironmentObject*> copy(
+ cx, create(cx, scope, enclosing, gc::Heap::Default));
+ if (!copy) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(env->shape() == copy->shape());
+
+ for (uint32_t i = JSSLOT_FREE(&class_); i < copy->slotSpan(); i++) {
+ copy->setSlot(i, env->getSlot(i));
+ }
+
+ return copy;
+}
+
+/* static */
+BlockLexicalEnvironmentObject* BlockLexicalEnvironmentObject::recreate(
+ JSContext* cx, Handle<BlockLexicalEnvironmentObject*> env) {
+ Rooted<LexicalScope*> scope(cx, &env->scope());
+ RootedObject enclosing(cx, &env->enclosingEnvironment());
+ return create(cx, scope, enclosing, gc::Heap::Default);
+}
+
+/* static */
+NamedLambdaObject* NamedLambdaObject::create(JSContext* cx,
+ HandleFunction callee,
+ HandleObject enclosing,
+ gc::Heap heap) {
+ MOZ_ASSERT(callee->isNamedLambda());
+ Rooted<Scope*> scope(cx, callee->nonLazyScript()->maybeNamedLambdaScope());
+ MOZ_ASSERT(scope && scope->environmentShape());
+
+#ifdef DEBUG
+ {
+ // Named lambda objects have one (non-writable) property.
+ SharedShapePropertyIter<NoGC> iter(scope->environmentShape());
+ MOZ_ASSERT(iter->slot() == lambdaSlot());
+ MOZ_ASSERT(!iter->writable());
+ iter++;
+ MOZ_ASSERT(iter.done());
+
+ // There should be exactly one binding in the named lambda scope.
+ BindingIter bi(scope);
+ bi++;
+ MOZ_ASSERT(bi.done());
+ }
+#endif
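+
+  // The binding is the function's own name, observable from script as a
+  // read-only alias of the function (a hedged illustration):
+  //
+  //   var f = function g() {
+  //     g = 0;           // silently ignored (TypeError in strict mode)
+  //     return g === f;  // true: |g| still refers to the function
+  //   };
+  //   f();  // true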
+
+ BlockLexicalEnvironmentObject* obj = BlockLexicalEnvironmentObject::create(
+ cx, scope.as<LexicalScope>(), enclosing, heap);
+ if (!obj) {
+ return nullptr;
+ }
+
+ obj->initFixedSlot(lambdaSlot(), ObjectValue(*callee));
+ return static_cast<NamedLambdaObject*>(obj);
+}
+
+/* static */
+NamedLambdaObject* NamedLambdaObject::createTemplateObject(
+ JSContext* cx, HandleFunction callee) {
+ return create(cx, callee, nullptr, gc::Heap::Tenured);
+}
+
+/* static */
+NamedLambdaObject* NamedLambdaObject::createWithoutEnclosing(
+ JSContext* cx, HandleFunction callee) {
+ return create(cx, callee, nullptr, gc::Heap::Default);
+}
+
+/* static */
+NamedLambdaObject* NamedLambdaObject::create(JSContext* cx,
+ AbstractFramePtr frame) {
+ RootedFunction fun(cx, frame.callee());
+ RootedObject enclosing(cx, frame.environmentChain());
+ return create(cx, fun, enclosing, gc::Heap::Default);
+}
+
+/* static */
+size_t NamedLambdaObject::lambdaSlot() {
+ // Named lambda environments have exactly one name.
+ return JSSLOT_FREE(&LexicalEnvironmentObject::class_);
+}
+
+/* static */
+ClassBodyLexicalEnvironmentObject* ClassBodyLexicalEnvironmentObject::create(
+ JSContext* cx, Handle<ClassBodyScope*> scope, HandleObject enclosing,
+ gc::Heap heap) {
+ cx->check(enclosing);
+ MOZ_ASSERT(scope->hasEnvironment());
+
+ Rooted<SharedShape*> shape(cx, scope->environmentShape());
+ auto* env = static_cast<ClassBodyLexicalEnvironmentObject*>(
+ LexicalEnvironmentObject::create(cx, shape, enclosing, heap));
+ if (!env) {
+ return nullptr;
+ }
+
+ env->initScope(scope);
+ return env;
+}
+
+/* static */
+ClassBodyLexicalEnvironmentObject*
+ClassBodyLexicalEnvironmentObject::createForFrame(JSContext* cx,
+ Handle<ClassBodyScope*> scope,
+ AbstractFramePtr frame) {
+ RootedObject enclosing(cx, frame.environmentChain());
+ return create(cx, scope, enclosing, gc::Heap::Default);
+}
+
+/* static */
+ClassBodyLexicalEnvironmentObject*
+ClassBodyLexicalEnvironmentObject::createTemplateObject(
+ JSContext* cx, Handle<ClassBodyScope*> scope) {
+ return create(cx, scope, nullptr, gc::Heap::Tenured);
+}
+
+/* static */
+ClassBodyLexicalEnvironmentObject*
+ClassBodyLexicalEnvironmentObject::createWithoutEnclosing(
+ JSContext* cx, Handle<ClassBodyScope*> scope) {
+ return create(cx, scope, nullptr, gc::Heap::Default);
+}
+
+JSObject* ExtensibleLexicalEnvironmentObject::thisObject() const {
+ JSObject* obj = &getReservedSlot(THIS_VALUE_OR_SCOPE_SLOT).toObject();
+
+  // Windows must never be exposed to script. setWindowProxyThisObject should
+  // have set this to the WindowProxy.
+ MOZ_ASSERT(!IsWindow(obj));
+
+ // WarpBuilder relies on the return value not being nursery-allocated for the
+ // global lexical environment.
+ MOZ_ASSERT_IF(isGlobal(), obj->isTenured());
+
+ return obj;
+}
+
+/* static */
+ExtensibleLexicalEnvironmentObject*
+ExtensibleLexicalEnvironmentObject::forVarEnvironment(JSObject* obj) {
+ ExtensibleLexicalEnvironmentObject* lexical = nullptr;
+ if (obj->is<GlobalObject>()) {
+ lexical = &obj->as<GlobalObject>().lexicalEnvironment();
+ } else {
+ lexical = ObjectRealm::get(obj).getNonSyntacticLexicalEnvironment(obj);
+ }
+ MOZ_ASSERT(lexical);
+ return lexical;
+}
+
+/* static */
+GlobalLexicalEnvironmentObject* GlobalLexicalEnvironmentObject::create(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ MOZ_ASSERT(global);
+
+ Rooted<SharedShape*> shape(
+ cx, LexicalScope::getEmptyExtensibleEnvironmentShape(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ auto* env = static_cast<GlobalLexicalEnvironmentObject*>(
+ LexicalEnvironmentObject::create(cx, shape, global, gc::Heap::Tenured));
+ if (!env) {
+ return nullptr;
+ }
+
+ env->initThisObject(global);
+ return env;
+}
+
+void GlobalLexicalEnvironmentObject::setWindowProxyThisObject(JSObject* obj) {
+ MOZ_ASSERT(IsWindowProxy(obj));
+ setReservedSlot(THIS_VALUE_OR_SCOPE_SLOT, ObjectValue(*obj));
+}
+
+/* static */
+NonSyntacticLexicalEnvironmentObject*
+NonSyntacticLexicalEnvironmentObject::create(JSContext* cx,
+ HandleObject enclosing,
+ HandleObject thisv) {
+ MOZ_ASSERT(enclosing);
+ MOZ_ASSERT(!IsSyntacticEnvironment(enclosing));
+
+ Rooted<SharedShape*> shape(
+ cx, LexicalScope::getEmptyExtensibleEnvironmentShape(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ auto* env = static_cast<NonSyntacticLexicalEnvironmentObject*>(
+ LexicalEnvironmentObject::create(cx, shape, enclosing,
+ gc::Heap::Tenured));
+ if (!env) {
+ return nullptr;
+ }
+
+ env->initThisObject(thisv);
+
+ return env;
+}
+
+/* static */
+RuntimeLexicalErrorObject* RuntimeLexicalErrorObject::create(
+ JSContext* cx, HandleObject enclosing, unsigned errorNumber) {
+ Rooted<SharedShape*> shape(
+ cx,
+ EmptyEnvironmentShape(cx, &class_, JSSLOT_FREE(&class_), ObjectFlags()));
+ if (!shape) {
+ return nullptr;
+ }
+
+ auto* obj = CreateEnvironmentObject<RuntimeLexicalErrorObject>(cx, shape);
+ if (!obj) {
+ return nullptr;
+ }
+ obj->initEnclosingEnvironment(enclosing);
+ obj->initReservedSlot(ERROR_SLOT, Int32Value(int32_t(errorNumber)));
+
+ return obj;
+}
+
+static void ReportRuntimeLexicalErrorId(JSContext* cx, unsigned errorNumber,
+ HandleId id) {
+ if (id.isAtom()) {
+ Rooted<PropertyName*> name(cx, id.toAtom()->asPropertyName());
+ ReportRuntimeLexicalError(cx, errorNumber, name);
+ return;
+ }
+ MOZ_CRASH(
+ "RuntimeLexicalErrorObject should only be used with property names");
+}
+
+static bool lexicalError_LookupProperty(JSContext* cx, HandleObject obj,
+ HandleId id, MutableHandleObject objp,
+ PropertyResult* propp) {
+ ReportRuntimeLexicalErrorId(
+ cx, obj->as<RuntimeLexicalErrorObject>().errorNumber(), id);
+ return false;
+}
+
+static bool lexicalError_HasProperty(JSContext* cx, HandleObject obj,
+ HandleId id, bool* foundp) {
+ ReportRuntimeLexicalErrorId(
+ cx, obj->as<RuntimeLexicalErrorObject>().errorNumber(), id);
+ return false;
+}
+
+static bool lexicalError_GetProperty(JSContext* cx, HandleObject obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp) {
+ ReportRuntimeLexicalErrorId(
+ cx, obj->as<RuntimeLexicalErrorObject>().errorNumber(), id);
+ return false;
+}
+
+static bool lexicalError_SetProperty(JSContext* cx, HandleObject obj,
+ HandleId id, HandleValue v,
+ HandleValue receiver,
+ ObjectOpResult& result) {
+ ReportRuntimeLexicalErrorId(
+ cx, obj->as<RuntimeLexicalErrorObject>().errorNumber(), id);
+ return false;
+}
+
+static bool lexicalError_GetOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) {
+ ReportRuntimeLexicalErrorId(
+ cx, obj->as<RuntimeLexicalErrorObject>().errorNumber(), id);
+ return false;
+}
+
+static bool lexicalError_DeleteProperty(JSContext* cx, HandleObject obj,
+ HandleId id, ObjectOpResult& result) {
+ ReportRuntimeLexicalErrorId(
+ cx, obj->as<RuntimeLexicalErrorObject>().errorNumber(), id);
+ return false;
+}
+
+static const ObjectOps RuntimeLexicalErrorObjectObjectOps = {
+ lexicalError_LookupProperty, // lookupProperty
+ nullptr, // defineProperty
+ lexicalError_HasProperty, // hasProperty
+ lexicalError_GetProperty, // getProperty
+ lexicalError_SetProperty, // setProperty
+ lexicalError_GetOwnPropertyDescriptor, // getOwnPropertyDescriptor
+ lexicalError_DeleteProperty, // deleteProperty
+ nullptr, // getElements
+ nullptr, // funToString
+};
+
+const JSClass RuntimeLexicalErrorObject::class_ = {
+ "RuntimeLexicalError",
+ JSCLASS_HAS_RESERVED_SLOTS(RuntimeLexicalErrorObject::RESERVED_SLOTS),
+ JS_NULL_CLASS_OPS,
+ JS_NULL_CLASS_SPEC,
+ JS_NULL_CLASS_EXT,
+ &RuntimeLexicalErrorObjectObjectOps};
+
+/*****************************************************************************/
+
+EnvironmentIter::EnvironmentIter(JSContext* cx, const EnvironmentIter& ei)
+ : si_(cx, ei.si_.get()), env_(cx, ei.env_), frame_(ei.frame_) {}
+
+EnvironmentIter::EnvironmentIter(JSContext* cx, JSObject* env, Scope* scope)
+ : si_(cx, ScopeIter(scope)), env_(cx, env), frame_(NullFramePtr()) {
+ settle();
+}
+
+EnvironmentIter::EnvironmentIter(JSContext* cx, AbstractFramePtr frame,
+ const jsbytecode* pc)
+ : si_(cx, frame.script()->innermostScope(pc)),
+ env_(cx, frame.environmentChain()),
+ frame_(frame) {
+ cx->check(frame);
+ settle();
+}
+
+EnvironmentIter::EnvironmentIter(JSContext* cx, JSObject* env, Scope* scope,
+ AbstractFramePtr frame)
+ : si_(cx, ScopeIter(scope)), env_(cx, env), frame_(frame) {
+ cx->check(frame);
+ settle();
+}
+
+void EnvironmentIter::incrementScopeIter() {
+ if (si_.scope()->is<GlobalScope>()) {
+ // GlobalScopes may be syntactic or non-syntactic. Non-syntactic
+ // GlobalScopes correspond to zero or more non-syntactic
+    // EnvironmentObjects followed by the global lexical scope, then the
+ // GlobalObject or another non-EnvironmentObject object.
+ if (!env_->is<EnvironmentObject>()) {
+ si_++;
+ }
+ } else {
+ si_++;
+ }
+}
+
+void EnvironmentIter::settle() {
+ // Check for trying to iterate a function or eval frame before the prologue
+ // has created the CallObject, in which case we have to skip.
+ if (frame_ && frame_.hasScript() &&
+ frame_.script()->initialEnvironmentShape() &&
+ !frame_.hasInitialEnvironment()) {
+ // Skip until we're at the enclosing scope of the script.
+ while (si_.scope() != frame_.script()->enclosingScope()) {
+ if (env_->is<BlockLexicalEnvironmentObject>() &&
+ &env_->as<BlockLexicalEnvironmentObject>().scope() == si_.scope()) {
+ MOZ_ASSERT(si_.kind() == ScopeKind::NamedLambda ||
+ si_.kind() == ScopeKind::StrictNamedLambda);
+ env_ =
+ &env_->as<BlockLexicalEnvironmentObject>().enclosingEnvironment();
+ }
+ incrementScopeIter();
+ }
+ }
+
+ // Check if we have left the extent of the initial frame after we've
+ // settled on a static scope.
+ if (frame_ &&
+ (!si_ ||
+ (frame_.hasScript() &&
+ si_.scope() == frame_.script()->enclosingScope()) ||
+ (frame_.isWasmDebugFrame() && !si_.scope()->is<WasmFunctionScope>()))) {
+ frame_ = NullFramePtr();
+ }
+
+#ifdef DEBUG
+ if (si_) {
+ if (hasSyntacticEnvironment()) {
+ Scope* scope = si_.scope();
+ if (scope->is<LexicalScope>()) {
+ MOZ_ASSERT(scope == &env_->as<BlockLexicalEnvironmentObject>().scope());
+ } else if (scope->is<FunctionScope>()) {
+ MOZ_ASSERT(scope->as<FunctionScope>().script() ==
+ env_->as<CallObject>()
+ .callee()
+ .maybeCanonicalFunction()
+ ->baseScript());
+ } else if (scope->is<VarScope>()) {
+ MOZ_ASSERT(scope == &env_->as<VarEnvironmentObject>().scope());
+ } else if (scope->is<WithScope>()) {
+ MOZ_ASSERT(scope == &env_->as<WithEnvironmentObject>().scope());
+ } else if (scope->is<EvalScope>()) {
+ MOZ_ASSERT(scope == &env_->as<VarEnvironmentObject>().scope());
+ } else if (scope->is<GlobalScope>()) {
+ MOZ_ASSERT(env_->is<GlobalObject>() ||
+ IsGlobalLexicalEnvironment(env_));
+ }
+ } else if (hasNonSyntacticEnvironmentObject()) {
+ if (env_->is<LexicalEnvironmentObject>()) {
+ // The global lexical environment still encloses non-syntactic
+ // environment objects.
+ MOZ_ASSERT(env_->is<NonSyntacticLexicalEnvironmentObject>() ||
+ env_->is<GlobalLexicalEnvironmentObject>());
+ } else if (env_->is<WithEnvironmentObject>()) {
+ MOZ_ASSERT(!env_->as<WithEnvironmentObject>().isSyntactic());
+ } else {
+ MOZ_ASSERT(env_->is<NonSyntacticVariablesObject>());
+ }
+ }
+ }
+#endif
+}
+
+JSObject& EnvironmentIter::enclosingEnvironment() const {
+ // As an engine invariant (maintained internally and asserted by Execute),
+ // EnvironmentObjects and non-EnvironmentObjects cannot be interleaved on
+ // the scope chain; every scope chain must start with zero or more
+ // EnvironmentObjects and terminate with one or more
+ // non-EnvironmentObjects (viz., GlobalObject).
+ MOZ_ASSERT(done());
+ MOZ_ASSERT(!env_->is<EnvironmentObject>());
+ return *env_;
+}
+
+bool EnvironmentIter::hasNonSyntacticEnvironmentObject() const {
+ // The case we're worrying about here is a NonSyntactic static scope which
+ // has 0+ corresponding non-syntactic WithEnvironmentObject scopes, a
+ // NonSyntacticVariablesObject, or a NonSyntacticLexicalEnvironmentObject.
+ if (si_.kind() == ScopeKind::NonSyntactic) {
+ MOZ_ASSERT_IF(env_->is<WithEnvironmentObject>(),
+ !env_->as<WithEnvironmentObject>().isSyntactic());
+ return env_->is<EnvironmentObject>();
+ }
+ return false;
+}
+
+/* static */
+HashNumber MissingEnvironmentKey::hash(MissingEnvironmentKey ek) {
+ return size_t(ek.frame_.raw()) ^ size_t(ek.scope_);
+}
+
+/* static */
+bool MissingEnvironmentKey::match(MissingEnvironmentKey ek1,
+ MissingEnvironmentKey ek2) {
+ return ek1.frame_ == ek2.frame_ && ek1.scope_ == ek2.scope_;
+}
+
+bool LiveEnvironmentVal::traceWeak(JSTracer* trc) {
+ return TraceWeakEdge(trc, &scope_, "LiveEnvironmentVal::scope_");
+}
+
+// Live EnvironmentIter values may be added to DebugEnvironments::liveEnvs, as
+// LiveEnvironmentVal instances. They need to have write barriers when they are
+// added to the hash table, but no barriers when rehashing inside GC. It's a
+// nasty hack, but the important thing is that LiveEnvironmentVal and
+// MissingEnvironmentKey need to alias each other.
+void LiveEnvironmentVal::staticAsserts() {
+ static_assert(
+ sizeof(LiveEnvironmentVal) == sizeof(MissingEnvironmentKey),
+ "LiveEnvironmentVal must be same size of MissingEnvironmentKey");
+ static_assert(
+ offsetof(LiveEnvironmentVal, scope_) ==
+ offsetof(MissingEnvironmentKey, scope_),
+ "LiveEnvironmentVal.scope_ must alias MissingEnvironmentKey.scope_");
+}
+
+/*****************************************************************************/
+
+namespace {
+
+/*
+ * DebugEnvironmentProxyHandler is the handler for DebugEnvironmentProxy
+ * proxy objects. Having a custom handler (rather than trying to reuse
+ * js::Wrapper) gives us several important abilities:
+ * - We want to pass the EnvironmentObject as the receiver to forwarded scope
+ * property ops on aliased variables so that Call/Block/With ops do not all
+ * require a 'normalization' step.
+ * - The debug scope proxy can directly manipulate the stack frame to allow
+ * the debugger to read/write args/locals that were otherwise unaliased.
+ * - The debug scope proxy can store unaliased variables after the stack frame
+ * is popped so that they may still be read/written by the debugger.
+ * - The engine has made certain assumptions about the possible reads/writes
+ * in a scope. DebugEnvironmentProxy allows us to prevent the debugger from
+ * breaking those assumptions.
+ * - The engine makes optimizations that are observable to the debugger. The
+ * proxy can either hide these optimizations or make the situation more
+ * clear to the debugger. An example is 'arguments'.
+ */
+class DebugEnvironmentProxyHandler : public BaseProxyHandler {
+ enum Action { SET, GET };
+
+ enum AccessResult { ACCESS_UNALIASED, ACCESS_GENERIC, ACCESS_LOST };
+
+ /*
+ * This function handles access to unaliased locals/formals. Since they
+ * are unaliased, the values of these variables are not stored in the
+ * slots of the normal CallObject and BlockLexicalEnvironmentObject
+ * environments and thus must be recovered from somewhere else:
+ * + if the invocation for which the env was created is still executing,
+ * there is a JS frame live on the stack holding the values;
+ * + if the invocation for which the env was created finished executing:
+ * - and there was a DebugEnvironmentProxy associated with env, then
+ * the DebugEnvironments::onPop(Call|Lexical) handler copied out the
+ * unaliased variables. In both cases, a dense array is created in
+ * onPop(Call|Lexical) to hold the unaliased values and attached to
+ * the DebugEnvironmentProxy;
+ * - and there was not a DebugEnvironmentProxy yet associated with the
+ * scope, then the unaliased values are lost and not recoverable.
+ *
+ * Callers should check accessResult for non-failure results:
+ * - ACCESS_UNALIASED if the access was unaliased and completed
+ * - ACCESS_GENERIC if the access was aliased or the property not found
+ * - ACCESS_LOST if the value has been lost to the debugger and the
+ * action is GET; if the action is SET, we assign to the
+ * name of the variable on the environment object
+ */
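+  //
+  // An illustrative Debugger-facing sketch (handler wiring elided; the
+  // debuggee is |function f() { var local = 1; debugger; }|):
+  //
+  //   dbg.onDebuggerStatement = frame => {
+  //     frame.environment.getVariable("local");  // live frame: reads 1
+  //   };
+  //   // After |f| returns, a retained Debugger.Environment reads from the
+  //   // snapshot, if one was taken; otherwise the value is reported as
+  //   // optimized out.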
+ bool handleUnaliasedAccess(JSContext* cx,
+ Handle<DebugEnvironmentProxy*> debugEnv,
+ Handle<EnvironmentObject*> env, HandleId id,
+ Action action, MutableHandleValue vp,
+ AccessResult* accessResult) const {
+ MOZ_ASSERT(&debugEnv->environment() == env);
+ MOZ_ASSERT_IF(action == SET, !debugEnv->isOptimizedOut());
+ *accessResult = ACCESS_GENERIC;
+ LiveEnvironmentVal* maybeLiveEnv =
+ DebugEnvironments::hasLiveEnvironment(*env);
+
+ // Handle unaliased formals, vars, lets, and consts at function or module
+ // scope.
+ if (env->is<CallObject>() || env->is<ModuleEnvironmentObject>()) {
+ RootedScript script(cx);
+ if (env->is<CallObject>()) {
+ CallObject& callobj = env->as<CallObject>();
+ RootedFunction fun(cx, &callobj.callee());
+ script = JSFunction::getOrCreateScript(cx, fun);
+ } else {
+ script = env->as<ModuleEnvironmentObject>().module().maybeScript();
+ if (!script) {
+ return true;
+ }
+ }
+
+ BindingIter bi(script);
+ while (bi && NameToId(bi.name()->asPropertyName()) != id) {
+ bi++;
+ }
+ if (!bi) {
+ return true;
+ }
+
+ if (action == SET && bi.kind() == BindingKind::Const) {
+ ReportRuntimeLexicalError(cx, JSMSG_BAD_CONST_ASSIGN, id);
+ return false;
+ }
+
+ if (bi.location().kind() == BindingLocation::Kind::Import) {
+ return true;
+ }
+
+ if (!bi.hasArgumentSlot()) {
+ if (bi.closedOver()) {
+ return true;
+ }
+
+ uint32_t i = bi.location().slot();
+ if (maybeLiveEnv) {
+ AbstractFramePtr frame = maybeLiveEnv->frame();
+ if (action == GET) {
+ vp.set(frame.unaliasedLocal(i));
+ } else {
+ frame.unaliasedLocal(i) = vp;
+ }
+ } else if (AbstractGeneratorObject* genObj =
+ GetGeneratorObjectForEnvironment(cx, env);
+ genObj && genObj->isSuspended() &&
+ genObj->hasStackStorage()) {
+ if (action == GET) {
+ vp.set(genObj->getUnaliasedLocal(i));
+ } else {
+ genObj->setUnaliasedLocal(i, vp);
+ }
+ } else if (NativeObject* snapshot = debugEnv->maybeSnapshot()) {
+ if (action == GET) {
+ vp.set(snapshot->getDenseElement(script->numArgs() + i));
+ } else {
+ snapshot->setDenseElement(script->numArgs() + i, vp);
+ }
+ } else {
+ /* The unaliased value has been lost to the debugger. */
+ if (action == GET) {
+ *accessResult = ACCESS_LOST;
+ return true;
+ }
+ }
+ } else {
+ unsigned i = bi.argumentSlot();
+ if (bi.closedOver()) {
+ return true;
+ }
+
+ if (maybeLiveEnv) {
+ AbstractFramePtr frame = maybeLiveEnv->frame();
+ if (script->argsObjAliasesFormals() && frame.hasArgsObj()) {
+ if (action == GET) {
+ vp.set(frame.argsObj().arg(i));
+ } else {
+ frame.argsObj().setArg(i, vp);
+ }
+ } else {
+ if (action == GET) {
+ vp.set(frame.unaliasedFormal(i, DONT_CHECK_ALIASING));
+ } else {
+ frame.unaliasedFormal(i, DONT_CHECK_ALIASING) = vp;
+ }
+ }
+ } else if (NativeObject* snapshot = debugEnv->maybeSnapshot()) {
+ if (action == GET) {
+ vp.set(snapshot->getDenseElement(i));
+ } else {
+ snapshot->setDenseElement(i, vp);
+ }
+ } else {
+ /* The unaliased value has been lost to the debugger. */
+ if (action == GET) {
+ *accessResult = ACCESS_LOST;
+ return true;
+ }
+ }
+ }
+
+ // It is possible that an optimized out value flows to this
+ // location due to Debugger.Frame.prototype.eval operating on a
+ // live bailed-out Baseline frame. In that case, treat the access
+ // as lost.
+ if (vp.isMagic() && vp.whyMagic() == JS_OPTIMIZED_OUT) {
+ *accessResult = ACCESS_LOST;
+ } else {
+ *accessResult = ACCESS_UNALIASED;
+ }
+
+ return true;
+ }
+
+ /*
+ * Handle unaliased vars in functions with parameter expressions and
+ * lexical bindings at block scope.
+ */
+ if (env->is<LexicalEnvironmentObject>() ||
+ env->is<VarEnvironmentObject>()) {
+ // Currently consider all global and non-syntactic top-level lexical
+ // bindings to be aliased.
+ if (env->is<LexicalEnvironmentObject>() &&
+ env->as<LexicalEnvironmentObject>().isExtensible()) {
+ MOZ_ASSERT(IsGlobalLexicalEnvironment(env) ||
+ !IsSyntacticEnvironment(env));
+ return true;
+ }
+
+ // Currently all vars inside non-strict eval var environments are aliased.
+ if (env->is<VarEnvironmentObject>() &&
+ env->as<VarEnvironmentObject>().isForNonStrictEval()) {
+ return true;
+ }
+
+ Rooted<Scope*> scope(cx, getEnvironmentScope(*env));
+ uint32_t firstFrameSlot = scope->firstFrameSlot();
+
+ BindingIter bi(scope);
+ while (bi && NameToId(bi.name()->asPropertyName()) != id) {
+ bi++;
+ }
+ if (!bi) {
+ return true;
+ }
+
+ if (action == SET && bi.kind() == BindingKind::Const) {
+ ReportRuntimeLexicalError(cx, JSMSG_BAD_CONST_ASSIGN, id);
+ return false;
+ }
+
+ BindingLocation loc = bi.location();
+ if (loc.kind() == BindingLocation::Kind::Environment) {
+ return true;
+ }
+
+ // Named lambdas that are not closed over are lost.
+ if (loc.kind() == BindingLocation::Kind::NamedLambdaCallee) {
+ if (action == GET) {
+ *accessResult = ACCESS_LOST;
+ }
+ return true;
+ }
+
+ MOZ_ASSERT(loc.kind() == BindingLocation::Kind::Frame);
+
+ if (maybeLiveEnv) {
+ AbstractFramePtr frame = maybeLiveEnv->frame();
+ uint32_t local = loc.slot();
+ MOZ_ASSERT(local < frame.script()->nfixed());
+ if (action == GET) {
+ vp.set(frame.unaliasedLocal(local));
+ } else {
+ frame.unaliasedLocal(local) = vp;
+ }
+ } else if (AbstractGeneratorObject* genObj =
+ GetGeneratorObjectForEnvironment(cx, debugEnv);
+ genObj && genObj->isSuspended() && genObj->hasStackStorage()) {
+ if (action == GET) {
+ vp.set(genObj->getUnaliasedLocal(loc.slot()));
+ } else {
+ genObj->setUnaliasedLocal(loc.slot(), vp);
+ }
+ } else if (NativeObject* snapshot = debugEnv->maybeSnapshot()) {
+ // Indices in the frame snapshot are offset by the first frame
+ // slot. See DebugEnvironments::takeFrameSnapshot.
+ MOZ_ASSERT(loc.slot() >= firstFrameSlot);
+ uint32_t snapshotIndex = loc.slot() - firstFrameSlot;
+ if (action == GET) {
+ vp.set(snapshot->getDenseElement(snapshotIndex));
+ } else {
+ snapshot->setDenseElement(snapshotIndex, vp);
+ }
+ } else {
+ if (action == GET) {
+ // A {Lexical,Var}EnvironmentObject whose static scope
+ // does not have an environment shape at all is a "hollow"
+ // block object reflected for missing block scopes. Their
+ // slot values are lost.
+ if (!scope->hasEnvironment()) {
+ *accessResult = ACCESS_LOST;
+ return true;
+ }
+
+ if (!GetProperty(cx, env, env, id, vp)) {
+ return false;
+ }
+ } else {
+ if (!SetProperty(cx, env, id, vp)) {
+ return false;
+ }
+ }
+ }
+
+    // See the comment above in the analogous CallObject case.
+ if (vp.isMagic() && vp.whyMagic() == JS_OPTIMIZED_OUT) {
+ *accessResult = ACCESS_LOST;
+ } else {
+ *accessResult = ACCESS_UNALIASED;
+ }
+
+ return true;
+ }
+
+ if (env->is<WasmFunctionCallObject>()) {
+ if (maybeLiveEnv) {
+ Rooted<Scope*> scope(cx, getEnvironmentScope(*env));
+ uint32_t index = 0;
+ for (BindingIter bi(scope); bi; bi++) {
+ if (id.isAtom(bi.name())) {
+ break;
+ }
+ MOZ_ASSERT(!bi.isLast());
+ index++;
+ }
+
+ AbstractFramePtr frame = maybeLiveEnv->frame();
+ MOZ_ASSERT(frame.isWasmDebugFrame());
+ wasm::DebugFrame* wasmFrame = frame.asWasmDebugFrame();
+ if (action == GET) {
+ if (!wasmFrame->getLocal(index, vp)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ *accessResult = ACCESS_UNALIASED;
+ } else { // if (action == SET)
+ // TODO
+ }
+ } else {
+ *accessResult = ACCESS_LOST;
+ }
+ return true;
+ }
+
+ if (env->is<WasmInstanceEnvironmentObject>()) {
+ Rooted<Scope*> scope(cx, getEnvironmentScope(*env));
+ MOZ_ASSERT(scope->is<WasmInstanceScope>());
+ uint32_t index = 0;
+ for (BindingIter bi(scope); bi; bi++) {
+ if (id.isAtom(bi.name())) {
+ break;
+ }
+ MOZ_ASSERT(!bi.isLast());
+ index++;
+ }
+ Rooted<WasmInstanceScope*> instanceScope(cx,
+ &scope->as<WasmInstanceScope>());
+ wasm::Instance& instance = instanceScope->instance()->instance();
+
+ if (action == GET) {
+ if (instanceScope->memoriesStart() <= index &&
+ index < instanceScope->globalsStart()) {
+ MOZ_ASSERT(instanceScope->memoriesStart() + 1 ==
+ instanceScope->globalsStart());
+ vp.set(ObjectValue(*instance.memory()));
+ }
+ if (instanceScope->globalsStart() <= index) {
+ MOZ_ASSERT(index < instanceScope->namesCount());
+ if (!instance.debug().getGlobal(
+ instance, index - instanceScope->globalsStart(), vp)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+ *accessResult = ACCESS_UNALIASED;
+ } else { // if (action == SET)
+ // TODO
+ }
+ return true;
+ }
+
+ /* The rest of the internal scopes do not have unaliased vars. */
+ MOZ_ASSERT(!IsSyntacticEnvironment(env) ||
+ env->is<WithEnvironmentObject>());
+ return true;
+ }
+
+ static bool isArguments(JSContext* cx, jsid id) {
+ return id == NameToId(cx->names().arguments);
+ }
+ static bool isThis(JSContext* cx, jsid id) {
+ return id == NameToId(cx->names().dotThis);
+ }
+
+ static bool isFunctionEnvironment(const JSObject& env) {
+ return env.is<CallObject>();
+ }
+
+ static bool isNonExtensibleLexicalEnvironment(const JSObject& env) {
+ return env.is<ScopedLexicalEnvironmentObject>();
+ }
+
+ static Scope* getEnvironmentScope(const JSObject& env) {
+ if (isFunctionEnvironment(env)) {
+ return env.as<CallObject>().callee().nonLazyScript()->bodyScope();
+ }
+ if (env.is<ModuleEnvironmentObject>()) {
+ JSScript* script =
+ env.as<ModuleEnvironmentObject>().module().maybeScript();
+ return script ? script->bodyScope() : nullptr;
+ }
+ if (isNonExtensibleLexicalEnvironment(env)) {
+ return &env.as<ScopedLexicalEnvironmentObject>().scope();
+ }
+ if (env.is<VarEnvironmentObject>()) {
+ return &env.as<VarEnvironmentObject>().scope();
+ }
+ if (env.is<WasmInstanceEnvironmentObject>()) {
+ return &env.as<WasmInstanceEnvironmentObject>().scope();
+ }
+ if (env.is<WasmFunctionCallObject>()) {
+ return &env.as<WasmFunctionCallObject>().scope();
+ }
+ return nullptr;
+ }
+
+ friend Scope* js::GetEnvironmentScope(const JSObject& env);
+
+ /*
+   * In theory, every non-arrow function scope contains an 'arguments'
+   * binding. However, the engine only adds the binding if 'arguments' is
+ * used in the function body. Thus, from the debugger's perspective,
+ * 'arguments' may be missing from the list of bindings.
+ */
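+  //
+  // For example (illustrative):
+  //
+  //   function f() { return 0; }          // no 'arguments' binding emitted
+  //   function g() { return arguments; }  // 'arguments' binding present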
+ static bool isMissingArgumentsBinding(EnvironmentObject& env) {
+ return isFunctionEnvironment(env) &&
+ !env.as<CallObject>().callee().baseScript()->needsArgsObj();
+ }
+
+ /*
+ * Similar to 'arguments' above, we don't add a 'this' binding to
+ * non-arrow functions if it's not used.
+ */
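+  //
+  // For example (illustrative):
+  //
+  //   function f() { return 0; }     // no '.this' binding
+  //   function g() { return this; }  // '.this' binding present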
+ static bool isMissingThisBinding(EnvironmentObject& env) {
+ return isFunctionEnvironmentWithThis(env) &&
+ !env.as<CallObject>()
+ .callee()
+ .baseScript()
+ ->functionHasThisBinding();
+ }
+
+ /*
+ * This function checks if an arguments object needs to be created when
+ * the debugger requests 'arguments' for a function scope where the
+ * arguments object was not otherwise needed.
+ */
+ static bool isMissingArguments(JSContext* cx, jsid id,
+ EnvironmentObject& env) {
+ return isArguments(cx, id) && isMissingArgumentsBinding(env);
+ }
+ static bool isMissingThis(JSContext* cx, jsid id, EnvironmentObject& env) {
+ return isThis(cx, id) && isMissingThisBinding(env);
+ }
+
+ /*
+ * If the value of |this| is requested before the this-binding has been
+ * initialized by JSOp::FunctionThis, the this-binding will be |undefined|.
+ * In that case, we have to call createMissingThis to initialize the
+ * this-binding.
+ *
+ * Note that an |undefined| this-binding is perfectly valid in strict-mode
+ * code, but that's fine: createMissingThis will do the right thing in that
+ * case.
+ */
+ static bool isMaybeUninitializedThisValue(JSContext* cx, jsid id,
+ const Value& v) {
+ return isThis(cx, id) && v.isUndefined();
+ }
+
+ /*
+ * Create a missing arguments object. If the function returns true but
+ * argsObj is null, it means the env is dead.
+ */
+ static bool createMissingArguments(JSContext* cx, EnvironmentObject& env,
+ MutableHandleArgumentsObject argsObj) {
+ argsObj.set(nullptr);
+
+ LiveEnvironmentVal* maybeEnv = DebugEnvironments::hasLiveEnvironment(env);
+ if (!maybeEnv) {
+ return true;
+ }
+
+ argsObj.set(ArgumentsObject::createUnexpected(cx, maybeEnv->frame()));
+ return !!argsObj;
+ }
+
+ /*
+ * Create a missing this Value. If the function returns true but
+ * *success is false, it means the scope is dead.
+ */
+ static bool createMissingThis(JSContext* cx, EnvironmentObject& env,
+ MutableHandleValue thisv, bool* success) {
+ *success = false;
+
+ LiveEnvironmentVal* maybeEnv = DebugEnvironments::hasLiveEnvironment(env);
+ if (!maybeEnv) {
+ return true;
+ }
+
+ if (!GetFunctionThis(cx, maybeEnv->frame(), thisv)) {
+ return false;
+ }
+
+ // Update the this-argument to avoid boxing primitive |this| more
+ // than once.
+ maybeEnv->frame().thisArgument() = thisv;
+ *success = true;
+ return true;
+ }
+
+ static void reportOptimizedOut(JSContext* cx, HandleId id) {
+ if (isThis(cx, id)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_DEBUG_OPTIMIZED_OUT, "this");
+ return;
+ }
+
+ if (UniqueChars printable =
+ IdToPrintableUTF8(cx, id, IdToPrintableBehavior::IdIsIdentifier)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_DEBUG_OPTIMIZED_OUT, printable.get());
+ }
+ }
+
+ public:
+ static const char family;
+ static const DebugEnvironmentProxyHandler singleton;
+
+ constexpr DebugEnvironmentProxyHandler() : BaseProxyHandler(&family) {}
+
+ static bool isFunctionEnvironmentWithThis(const JSObject& env) {
+ // All functions except arrows should have their own this binding.
+ return isFunctionEnvironment(env) &&
+ !env.as<CallObject>().callee().hasLexicalThis();
+ }
+
+ bool getPrototypeIfOrdinary(JSContext* cx, HandleObject proxy,
+ bool* isOrdinary,
+ MutableHandleObject protop) const override {
+ MOZ_CRASH(
+ "shouldn't be possible to access the prototype chain of a "
+ "DebugEnvironmentProxyHandler");
+ }
+
+ bool preventExtensions(JSContext* cx, HandleObject proxy,
+ ObjectOpResult& result) const override {
+    // Like most proxies, this object is always [[Extensible]] and can't be
+    // made non-[[Extensible]].
+ return result.fail(JSMSG_CANT_CHANGE_EXTENSIBILITY);
+ }
+
+ bool isExtensible(JSContext* cx, HandleObject proxy,
+ bool* extensible) const override {
+ // See above.
+ *extensible = true;
+ return true;
+ }
+
+ bool getMissingArgumentsPropertyDescriptor(
+ JSContext* cx, Handle<DebugEnvironmentProxy*> debugEnv,
+ EnvironmentObject& env,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) const {
+ RootedArgumentsObject argsObj(cx);
+ if (!createMissingArguments(cx, env, &argsObj)) {
+ return false;
+ }
+
+ if (!argsObj) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEBUG_NOT_ON_STACK, "Debugger scope");
+ return false;
+ }
+
+ desc.set(mozilla::Some(PropertyDescriptor::Data(
+ ObjectValue(*argsObj), {JS::PropertyAttribute::Enumerable})));
+ return true;
+ }
+ bool getMissingThisPropertyDescriptor(
+ JSContext* cx, Handle<DebugEnvironmentProxy*> debugEnv,
+ EnvironmentObject& env,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) const {
+ RootedValue thisv(cx);
+ bool success;
+ if (!createMissingThis(cx, env, &thisv, &success)) {
+ return false;
+ }
+
+ if (!success) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEBUG_NOT_ON_STACK, "Debugger scope");
+ return false;
+ }
+
+ desc.set(mozilla::Some(
+ PropertyDescriptor::Data(thisv, {JS::PropertyAttribute::Enumerable})));
+ return true;
+ }
+
+ bool getOwnPropertyDescriptor(
+ JSContext* cx, HandleObject proxy, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) const override {
+ Rooted<DebugEnvironmentProxy*> debugEnv(
+ cx, &proxy->as<DebugEnvironmentProxy>());
+ Rooted<EnvironmentObject*> env(cx, &debugEnv->environment());
+
+ if (isMissingArguments(cx, id, *env)) {
+ return getMissingArgumentsPropertyDescriptor(cx, debugEnv, *env, desc);
+ }
+
+ if (isMissingThis(cx, id, *env)) {
+ return getMissingThisPropertyDescriptor(cx, debugEnv, *env, desc);
+ }
+
+ RootedValue v(cx);
+ AccessResult access;
+ if (!handleUnaliasedAccess(cx, debugEnv, env, id, GET, &v, &access)) {
+ return false;
+ }
+
+ switch (access) {
+ case ACCESS_UNALIASED: {
+ desc.set(mozilla::Some(
+ PropertyDescriptor::Data(v, {JS::PropertyAttribute::Enumerable})));
+ return true;
+ }
+ case ACCESS_GENERIC:
+ return GetOwnPropertyDescriptor(cx, env, id, desc);
+ case ACCESS_LOST:
+ reportOptimizedOut(cx, id);
+ return false;
+ default:
+ MOZ_CRASH("bad AccessResult");
+ }
+ }
+
+ bool getMissingArguments(JSContext* cx, EnvironmentObject& env,
+ MutableHandleValue vp) const {
+ RootedArgumentsObject argsObj(cx);
+ if (!createMissingArguments(cx, env, &argsObj)) {
+ return false;
+ }
+
+ if (!argsObj) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEBUG_NOT_ON_STACK, "Debugger env");
+ return false;
+ }
+
+ vp.setObject(*argsObj);
+ return true;
+ }
+
+ bool getMissingThis(JSContext* cx, EnvironmentObject& env,
+ MutableHandleValue vp) const {
+ RootedValue thisv(cx);
+ bool success;
+ if (!createMissingThis(cx, env, &thisv, &success)) {
+ return false;
+ }
+
+ if (!success) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEBUG_NOT_ON_STACK, "Debugger env");
+ return false;
+ }
+
+ vp.set(thisv);
+ return true;
+ }
+
+ bool get(JSContext* cx, HandleObject proxy, HandleValue receiver, HandleId id,
+ MutableHandleValue vp) const override {
+ Rooted<DebugEnvironmentProxy*> debugEnv(
+ cx, &proxy->as<DebugEnvironmentProxy>());
+ Rooted<EnvironmentObject*> env(
+ cx, &proxy->as<DebugEnvironmentProxy>().environment());
+
+ if (isMissingArguments(cx, id, *env)) {
+ return getMissingArguments(cx, *env, vp);
+ }
+
+ if (isMissingThis(cx, id, *env)) {
+ return getMissingThis(cx, *env, vp);
+ }
+
+ AccessResult access;
+ if (!handleUnaliasedAccess(cx, debugEnv, env, id, GET, vp, &access)) {
+ return false;
+ }
+
+ switch (access) {
+ case ACCESS_UNALIASED:
+ if (isMaybeUninitializedThisValue(cx, id, vp)) {
+ return getMissingThis(cx, *env, vp);
+ }
+ return true;
+ case ACCESS_GENERIC:
+ if (!GetProperty(cx, env, env, id, vp)) {
+ return false;
+ }
+ if (isMaybeUninitializedThisValue(cx, id, vp)) {
+ return getMissingThis(cx, *env, vp);
+ }
+ return true;
+ case ACCESS_LOST:
+ reportOptimizedOut(cx, id);
+ return false;
+ default:
+ MOZ_CRASH("bad AccessResult");
+ }
+ }
+
+ bool getMissingArgumentsMaybeSentinelValue(JSContext* cx,
+ EnvironmentObject& env,
+ MutableHandleValue vp) const {
+ RootedArgumentsObject argsObj(cx);
+ if (!createMissingArguments(cx, env, &argsObj)) {
+ return false;
+ }
+ vp.set(argsObj ? ObjectValue(*argsObj) : MagicValue(JS_MISSING_ARGUMENTS));
+ return true;
+ }
+
+ bool getMissingThisMaybeSentinelValue(JSContext* cx, EnvironmentObject& env,
+ MutableHandleValue vp) const {
+ RootedValue thisv(cx);
+ bool success;
+ if (!createMissingThis(cx, env, &thisv, &success)) {
+ return false;
+ }
+ vp.set(success ? thisv : MagicValue(JS_OPTIMIZED_OUT));
+ return true;
+ }
+
+ /*
+ * Like 'get', but returns sentinel values instead of throwing on
+ * exceptional cases.
+ */
+ bool getMaybeSentinelValue(JSContext* cx,
+ Handle<DebugEnvironmentProxy*> debugEnv,
+ HandleId id, MutableHandleValue vp) const {
+ Rooted<EnvironmentObject*> env(cx, &debugEnv->environment());
+
+ if (isMissingArguments(cx, id, *env)) {
+ return getMissingArgumentsMaybeSentinelValue(cx, *env, vp);
+ }
+ if (isMissingThis(cx, id, *env)) {
+ return getMissingThisMaybeSentinelValue(cx, *env, vp);
+ }
+
+ AccessResult access;
+ if (!handleUnaliasedAccess(cx, debugEnv, env, id, GET, vp, &access)) {
+ return false;
+ }
+
+ switch (access) {
+ case ACCESS_UNALIASED:
+ if (isMaybeUninitializedThisValue(cx, id, vp)) {
+ return getMissingThisMaybeSentinelValue(cx, *env, vp);
+ }
+ return true;
+ case ACCESS_GENERIC:
+ if (!GetProperty(cx, env, env, id, vp)) {
+ return false;
+ }
+ if (isMaybeUninitializedThisValue(cx, id, vp)) {
+ return getMissingThisMaybeSentinelValue(cx, *env, vp);
+ }
+ return true;
+ case ACCESS_LOST:
+ vp.setMagic(JS_OPTIMIZED_OUT);
+ return true;
+ default:
+ MOZ_CRASH("bad AccessResult");
+ }
+ }
+
+ bool set(JSContext* cx, HandleObject proxy, HandleId id, HandleValue v,
+ HandleValue receiver, ObjectOpResult& result) const override {
+ Rooted<DebugEnvironmentProxy*> debugEnv(
+ cx, &proxy->as<DebugEnvironmentProxy>());
+ Rooted<EnvironmentObject*> env(
+ cx, &proxy->as<DebugEnvironmentProxy>().environment());
+
+ if (debugEnv->isOptimizedOut()) {
+ return Throw(cx, id, JSMSG_DEBUG_CANT_SET_OPT_ENV);
+ }
+
+ AccessResult access;
+ RootedValue valCopy(cx, v);
+ if (!handleUnaliasedAccess(cx, debugEnv, env, id, SET, &valCopy, &access)) {
+ return false;
+ }
+
+ switch (access) {
+ case ACCESS_UNALIASED:
+ return result.succeed();
+ case ACCESS_GENERIC: {
+ RootedValue envVal(cx, ObjectValue(*env));
+ return SetProperty(cx, env, id, v, envVal, result);
+ }
+ default:
+ MOZ_CRASH("bad AccessResult");
+ }
+ }
+
+ bool defineProperty(JSContext* cx, HandleObject proxy, HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result) const override {
+ Rooted<EnvironmentObject*> env(
+ cx, &proxy->as<DebugEnvironmentProxy>().environment());
+
+ bool found;
+ if (!has(cx, proxy, id, &found)) {
+ return false;
+ }
+ if (found) {
+ return Throw(cx, id, JSMSG_CANT_REDEFINE_PROP);
+ }
+
+ return JS_DefinePropertyById(cx, env, id, desc, result);
+ }
+
+ bool ownPropertyKeys(JSContext* cx, HandleObject proxy,
+ MutableHandleIdVector props) const override {
+ Rooted<EnvironmentObject*> env(
+ cx, &proxy->as<DebugEnvironmentProxy>().environment());
+
+ if (isMissingArgumentsBinding(*env)) {
+ if (!props.append(NameToId(cx->names().arguments))) {
+ return false;
+ }
+ }
+ if (isMissingThisBinding(*env)) {
+ if (!props.append(NameToId(cx->names().dotThis))) {
+ return false;
+ }
+ }
+
+ // WithEnvironmentObject isn't a very good proxy. It doesn't have a
+ // JSNewEnumerateOp implementation, because if it just delegated to the
+ // target object, the object would indicate that native enumeration is
+ // the thing to do, but native enumeration over the WithEnvironmentObject
+ // wrapper yields no properties. So instead here we hack around the
+ // issue: punch a hole through to the with object target, then manually
+ // examine @@unscopables.
+ RootedObject target(cx);
+ bool isWith = env->is<WithEnvironmentObject>();
+ if (isWith) {
+ target = &env->as<WithEnvironmentObject>().object();
+ } else {
+ target = env;
+ }
+ if (!GetPropertyKeys(cx, target, JSITER_OWNONLY, props)) {
+ return false;
+ }
+
+ if (isWith) {
+ size_t j = 0;
+ for (size_t i = 0; i < props.length(); i++) {
+ bool inScope;
+ if (!CheckUnscopables(cx, env, props[i], &inScope)) {
+ return false;
+ }
+ if (inScope) {
+ props[j++].set(props[i]);
+ }
+ }
+ if (!props.resize(j)) {
+ return false;
+ }
+ }
+
+ /*
+     * Environments with Scopes are optimized not to contain unaliased
+     * variables, so those variables must be appended manually here.
+ */
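+    // For example (illustrative), in
+    //   function f() { var x = 1; return x; }   // x is never closed over
+    // x lives in a stack slot rather than in an environment object, so it
+    // is only reported here via the scope's BindingIter.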
+ if (Scope* scope = getEnvironmentScope(*env)) {
+ for (Rooted<BindingIter> bi(cx, BindingIter(scope)); bi; bi++) {
+ if (!bi.closedOver() &&
+ !props.append(NameToId(bi.name()->asPropertyName()))) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ bool has(JSContext* cx, HandleObject proxy, HandleId id_,
+ bool* bp) const override {
+ RootedId id(cx, id_);
+ EnvironmentObject& envObj =
+ proxy->as<DebugEnvironmentProxy>().environment();
+
+ if (isArguments(cx, id) && isFunctionEnvironment(envObj)) {
+ *bp = true;
+ return true;
+ }
+
+    // Be careful not to look up '.this' as a normal binding below; it would
+    // assert in with_HasProperty.
+ if (isThis(cx, id)) {
+ *bp = isFunctionEnvironmentWithThis(envObj);
+ return true;
+ }
+
+ bool found;
+ RootedObject env(cx, &envObj);
+ if (!JS_HasPropertyById(cx, env, id, &found)) {
+ return false;
+ }
+
+ if (!found) {
+ if (Scope* scope = getEnvironmentScope(*env)) {
+ for (BindingIter bi(scope); bi; bi++) {
+ if (!bi.closedOver() && NameToId(bi.name()->asPropertyName()) == id) {
+ found = true;
+ break;
+ }
+ }
+ }
+ }
+
+ *bp = found;
+ return true;
+ }
+
+ bool delete_(JSContext* cx, HandleObject proxy, HandleId id,
+ ObjectOpResult& result) const override {
+ return result.fail(JSMSG_CANT_DELETE);
+ }
+};
+
+} /* anonymous namespace */
+
+Scope* js::GetEnvironmentScope(const JSObject& env) {
+ return DebugEnvironmentProxyHandler::getEnvironmentScope(env);
+}
+
+template <>
+bool JSObject::is<js::DebugEnvironmentProxy>() const {
+ return IsDerivedProxyObject(this, &DebugEnvironmentProxyHandler::singleton);
+}
+
+const char DebugEnvironmentProxyHandler::family = 0;
+const DebugEnvironmentProxyHandler DebugEnvironmentProxyHandler::singleton;
+
+/* static */
+DebugEnvironmentProxy* DebugEnvironmentProxy::create(JSContext* cx,
+ EnvironmentObject& env,
+ HandleObject enclosing) {
+ MOZ_ASSERT(env.realm() == cx->realm());
+ MOZ_ASSERT(!enclosing->is<EnvironmentObject>());
+
+ RootedValue priv(cx, ObjectValue(env));
+ JSObject* obj = NewProxyObject(cx, &DebugEnvironmentProxyHandler::singleton,
+ priv, nullptr /* proto */);
+ if (!obj) {
+ return nullptr;
+ }
+
+ DebugEnvironmentProxy* debugEnv = &obj->as<DebugEnvironmentProxy>();
+ debugEnv->setReservedSlot(ENCLOSING_SLOT, ObjectValue(*enclosing));
+ debugEnv->setReservedSlot(SNAPSHOT_SLOT, NullValue());
+
+ return debugEnv;
+}
+
+EnvironmentObject& DebugEnvironmentProxy::environment() const {
+ return target()->as<EnvironmentObject>();
+}
+
+JSObject& DebugEnvironmentProxy::enclosingEnvironment() const {
+ return reservedSlot(ENCLOSING_SLOT).toObject();
+}
+
+ArrayObject* DebugEnvironmentProxy::maybeSnapshot() const {
+ JSObject* obj = reservedSlot(SNAPSHOT_SLOT).toObjectOrNull();
+ return obj ? &obj->as<ArrayObject>() : nullptr;
+}
+
+void DebugEnvironmentProxy::initSnapshot(ArrayObject& o) {
+#ifdef DEBUG
+ if (maybeSnapshot()) {
+ auto* callObj = CallObject::find(&environment());
+ if (callObj) {
+ MOZ_ASSERT(callObj->callee().isGeneratorOrAsync());
+ } else {
+ auto* moduleEnv = ModuleEnvironmentObject::find(&environment());
+ MOZ_ASSERT(moduleEnv);
+ MOZ_ASSERT(moduleEnv->module().hasTopLevelAwait());
+ }
+ }
+#endif
+
+ setReservedSlot(SNAPSHOT_SLOT, ObjectValue(o));
+}
+
+bool DebugEnvironmentProxy::isForDeclarative() const {
+ EnvironmentObject& e = environment();
+ return e.is<CallObject>() || e.is<VarEnvironmentObject>() ||
+ e.is<ModuleEnvironmentObject>() ||
+ e.is<WasmInstanceEnvironmentObject>() ||
+ e.is<WasmFunctionCallObject>() || e.is<LexicalEnvironmentObject>();
+}
+
+/* static */
+bool DebugEnvironmentProxy::getMaybeSentinelValue(
+ JSContext* cx, Handle<DebugEnvironmentProxy*> env, HandleId id,
+ MutableHandleValue vp) {
+ return DebugEnvironmentProxyHandler::singleton.getMaybeSentinelValue(cx, env,
+ id, vp);
+}
+
+bool DebugEnvironmentProxy::isFunctionEnvironmentWithThis() {
+ return DebugEnvironmentProxyHandler::isFunctionEnvironmentWithThis(
+ environment());
+}
+
+bool DebugEnvironmentProxy::isOptimizedOut() const {
+ EnvironmentObject& e = environment();
+
+ if (DebugEnvironments::hasLiveEnvironment(e)) {
+ return false;
+ }
+
+ if (e.is<LexicalEnvironmentObject>()) {
+ return e.is<BlockLexicalEnvironmentObject>() &&
+ !e.as<BlockLexicalEnvironmentObject>().scope().hasEnvironment();
+ }
+
+ if (e.is<CallObject>()) {
+ return !e.as<CallObject>().callee().needsCallObject() && !maybeSnapshot();
+ }
+
+ return false;
+}
+
+/*****************************************************************************/
+
+DebugEnvironments::DebugEnvironments(JSContext* cx, Zone* zone)
+ : zone_(zone),
+ proxiedEnvs(cx),
+ missingEnvs(cx->zone()),
+ liveEnvs(cx->zone()) {}
+
+DebugEnvironments::~DebugEnvironments() { MOZ_ASSERT(missingEnvs.empty()); }
+
+void DebugEnvironments::trace(JSTracer* trc) { proxiedEnvs.trace(trc); }
+
+void DebugEnvironments::traceWeak(JSTracer* trc) {
+ /*
+ * missingEnvs points to debug envs weakly so that debug envs can be
+ * released more eagerly.
+ */
+ for (MissingEnvironmentMap::Enum e(missingEnvs); !e.empty(); e.popFront()) {
+ auto result =
+ TraceWeakEdge(trc, &e.front().value(), "MissingEnvironmentMap value");
+ if (result.isDead()) {
+ /*
+ * Note that onPopCall, onPopVar, and onPopLexical rely on missingEnvs to
+ * find environment objects that we synthesized for the debugger's sake,
+ * and clean up the synthetic environment objects' entries in liveEnvs.
+ * So if we remove an entry from missingEnvs here, we must also remove the
+ * corresponding liveEnvs entry.
+ *
+ * Since the DebugEnvironmentProxy is the only thing using its environment
+       * object, and the proxy is about to be finalized, you might assume that
+       * the synthetic environment object is about to be finalized too, and
+       * thus that the loop below
+ * will take care of things. But complex GC behavior means that marks are
+ * only conservative approximations of liveness; we should assume that
+ * anything could be marked.
+ *
+ * Thus, we must explicitly remove the entries from both liveEnvs and
+ * missingEnvs here.
+ */
+ liveEnvs.remove(&result.initialTarget()->environment());
+ e.removeFront();
+ } else {
+ MissingEnvironmentKey key = e.front().key();
+ Scope* scope = key.scope();
+ MOZ_ALWAYS_TRUE(TraceManuallyBarrieredWeakEdge(
+ trc, &scope, "MissingEnvironmentKey scope"));
+ if (scope != key.scope()) {
+ key.updateScope(scope);
+ e.rekeyFront(key);
+ }
+ }
+ }
+
+ /*
+ * Scopes can be finalized when a debugger-synthesized EnvironmentObject is
+ * no longer reachable via its DebugEnvironmentProxy.
+ */
+ liveEnvs.traceWeak(trc);
+}
+
+void DebugEnvironments::finish() { proxiedEnvs.clear(); }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void DebugEnvironments::checkHashTablesAfterMovingGC() {
+ /*
+ * This is called at the end of StoreBuffer::mark() to check that our
+ * postbarriers have worked and that no hashtable keys (or values) are left
+ * pointing into the nursery.
+ */
+ proxiedEnvs.checkAfterMovingGC();
+ for (MissingEnvironmentMap::Range r = missingEnvs.all(); !r.empty();
+ r.popFront()) {
+ CheckGCThingAfterMovingGC(r.front().key().scope());
+ // Use unbarrieredGet() to prevent triggering read barrier while collecting.
+ CheckGCThingAfterMovingGC(r.front().value().unbarrieredGet());
+ }
+ for (LiveEnvironmentMap::Range r = liveEnvs.all(); !r.empty(); r.popFront()) {
+ CheckGCThingAfterMovingGC(r.front().key());
+ CheckGCThingAfterMovingGC(r.front().value().scope_.get());
+ }
+}
+#endif
+
+/*
+ * Unfortunately, GetDebugEnvironmentForFrame needs to work even outside debug
+ * mode (in particular, JS_GetFrameScopeChain does not require debug mode).
+ * Since DebugEnvironments::onPop* are only called in debuggee frames, this
+ * means we cannot use any of the maps in DebugEnvironments. This will produce
+ * debug scope chains that do not obey the debugger invariants, but that is
+ * just fine.
+ */
+static bool CanUseDebugEnvironmentMaps(JSContext* cx) {
+ return cx->realm()->isDebuggee();
+}
+
+DebugEnvironments* DebugEnvironments::ensureRealmData(JSContext* cx) {
+ Realm* realm = cx->realm();
+ if (auto* debugEnvs = realm->debugEnvs()) {
+ return debugEnvs;
+ }
+
+ auto debugEnvs = cx->make_unique<DebugEnvironments>(cx, cx->zone());
+ if (!debugEnvs) {
+ return nullptr;
+ }
+
+ realm->debugEnvsRef() = std::move(debugEnvs);
+ return realm->debugEnvs();
+}
+
+/* static */
+DebugEnvironmentProxy* DebugEnvironments::hasDebugEnvironment(
+ JSContext* cx, EnvironmentObject& env) {
+ DebugEnvironments* envs = env.realm()->debugEnvs();
+ if (!envs) {
+ return nullptr;
+ }
+
+ if (JSObject* obj = envs->proxiedEnvs.lookup(&env)) {
+ MOZ_ASSERT(CanUseDebugEnvironmentMaps(cx));
+ return &obj->as<DebugEnvironmentProxy>();
+ }
+
+ return nullptr;
+}
+
+/* static */
+bool DebugEnvironments::addDebugEnvironment(
+ JSContext* cx, Handle<EnvironmentObject*> env,
+ Handle<DebugEnvironmentProxy*> debugEnv) {
+ MOZ_ASSERT(cx->realm() == env->realm());
+ MOZ_ASSERT(cx->realm() == debugEnv->nonCCWRealm());
+
+ if (!CanUseDebugEnvironmentMaps(cx)) {
+ return true;
+ }
+
+ DebugEnvironments* envs = ensureRealmData(cx);
+ if (!envs) {
+ return false;
+ }
+
+ return envs->proxiedEnvs.add(cx, env, debugEnv);
+}
+
+/* static */
+DebugEnvironmentProxy* DebugEnvironments::hasDebugEnvironment(
+ JSContext* cx, const EnvironmentIter& ei) {
+ MOZ_ASSERT(!ei.hasSyntacticEnvironment());
+
+ DebugEnvironments* envs = cx->realm()->debugEnvs();
+ if (!envs) {
+ return nullptr;
+ }
+
+ if (MissingEnvironmentMap::Ptr p =
+ envs->missingEnvs.lookup(MissingEnvironmentKey(ei))) {
+ MOZ_ASSERT(CanUseDebugEnvironmentMaps(cx));
+ return p->value();
+ }
+ return nullptr;
+}
+
+/* static */
+bool DebugEnvironments::addDebugEnvironment(
+ JSContext* cx, const EnvironmentIter& ei,
+ Handle<DebugEnvironmentProxy*> debugEnv) {
+ MOZ_ASSERT(!ei.hasSyntacticEnvironment());
+ MOZ_ASSERT(cx->realm() == debugEnv->nonCCWRealm());
+
+ if (!CanUseDebugEnvironmentMaps(cx)) {
+ return true;
+ }
+
+ DebugEnvironments* envs = ensureRealmData(cx);
+ if (!envs) {
+ return false;
+ }
+
+ MissingEnvironmentKey key(ei);
+ MOZ_ASSERT(!envs->missingEnvs.has(key));
+ if (!envs->missingEnvs.put(key,
+ WeakHeapPtr<DebugEnvironmentProxy*>(debugEnv))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Only add to liveEnvs if we synthesized the debug env on a live
+ // frame.
+ if (ei.withinInitialFrame()) {
+ MOZ_ASSERT(!envs->liveEnvs.has(&debugEnv->environment()));
+ if (!envs->liveEnvs.put(&debugEnv->environment(), LiveEnvironmentVal(ei))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* static */
+void DebugEnvironments::takeFrameSnapshot(
+ JSContext* cx, Handle<DebugEnvironmentProxy*> debugEnv,
+ AbstractFramePtr frame) {
+ /*
+ * When the JS stack frame is popped, the values of unaliased variables
+ * are lost. If there is any debug env referring to this environment, save a
+ * copy of the unaliased variables' values in an array for later debugger
+ * access via DebugEnvironmentProxy::handleUnaliasedAccess.
+ *
+ * Note: since it is simplest for this function to be infallible, failure
+ * in this code will be silently ignored. This does not break any
+ * invariants since DebugEnvironmentProxy::maybeSnapshot can already be
+ * nullptr.
+ */
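+  // Illustrative scenario: given
+  //   function f() { var x = 1; }       // a DebugEnvironmentProxy exists
+  // once f's frame is popped, x's stack slot is gone; the snapshot array
+  // built below is what later lets the debugger still read x's value.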
+
+ // Because this can be called during exception unwinding, save the exception
+ // state and restore it when we're done.
+ JS::AutoSaveExceptionState ases(cx);
+
+ JSScript* script = frame.script();
+
+  // Act like no snapshot was taken if we run out of memory while taking it.
+ Rooted<GCVector<Value>> vec(cx, GCVector<Value>(cx));
+ if (debugEnv->environment().is<CallObject>()) {
+ FunctionScope* scope = &script->bodyScope()->as<FunctionScope>();
+ uint32_t frameSlotCount = scope->nextFrameSlot();
+ MOZ_ASSERT(frameSlotCount <= script->nfixed());
+
+    // For simplicity, copy all frame slots from 0 to frameSlotCount, even
+    // if we don't need all of them (as when a parameter-defaults scope has
+    // frame slots of its own).
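+    //
+    // Resulting snapshot layout for call objects:
+    //   vec[0 .. numFormals)                        formal arguments
+    //   vec[numFormals .. numFormals + slotCount)   frame slots 0 .. slotCount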
+ uint32_t numFormals = frame.numFormalArgs();
+ if (!vec.resize(numFormals + frameSlotCount)) {
+ cx->recoverFromOutOfMemory();
+ return;
+ }
+ mozilla::PodCopy(vec.begin(), frame.argv(), numFormals);
+ for (uint32_t slot = 0; slot < frameSlotCount; slot++) {
+ vec[slot + frame.numFormalArgs()].set(frame.unaliasedLocal(slot));
+ }
+
+ /*
+ * Copy in formals that are not aliased via the scope chain
+ * but are aliased via the arguments object.
+ */
+ if (script->needsArgsObj() && frame.hasArgsObj()) {
+ for (unsigned i = 0; i < frame.numFormalArgs(); ++i) {
+ if (script->formalLivesInArgumentsObject(i)) {
+ vec[i].set(frame.argsObj().arg(i));
+ }
+ }
+ }
+ } else {
+ uint32_t frameSlotStart;
+ uint32_t frameSlotEnd;
+
+ if (debugEnv->environment().is<BlockLexicalEnvironmentObject>()) {
+ LexicalScope* scope =
+ &debugEnv->environment().as<BlockLexicalEnvironmentObject>().scope();
+ frameSlotStart = scope->firstFrameSlot();
+ frameSlotEnd = scope->nextFrameSlot();
+ } else if (debugEnv->environment()
+ .is<ClassBodyLexicalEnvironmentObject>()) {
+ ClassBodyScope* scope = &debugEnv->environment()
+ .as<ClassBodyLexicalEnvironmentObject>()
+ .scope();
+ frameSlotStart = scope->firstFrameSlot();
+ frameSlotEnd = scope->nextFrameSlot();
+ } else if (debugEnv->environment().is<VarEnvironmentObject>()) {
+ VarEnvironmentObject* env =
+ &debugEnv->environment().as<VarEnvironmentObject>();
+ if (frame.isFunctionFrame()) {
+ VarScope* scope = &env->scope().as<VarScope>();
+ frameSlotStart = scope->firstFrameSlot();
+ frameSlotEnd = scope->nextFrameSlot();
+ } else {
+ EvalScope* scope = &env->scope().as<EvalScope>();
+ MOZ_ASSERT(scope == script->bodyScope());
+ frameSlotStart = 0;
+ frameSlotEnd = scope->nextFrameSlot();
+ }
+ } else {
+ MOZ_ASSERT(&debugEnv->environment().as<ModuleEnvironmentObject>() ==
+ script->module()->environment());
+ ModuleScope* scope = &script->bodyScope()->as<ModuleScope>();
+ frameSlotStart = 0;
+ frameSlotEnd = scope->nextFrameSlot();
+ }
+
+ uint32_t frameSlotCount = frameSlotEnd - frameSlotStart;
+ MOZ_ASSERT(frameSlotCount <= script->nfixed());
+
+ if (!vec.resize(frameSlotCount)) {
+ cx->recoverFromOutOfMemory();
+ return;
+ }
+    for (uint32_t slot = frameSlotStart; slot < frameSlotEnd; slot++) {
+ vec[slot - frameSlotStart].set(frame.unaliasedLocal(slot));
+ }
+ }
+
+ if (vec.length() == 0) {
+ return;
+ }
+
+ /*
+ * Use a dense array as storage (since proxies do not have trace
+ * hooks). This array must not escape into the wild.
+ */
+ Rooted<ArrayObject*> snapshot(
+ cx, NewDenseCopiedArray(cx, vec.length(), vec.begin()));
+ if (!snapshot) {
+ MOZ_ASSERT(cx->isThrowingOutOfMemory() || cx->isThrowingOverRecursed());
+ cx->clearPendingException();
+ return;
+ }
+
+ debugEnv->initSnapshot(*snapshot);
+}
+
+/* static */
+void DebugEnvironments::onPopCall(JSContext* cx, AbstractFramePtr frame) {
+ cx->check(frame);
+
+ DebugEnvironments* envs = cx->realm()->debugEnvs();
+ if (!envs) {
+ return;
+ }
+
+ Rooted<DebugEnvironmentProxy*> debugEnv(cx, nullptr);
+
+ FunctionScope* funScope = &frame.script()->bodyScope()->as<FunctionScope>();
+ if (funScope->hasEnvironment()) {
+ MOZ_ASSERT(frame.callee()->needsCallObject());
+
+ /*
+ * The frame may be observed before the prologue has created the
+ * CallObject. See EnvironmentIter::settle.
+ */
+ if (!frame.environmentChain()->is<CallObject>()) {
+ return;
+ }
+
+ CallObject& callobj = frame.environmentChain()->as<CallObject>();
+ envs->liveEnvs.remove(&callobj);
+ if (JSObject* obj = envs->proxiedEnvs.lookup(&callobj)) {
+ debugEnv = &obj->as<DebugEnvironmentProxy>();
+ }
+ } else {
+ MissingEnvironmentKey key(frame, funScope);
+ if (MissingEnvironmentMap::Ptr p = envs->missingEnvs.lookup(key)) {
+ debugEnv = p->value();
+ envs->liveEnvs.remove(&debugEnv->environment().as<CallObject>());
+ envs->missingEnvs.remove(p);
+ }
+ }
+
+ if (debugEnv) {
+ DebugEnvironments::takeFrameSnapshot(cx, debugEnv, frame);
+ }
+}
+
+void DebugEnvironments::onPopLexical(JSContext* cx, AbstractFramePtr frame,
+ const jsbytecode* pc) {
+ cx->check(frame);
+
+ DebugEnvironments* envs = cx->realm()->debugEnvs();
+ if (!envs) {
+ return;
+ }
+
+ EnvironmentIter ei(cx, frame, pc);
+ onPopLexical(cx, ei);
+}
+
+template <typename Environment, typename Scope>
+void DebugEnvironments::onPopGeneric(JSContext* cx, const EnvironmentIter& ei) {
+ DebugEnvironments* envs = cx->realm()->debugEnvs();
+ if (!envs) {
+ return;
+ }
+
+ MOZ_ASSERT(ei.withinInitialFrame());
+ MOZ_ASSERT(ei.scope().is<Scope>());
+
+ Rooted<Environment*> env(cx);
+ if (MissingEnvironmentMap::Ptr p =
+ envs->missingEnvs.lookup(MissingEnvironmentKey(ei))) {
+ env = &p->value()->environment().as<Environment>();
+ envs->missingEnvs.remove(p);
+ } else if (ei.hasSyntacticEnvironment()) {
+ env = &ei.environment().as<Environment>();
+ }
+
+ if (env) {
+ envs->liveEnvs.remove(env);
+
+ if (JSObject* obj = envs->proxiedEnvs.lookup(env)) {
+ Rooted<DebugEnvironmentProxy*> debugEnv(
+ cx, &obj->as<DebugEnvironmentProxy>());
+ DebugEnvironments::takeFrameSnapshot(cx, debugEnv, ei.initialFrame());
+ }
+ }
+}
+
+void DebugEnvironments::onPopLexical(JSContext* cx, const EnvironmentIter& ei) {
+ if (ei.scope().is<ClassBodyScope>()) {
+ onPopGeneric<ScopedLexicalEnvironmentObject, ClassBodyScope>(cx, ei);
+ } else {
+ onPopGeneric<ScopedLexicalEnvironmentObject, LexicalScope>(cx, ei);
+ }
+}
+
+void DebugEnvironments::onPopVar(JSContext* cx, const EnvironmentIter& ei) {
+ if (ei.scope().is<EvalScope>()) {
+ onPopGeneric<VarEnvironmentObject, EvalScope>(cx, ei);
+ } else {
+ onPopGeneric<VarEnvironmentObject, VarScope>(cx, ei);
+ }
+}
+
+void DebugEnvironments::onPopWith(AbstractFramePtr frame) {
+ Realm* realm = frame.realm();
+ if (DebugEnvironments* envs = realm->debugEnvs()) {
+ envs->liveEnvs.remove(
+ &frame.environmentChain()->as<WithEnvironmentObject>());
+ }
+}
+
+void DebugEnvironments::onPopModule(JSContext* cx, const EnvironmentIter& ei) {
+ onPopGeneric<ModuleEnvironmentObject, ModuleScope>(cx, ei);
+}
+
+void DebugEnvironments::onRealmUnsetIsDebuggee(Realm* realm) {
+ if (DebugEnvironments* envs = realm->debugEnvs()) {
+ envs->proxiedEnvs.clear();
+ envs->missingEnvs.clear();
+ envs->liveEnvs.clear();
+ }
+}
+
+bool DebugEnvironments::updateLiveEnvironments(JSContext* cx) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+
+ /*
+ * Note that we must always update the top frame's environment objects'
+ * entries in liveEnvs because we can't be sure code hasn't run in that
+ * frame to change the environment chain since we were last called. The
+ * fp->prevUpToDate() flag indicates whether the environments of frames
+ * older than fp are already included in liveEnvs. It might seem simpler
+ * to have fp instead carry a flag indicating whether fp itself is
+ * accurately described, but then we would need to clear that flag
+ * whenever fp ran code. By storing the 'up to date' bit for fp->prev() in
+ * fp, simply popping fp effectively clears the flag for us, at exactly
+ * the time when execution resumes fp->prev().
+ */
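+  //
+  // Sketch of the invariant (youngest frame first):
+  //
+  //   fp0 -> fp1 -> fp2 -> fp3 -> ...
+  //
+  // If fp1->prevUpToDate() is set, the environments of fp2, fp3, ... are
+  // already recorded in liveEnvs, so the loop below can stop after
+  // processing fp0 and fp1.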
+ for (AllFramesIter i(cx); !i.done(); ++i) {
+ if (!i.hasUsableAbstractFramePtr()) {
+ continue;
+ }
+
+ AbstractFramePtr frame = i.abstractFramePtr();
+ if (frame.realm() != cx->realm()) {
+ continue;
+ }
+
+ if (!frame.isDebuggee()) {
+ continue;
+ }
+
+ RootedObject env(cx);
+ Rooted<Scope*> scope(cx);
+ if (!GetFrameEnvironmentAndScope(cx, frame, i.pc(), &env, &scope)) {
+ return false;
+ }
+
+ for (EnvironmentIter ei(cx, env, scope, frame); ei.withinInitialFrame();
+ ei++) {
+ if (ei.hasSyntacticEnvironment() && !ei.scope().is<GlobalScope>()) {
+ MOZ_ASSERT(ei.environment().realm() == cx->realm());
+ DebugEnvironments* envs = ensureRealmData(cx);
+ if (!envs) {
+ return false;
+ }
+ if (!envs->liveEnvs.put(&ei.environment(), LiveEnvironmentVal(ei))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+ }
+
+ if (frame.prevUpToDate()) {
+ return true;
+ }
+ MOZ_ASSERT(frame.realm()->isDebuggee());
+ frame.setPrevUpToDate();
+ }
+
+ return true;
+}
+
+LiveEnvironmentVal* DebugEnvironments::hasLiveEnvironment(
+ EnvironmentObject& env) {
+ DebugEnvironments* envs = env.realm()->debugEnvs();
+ if (!envs) {
+ return nullptr;
+ }
+
+ if (LiveEnvironmentMap::Ptr p = envs->liveEnvs.lookup(&env)) {
+ return &p->value();
+ }
+
+ return nullptr;
+}
+
+/* static */
+void DebugEnvironments::unsetPrevUpToDateUntil(JSContext* cx,
+ AbstractFramePtr until) {
+  // There are two exceptions where fp->prevUpToDate() is cleared without
+  // popping the frame: when a frame is rematerialized, and when a frame's
+  // debuggeeness is toggled off->on. In both cases, all frames younger than
+  // the frame must have their prevUpToDate cleared, because unrematerialized
+  // Ion frames and non-debuggee frames are skipped by updateLiveEnvironments.
+  // If such a frame later gains a usable AbstractFramePtr via
+  // rematerialization or becomes a debuggee, the prevUpToDate invariant
+  // would no longer hold for older frames on its stack.
+ for (AllFramesIter i(cx); !i.done(); ++i) {
+ if (!i.hasUsableAbstractFramePtr()) {
+ continue;
+ }
+
+ AbstractFramePtr frame = i.abstractFramePtr();
+ if (frame == until) {
+ return;
+ }
+
+ if (frame.realm() != cx->realm()) {
+ continue;
+ }
+
+ frame.unsetPrevUpToDate();
+ }
+}
+
+/* static */
+void DebugEnvironments::forwardLiveFrame(JSContext* cx, AbstractFramePtr from,
+ AbstractFramePtr to) {
+ DebugEnvironments* envs = cx->realm()->debugEnvs();
+ if (!envs) {
+ return;
+ }
+
+ for (MissingEnvironmentMap::Enum e(envs->missingEnvs); !e.empty();
+ e.popFront()) {
+ MissingEnvironmentKey key = e.front().key();
+ if (key.frame() == from) {
+ key.updateFrame(to);
+ e.rekeyFront(key);
+ }
+ }
+
+ for (LiveEnvironmentMap::Enum e(envs->liveEnvs); !e.empty(); e.popFront()) {
+ LiveEnvironmentVal& val = e.front().value();
+ if (val.frame() == from) {
+ val.updateFrame(to);
+ }
+ }
+}
+
+/* static */
+void DebugEnvironments::traceLiveFrame(JSTracer* trc, AbstractFramePtr frame) {
+ for (MissingEnvironmentMap::Enum e(missingEnvs); !e.empty(); e.popFront()) {
+ if (e.front().key().frame() == frame) {
+ TraceEdge(trc, &e.front().value(), "debug-env-live-frame-missing-env");
+ }
+ }
+}
+
+/*****************************************************************************/
+
+static JSObject* GetDebugEnvironment(JSContext* cx, const EnvironmentIter& ei);
+
+static DebugEnvironmentProxy* GetDebugEnvironmentForEnvironmentObject(
+ JSContext* cx, const EnvironmentIter& ei) {
+ Rooted<EnvironmentObject*> env(cx, &ei.environment());
+ if (DebugEnvironmentProxy* debugEnv =
+ DebugEnvironments::hasDebugEnvironment(cx, *env)) {
+ return debugEnv;
+ }
+
+ EnvironmentIter copy(cx, ei);
+ RootedObject enclosingDebug(cx, GetDebugEnvironment(cx, ++copy));
+ if (!enclosingDebug) {
+ return nullptr;
+ }
+
+ Rooted<DebugEnvironmentProxy*> debugEnv(
+ cx, DebugEnvironmentProxy::create(cx, *env, enclosingDebug));
+ if (!debugEnv) {
+ return nullptr;
+ }
+
+ if (!DebugEnvironments::addDebugEnvironment(cx, env, debugEnv)) {
+ return nullptr;
+ }
+
+ return debugEnv;
+}
+
+static DebugEnvironmentProxy* GetDebugEnvironmentForMissing(
+ JSContext* cx, const EnvironmentIter& ei) {
+ MOZ_ASSERT(!ei.hasSyntacticEnvironment() &&
+ (ei.scope().is<FunctionScope>() || ei.scope().is<LexicalScope>() ||
+ ei.scope().is<WasmInstanceScope>() ||
+ ei.scope().is<WasmFunctionScope>() || ei.scope().is<VarScope>() ||
+ ei.scope().kind() == ScopeKind::StrictEval));
+
+ if (DebugEnvironmentProxy* debugEnv =
+ DebugEnvironments::hasDebugEnvironment(cx, ei)) {
+ return debugEnv;
+ }
+
+ EnvironmentIter copy(cx, ei);
+ RootedObject enclosingDebug(cx, GetDebugEnvironment(cx, ++copy));
+ if (!enclosingDebug) {
+ return nullptr;
+ }
+
+ /*
+ * Create the missing environment object. For lexical environment objects,
+ * this takes care of storing variable values after the stack frame has
+   * been popped. For call objects, we only use the pretend call object to
+   * access the callee and bindings, and to receive dynamically added
+   * properties. Together, this provides the nice invariant that every
+   * DebugEnvironmentProxy has an EnvironmentObject.
+ *
+ * Note: to preserve envChain depth invariants, these lazily-reified
+ * envs must not be put on the frame's environment chain; instead, they are
+ * maintained via DebugEnvironments hooks.
+ */
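+  // Illustrative case: when the debugger pauses inside
+  //   function f() { var x = 1; return x; }   // f needs no CallObject
+  // it still wants an environment for f's function scope, so a hollow
+  // CallObject is created below as a debugger-facing stand-in.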
+ Rooted<DebugEnvironmentProxy*> debugEnv(cx);
+ if (ei.scope().is<FunctionScope>()) {
+ RootedFunction callee(cx,
+ ei.scope().as<FunctionScope>().canonicalFunction());
+
+ JS::ExposeObjectToActiveJS(callee);
+ Rooted<CallObject*> callobj(cx,
+ CallObject::createHollowForDebug(cx, callee));
+ if (!callobj) {
+ return nullptr;
+ }
+
+ debugEnv = DebugEnvironmentProxy::create(cx, *callobj, enclosingDebug);
+ } else if (ei.scope().is<LexicalScope>()) {
+ Rooted<LexicalScope*> lexicalScope(cx, &ei.scope().as<LexicalScope>());
+ Rooted<BlockLexicalEnvironmentObject*> env(
+ cx,
+ BlockLexicalEnvironmentObject::createHollowForDebug(cx, lexicalScope));
+ if (!env) {
+ return nullptr;
+ }
+
+ debugEnv = DebugEnvironmentProxy::create(cx, *env, enclosingDebug);
+ } else if (ei.scope().is<WasmInstanceScope>()) {
+ Rooted<WasmInstanceScope*> wasmInstanceScope(
+ cx, &ei.scope().as<WasmInstanceScope>());
+ Rooted<WasmInstanceEnvironmentObject*> env(
+ cx, WasmInstanceEnvironmentObject::createHollowForDebug(
+ cx, wasmInstanceScope));
+ if (!env) {
+ return nullptr;
+ }
+
+ debugEnv = DebugEnvironmentProxy::create(cx, *env, enclosingDebug);
+ } else if (ei.scope().is<WasmFunctionScope>()) {
+ Rooted<WasmFunctionScope*> wasmFunctionScope(
+ cx, &ei.scope().as<WasmFunctionScope>());
+ RootedObject enclosing(
+ cx, &enclosingDebug->as<DebugEnvironmentProxy>().environment());
+ Rooted<WasmFunctionCallObject*> callobj(
+ cx, WasmFunctionCallObject::createHollowForDebug(cx, enclosing,
+ wasmFunctionScope));
+ if (!callobj) {
+ return nullptr;
+ }
+
+ debugEnv = DebugEnvironmentProxy::create(cx, *callobj, enclosingDebug);
+ } else {
+ Rooted<Scope*> scope(cx, &ei.scope());
+ MOZ_ASSERT(scope->is<VarScope>() || scope->kind() == ScopeKind::StrictEval);
+
+ Rooted<VarEnvironmentObject*> env(
+ cx, VarEnvironmentObject::createHollowForDebug(cx, scope));
+ if (!env) {
+ return nullptr;
+ }
+
+ debugEnv = DebugEnvironmentProxy::create(cx, *env, enclosingDebug);
+ }
+
+ if (!debugEnv) {
+ return nullptr;
+ }
+
+ if (!DebugEnvironments::addDebugEnvironment(cx, ei, debugEnv)) {
+ return nullptr;
+ }
+
+ return debugEnv;
+}
+
+static JSObject* GetDebugEnvironmentForNonEnvironmentObject(
+ const EnvironmentIter& ei) {
+ JSObject& enclosing = ei.enclosingEnvironment();
+#ifdef DEBUG
+ JSObject* o = &enclosing;
+ while ((o = o->enclosingEnvironment())) {
+ MOZ_ASSERT(!o->is<EnvironmentObject>());
+ }
+#endif
+ return &enclosing;
+}
+
+static JSObject* GetDebugEnvironment(JSContext* cx, const EnvironmentIter& ei) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return nullptr;
+ }
+
+ if (ei.done()) {
+ return GetDebugEnvironmentForNonEnvironmentObject(ei);
+ }
+
+ if (ei.hasAnyEnvironmentObject()) {
+ return GetDebugEnvironmentForEnvironmentObject(cx, ei);
+ }
+
+ if (ei.scope().is<FunctionScope>() || ei.scope().is<LexicalScope>() ||
+ ei.scope().is<WasmInstanceScope>() ||
+ ei.scope().is<WasmFunctionScope>() || ei.scope().is<VarScope>() ||
+ ei.scope().kind() == ScopeKind::StrictEval) {
+ return GetDebugEnvironmentForMissing(cx, ei);
+ }
+
+ EnvironmentIter copy(cx, ei);
+ return GetDebugEnvironment(cx, ++copy);
+}
+
+JSObject* js::GetDebugEnvironmentForFunction(JSContext* cx,
+ HandleFunction fun) {
+ cx->check(fun);
+ MOZ_ASSERT(CanUseDebugEnvironmentMaps(cx));
+ if (!DebugEnvironments::updateLiveEnvironments(cx)) {
+ return nullptr;
+ }
+ JSScript* script = JSFunction::getOrCreateScript(cx, fun);
+ if (!script) {
+ return nullptr;
+ }
+ EnvironmentIter ei(cx, fun->environment(), script->enclosingScope());
+ return GetDebugEnvironment(cx, ei);
+}
+
+JSObject* js::GetDebugEnvironmentForSuspendedGenerator(
+ JSContext* cx, JSScript* script, AbstractGeneratorObject& genObj) {
+ RootedObject env(cx);
+ Rooted<Scope*> scope(cx);
+ GetSuspendedGeneratorEnvironmentAndScope(genObj, script, &env, &scope);
+
+ EnvironmentIter ei(cx, env, scope);
+ return GetDebugEnvironment(cx, ei);
+}
+
+JSObject* js::GetDebugEnvironmentForFrame(JSContext* cx, AbstractFramePtr frame,
+ jsbytecode* pc) {
+ cx->check(frame);
+ if (CanUseDebugEnvironmentMaps(cx) &&
+ !DebugEnvironments::updateLiveEnvironments(cx)) {
+ return nullptr;
+ }
+
+ RootedObject env(cx);
+ Rooted<Scope*> scope(cx);
+ if (!GetFrameEnvironmentAndScope(cx, frame, pc, &env, &scope)) {
+ return nullptr;
+ }
+
+ EnvironmentIter ei(cx, env, scope, frame);
+ return GetDebugEnvironment(cx, ei);
+}
+
+JSObject* js::GetDebugEnvironmentForGlobalLexicalEnvironment(JSContext* cx) {
+ EnvironmentIter ei(cx, &cx->global()->lexicalEnvironment(),
+ &cx->global()->emptyGlobalScope());
+ return GetDebugEnvironment(cx, ei);
+}
+
+bool js::CreateObjectsForEnvironmentChain(JSContext* cx,
+ HandleObjectVector chain,
+ HandleObject terminatingEnv,
+ MutableHandleObject envObj) {
+#ifdef DEBUG
+ for (size_t i = 0; i < chain.length(); ++i) {
+ cx->check(chain[i]);
+ MOZ_ASSERT(!chain[i]->isUnqualifiedVarObj());
+ }
+#endif
+
+ // Construct With object wrappers for the things on this environment chain
+ // and use the result as the thing to scope the function to.
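+  //
+  // For example, with chain = [A, B] and terminating environment T, the
+  // result is With(A) -> With(B) -> T, so A's bindings shadow B's.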
+ Rooted<WithEnvironmentObject*> withEnv(cx);
+ RootedObject enclosingEnv(cx, terminatingEnv);
+ for (size_t i = chain.length(); i > 0;) {
+ withEnv =
+ WithEnvironmentObject::createNonSyntactic(cx, chain[--i], enclosingEnv);
+ if (!withEnv) {
+ return false;
+ }
+ enclosingEnv = withEnv;
+ }
+
+ envObj.set(enclosingEnv);
+ return true;
+}
+
+JSObject& WithEnvironmentObject::object() const {
+ return getReservedSlot(OBJECT_SLOT).toObject();
+}
+
+JSObject* WithEnvironmentObject::withThis() const {
+ return &getReservedSlot(THIS_SLOT).toObject();
+}
+
+bool WithEnvironmentObject::isSyntactic() const {
+ Value v = getReservedSlot(SCOPE_SLOT);
+ MOZ_ASSERT(v.isPrivateGCThing() || v.isNull());
+ return v.isPrivateGCThing();
+}
+
+WithScope& WithEnvironmentObject::scope() const {
+ MOZ_ASSERT(isSyntactic());
+ return *static_cast<WithScope*>(getReservedSlot(SCOPE_SLOT).toGCThing());
+}
+
+ModuleEnvironmentObject* js::GetModuleEnvironmentForScript(JSScript* script) {
+ ModuleObject* module = GetModuleObjectForScript(script);
+ if (!module) {
+ return nullptr;
+ }
+
+ return module->environment();
+}
+
+ModuleObject* js::GetModuleObjectForScript(JSScript* script) {
+ for (ScopeIter si(script); si; si++) {
+ if (si.kind() == ScopeKind::Module) {
+ return si.scope()->as<ModuleScope>().module();
+ }
+ }
+ return nullptr;
+}
+
+static bool GetThisValueForDebuggerEnvironmentIterMaybeOptimizedOut(
+ JSContext* cx, const EnvironmentIter& originalIter, HandleObject scopeChain,
+ const jsbytecode* pc, MutableHandleValue res) {
+ for (EnvironmentIter ei(cx, originalIter); ei; ei++) {
+ if (ei.scope().kind() == ScopeKind::Module) {
+ res.setUndefined();
+ return true;
+ }
+
+ if (!ei.scope().is<FunctionScope>() ||
+ ei.scope().as<FunctionScope>().canonicalFunction()->hasLexicalThis()) {
+ continue;
+ }
+
+ RootedScript script(cx, ei.scope().as<FunctionScope>().script());
+
+ if (ei.withinInitialFrame()) {
+ MOZ_ASSERT(pc, "must have PC if there is an initial frame");
+
+ // Figure out if we executed JSOp::FunctionThis and set it.
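+      //
+      // Illustrative prologue shape for a script with a this-binding (the
+      // exact store op depends on where the binding lives):
+      //   FunctionThis                ; push the boxed |this|
+      //   SetLocal/SetAliasedVar      ; initialize the '.this' binding
+      // The binding is thus initialized once pc is past that store op.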
+ bool executedInitThisOp = false;
+ if (script->functionHasThisBinding()) {
+ for (const BytecodeLocation& loc : js::AllBytecodesIterable(script)) {
+ if (loc.getOp() == JSOp::FunctionThis) {
+ // The next op after JSOp::FunctionThis always sets it.
+ executedInitThisOp = pc > GetNextPc(loc.toRawBytecode());
+ break;
+ }
+ }
+ }
+
+ if (!executedInitThisOp) {
+ AbstractFramePtr initialFrame = ei.initialFrame();
+ // Either we're yet to initialize the this-binding
+ // (JSOp::FunctionThis), or the script does not have a this-binding
+ // (because it doesn't use |this|).
+
+ // If our this-argument is an object, or we're in strict mode,
+ // the this-binding is always the same as our this-argument.
+ if (initialFrame.thisArgument().isObject() || script->strict()) {
+ res.set(initialFrame.thisArgument());
+ return true;
+ }
+
+ // We didn't initialize the this-binding yet. Determine the
+ // correct |this| value for this frame (box primitives if not
+ // in strict mode), and assign it to the this-argument slot so
+ // JSOp::FunctionThis will use it and not box a second time.
+ if (!GetFunctionThis(cx, initialFrame, res)) {
+ return false;
+ }
+ initialFrame.thisArgument() = res;
+ return true;
+ }
+ }
+
+ if (!script->functionHasThisBinding()) {
+ res.setMagic(JS_OPTIMIZED_OUT);
+ return true;
+ }
+
+ for (Rooted<BindingIter> bi(cx, BindingIter(script)); bi; bi++) {
+ if (bi.name() != cx->names().dotThis) {
+ continue;
+ }
+
+ BindingLocation loc = bi.location();
+ if (loc.kind() == BindingLocation::Kind::Environment) {
+ RootedObject callObj(cx, &ei.environment().as<CallObject>());
+ return GetProperty(cx, callObj, callObj, bi.name()->asPropertyName(),
+ res);
+ }
+
+ if (loc.kind() == BindingLocation::Kind::Frame) {
+ if (ei.withinInitialFrame()) {
+ res.set(ei.initialFrame().unaliasedLocal(loc.slot()));
+ return true;
+ }
+
+ if (ei.hasAnyEnvironmentObject()) {
+ RootedObject env(cx, &ei.environment());
+ AbstractGeneratorObject* genObj =
+ GetGeneratorObjectForEnvironment(cx, env);
+ if (genObj && genObj->isSuspended() && genObj->hasStackStorage()) {
+ res.set(genObj->getUnaliasedLocal(loc.slot()));
+ return true;
+ }
+ }
+ }
+
+ res.setMagic(JS_OPTIMIZED_OUT);
+ return true;
+ }
+
+ MOZ_CRASH("'this' binding must be found");
+ }
+
+ GetNonSyntacticGlobalThis(cx, scopeChain, res);
+ return true;
+}
+
+bool js::GetThisValueForDebuggerFrameMaybeOptimizedOut(JSContext* cx,
+ AbstractFramePtr frame,
+ const jsbytecode* pc,
+ MutableHandleValue res) {
+ RootedObject scopeChain(cx);
+ Rooted<Scope*> scope(cx);
+ if (!GetFrameEnvironmentAndScope(cx, frame, pc, &scopeChain, &scope)) {
+ return false;
+ }
+
+ EnvironmentIter ei(cx, scopeChain, scope, frame);
+ return GetThisValueForDebuggerEnvironmentIterMaybeOptimizedOut(
+ cx, ei, scopeChain, pc, res);
+}
+
+bool js::GetThisValueForDebuggerSuspendedGeneratorMaybeOptimizedOut(
+ JSContext* cx, AbstractGeneratorObject& genObj, JSScript* script,
+ MutableHandleValue res) {
+ RootedObject scopeChain(cx);
+ Rooted<Scope*> scope(cx);
+ GetSuspendedGeneratorEnvironmentAndScope(genObj, script, &scopeChain, &scope);
+
+ EnvironmentIter ei(cx, scopeChain, scope);
+ return GetThisValueForDebuggerEnvironmentIterMaybeOptimizedOut(
+ cx, ei, scopeChain, nullptr, res);
+}
+
+bool js::CheckLexicalNameConflict(
+ JSContext* cx, Handle<ExtensibleLexicalEnvironmentObject*> lexicalEnv,
+ HandleObject varObj, Handle<PropertyName*> name) {
+ const char* redeclKind = nullptr;
+ RootedId id(cx, NameToId(name));
+ mozilla::Maybe<PropertyInfo> prop;
+ bool shadowsExistingProperty = false;
+ if (varObj->is<GlobalObject>() &&
+ varObj->as<GlobalObject>().isInVarNames(name)) {
+ // ES 15.1.11 step 5.a
+ redeclKind = "var";
+ } else if ((prop = lexicalEnv->lookup(cx, name))) {
+ // ES 15.1.11 step 5.b
+ redeclKind = prop->writable() ? "let" : "const";
+ } else if (varObj->is<NativeObject>() &&
+ (prop = varObj->as<NativeObject>().lookup(cx, name))) {
+ // Faster path for ES 15.1.11 step 5.c-d when the shape can be found
+ // without going through a resolve hook.
+ if (!prop->configurable()) {
+ redeclKind = "non-configurable global property";
+ } else {
+ shadowsExistingProperty = true;
+ }
+ } else {
+ // ES 15.1.11 step 5.c-d
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ if (!GetOwnPropertyDescriptor(cx, varObj, id, &desc)) {
+ return false;
+ }
+ if (desc.isSome()) {
+ if (!desc->configurable()) {
+ redeclKind = "non-configurable global property";
+ } else {
+ shadowsExistingProperty = true;
+ }
+ }
+ }
+
+ if (redeclKind) {
+ ReportRuntimeRedeclaration(cx, name, redeclKind);
+ return false;
+ }
+ if (shadowsExistingProperty && varObj->is<GlobalObject>()) {
+ // Shadowing a configurable global property with a new lexical is one
+ // of the rare ways to invalidate a GetGName stub.
+ varObj->as<GlobalObject>().bumpGenerationCount();
+ }
+
+ return true;
+}
+
+[[nodiscard]] static bool CheckVarNameConflict(
+ JSContext* cx, Handle<LexicalEnvironmentObject*> lexicalEnv,
+ Handle<PropertyName*> name) {
+ mozilla::Maybe<PropertyInfo> prop = lexicalEnv->lookup(cx, name);
+ if (prop.isSome()) {
+ ReportRuntimeRedeclaration(cx, name, prop->writable() ? "let" : "const");
+ return false;
+ }
+ return true;
+}
+
+static void ReportCannotDeclareGlobalBinding(JSContext* cx,
+ Handle<PropertyName*> name,
+ const char* reason) {
+ if (UniqueChars printable = AtomToPrintableString(cx, name)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_DECLARE_GLOBAL_BINDING,
+ printable.get(), reason);
+ }
+}
+
+bool js::CheckCanDeclareGlobalBinding(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<PropertyName*> name,
+ bool isFunction) {
+ RootedId id(cx, NameToId(name));
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ if (!GetOwnPropertyDescriptor(cx, global, id, &desc)) {
+ return false;
+ }
+
+ // ES 8.1.1.4.15 CanDeclareGlobalVar
+ // ES 8.1.1.4.16 CanDeclareGlobalFunction
+
+ // Step 4.
+ if (desc.isNothing()) {
+    // 8.1.1.4.15 step 6.
+    // 8.1.1.4.16 step 5.
+ if (global->isExtensible()) {
+ return true;
+ }
+
+ ReportCannotDeclareGlobalBinding(cx, name, "global is non-extensible");
+ return false;
+ }
+
+ // Global functions have additional restrictions.
+ if (isFunction) {
+    // 8.1.1.4.16 step 6.
+ if (desc->configurable()) {
+ return true;
+ }
+
+    // 8.1.1.4.16 step 7.
+ if (desc->isDataDescriptor() && desc->writable() && desc->enumerable()) {
+ return true;
+ }
+
+ ReportCannotDeclareGlobalBinding(cx, name,
+ "property must be configurable or "
+ "both writable and enumerable");
+ return false;
+ }
+
+ return true;
+}
+
+// Add the var/let/const bindings to the variables environment of a global or
+// sloppy-eval script. The redeclaration checks should already have been
+// performed.
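+//
+// For example (illustrative), for the global script
+//   var a; let b; const c = 1;
+// 'a' is defined as a property of varObj (the global object), while 'b' and
+// 'c' are defined on the global lexical environment, holding the
+// JS_UNINITIALIZED_LEXICAL magic value until their initializers run.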
+static bool InitGlobalOrEvalDeclarations(
+ JSContext* cx, HandleScript script,
+ Handle<ExtensibleLexicalEnvironmentObject*> lexicalEnv,
+ HandleObject varObj) {
+ Rooted<BindingIter> bi(cx, BindingIter(script));
+ for (; bi; bi++) {
+ if (bi.isTopLevelFunction()) {
+ continue;
+ }
+
+ Rooted<PropertyName*> name(cx, bi.name()->asPropertyName());
+ unsigned attrs = script->isForEval() ? JSPROP_ENUMERATE
+ : JSPROP_ENUMERATE | JSPROP_PERMANENT;
+
+ switch (bi.kind()) {
+ case BindingKind::Var: {
+ PropertyResult prop;
+ RootedObject obj2(cx);
+ if (!LookupProperty(cx, varObj, name, &obj2, &prop)) {
+ return false;
+ }
+
+ if (prop.isNotFound() ||
+ (obj2 != varObj && varObj->is<GlobalObject>())) {
+ if (!DefineDataProperty(cx, varObj, name, UndefinedHandleValue,
+ attrs)) {
+ return false;
+ }
+ }
+
+ if (varObj->is<GlobalObject>()) {
+ if (!varObj->as<GlobalObject>().addToVarNames(cx, name)) {
+ return false;
+ }
+ }
+
+ break;
+ }
+
+ case BindingKind::Const:
+ attrs |= JSPROP_READONLY;
+ [[fallthrough]];
+
+ case BindingKind::Let: {
+ RootedId id(cx, NameToId(name));
+ RootedValue uninitialized(cx, MagicValue(JS_UNINITIALIZED_LEXICAL));
+ if (!NativeDefineDataProperty(cx, lexicalEnv, id, uninitialized,
+ attrs)) {
+ return false;
+ }
+
+ break;
+ }
+
+ default:
+ MOZ_CRASH("Expected binding kind");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Define the hoisted top-level functions on the variables environment of a
+// global or sloppy-eval script. Redeclaration checks must already have been
+// performed.
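+//
+// For example (illustrative), a global script containing
+//   function f() {}
+// reaches here with f among the script's gcthings; f is cloned, bound to the
+// name 'f' on varObj, and (for globals) recorded in [[VarNames]].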
+static bool InitHoistedFunctionDeclarations(JSContext* cx, HandleScript script,
+ HandleObject envChain,
+ HandleObject varObj,
+ GCThingIndex lastFun) {
+  // The inner functions up to `lastFun` are the hoisted function declarations
+ // of the script. We must clone and bind them now.
+ for (size_t i = 0; i <= lastFun; ++i) {
+ JS::GCCellPtr thing = script->gcthings()[i];
+
+    // Skip the initial scopes. In practice, there is at most one variables
+    // scope and one lexical scope.
+ if (thing.is<js::Scope>()) {
+ MOZ_ASSERT(i < 2);
+ continue;
+ }
+
+ RootedFunction fun(cx, &thing.as<JSObject>().as<JSFunction>());
+ Rooted<PropertyName*> name(cx, fun->explicitName()->asPropertyName());
+
+ // Clone the function before exposing to script as a binding.
+ JSObject* clone = Lambda(cx, fun, envChain);
+ if (!clone) {
+ return false;
+ }
+ RootedValue rval(cx, ObjectValue(*clone));
+
+ PropertyResult prop;
+ RootedObject pobj(cx);
+ if (!LookupProperty(cx, varObj, name, &pobj, &prop)) {
+ return false;
+ }
+
+ // ECMA requires functions defined when entering Eval code to be
+ // impermanent.
+ unsigned attrs = script->isForEval() ? JSPROP_ENUMERATE
+ : JSPROP_ENUMERATE | JSPROP_PERMANENT;
+
+ if (prop.isNotFound() || pobj != varObj) {
+ if (!DefineDataProperty(cx, varObj, name, rval, attrs)) {
+ return false;
+ }
+
+ if (varObj->is<GlobalObject>()) {
+ if (!varObj->as<GlobalObject>().addToVarNames(cx, name)) {
+ return false;
+ }
+ }
+
+ // Done processing this function.
+ continue;
+ }
+
+ /*
+ * A DebugEnvironmentProxy is okay here, and sometimes necessary. If
+ * Debugger.Frame.prototype.eval defines a function with the same name as an
+ * extant variable in the frame, the DebugEnvironmentProxy takes care of
+ * storing the function in the stack frame (for non-aliased variables) or on
+ * the scope object (for aliased).
+ */
+ MOZ_ASSERT(varObj->is<NativeObject>() ||
+ varObj->is<DebugEnvironmentProxy>());
+ if (varObj->is<GlobalObject>()) {
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (propInfo.configurable()) {
+ if (!DefineDataProperty(cx, varObj, name, rval, attrs)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(propInfo.isDataProperty());
+ MOZ_ASSERT(propInfo.writable());
+ MOZ_ASSERT(propInfo.enumerable());
+ }
+
+ // Careful: the presence of a shape, even one appearing to derive from
+ // a variable declaration, doesn't mean it's in [[VarNames]].
+ if (!varObj->as<GlobalObject>().addToVarNames(cx, name)) {
+ return false;
+ }
+ }
+
+ /*
+ * Non-global properties, and global properties which we aren't simply
+ * redefining, must be set. First, this preserves their attributes.
+ * Second, this will produce warnings and/or errors as necessary if the
+ * specified Call object property is not writable (const).
+ */
+
+ RootedId id(cx, NameToId(name));
+ if (!PutProperty(cx, varObj, id, rval, script->strict())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool js::CheckGlobalDeclarationConflicts(
+ JSContext* cx, HandleScript script,
+ Handle<ExtensibleLexicalEnvironmentObject*> lexicalEnv,
+ HandleObject varObj) {
+ // Due to the extensibility of the global lexical environment, we must
+ // check for redeclaring a binding.
+ //
+ // In the case of non-syntactic environment chains, we are checking
+ // redeclarations against the non-syntactic lexical environment and the
+ // variables object that the lexical environment corresponds to.
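+  //
+  // For example (illustrative), evaluating these as two successive global
+  // scripts must throw a redeclaration error on the second:
+  //   let x;      // script 1
+  //   var x;      // script 2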
+ Rooted<PropertyName*> name(cx);
+ Rooted<BindingIter> bi(cx, BindingIter(script));
+
+ // ES 15.1.11 GlobalDeclarationInstantiation
+
+ // Step 6.
+ //
+ // Check 'var' declarations do not conflict with existing bindings in the
+ // global lexical environment.
+ for (; bi; bi++) {
+ if (bi.kind() != BindingKind::Var) {
+ break;
+ }
+ name = bi.name()->asPropertyName();
+ if (!CheckVarNameConflict(cx, lexicalEnv, name)) {
+ return false;
+ }
+
+    // Steps 10 and 12.
+ //
+ // Check that global functions and vars may be declared.
+ if (varObj->is<GlobalObject>()) {
+ Handle<GlobalObject*> global = varObj.as<GlobalObject>();
+ if (!CheckCanDeclareGlobalBinding(cx, global, name,
+ bi.isTopLevelFunction())) {
+ return false;
+ }
+ }
+ }
+
+ // Step 5.
+ //
+ // Check that lexical bindings do not conflict.
+ for (; bi; bi++) {
+ name = bi.name()->asPropertyName();
+ if (!CheckLexicalNameConflict(cx, lexicalEnv, varObj, name)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+[[nodiscard]] static bool CheckVarNameConflictsInEnv(JSContext* cx,
+ HandleScript script,
+ HandleObject obj) {
+ Rooted<LexicalEnvironmentObject*> env(cx);
+
+ if (obj->is<LexicalEnvironmentObject>()) {
+ env = &obj->as<LexicalEnvironmentObject>();
+ } else if (obj->is<DebugEnvironmentProxy>() &&
+ obj->as<DebugEnvironmentProxy>()
+ .environment()
+ .is<LexicalEnvironmentObject>()) {
+ env = &obj->as<DebugEnvironmentProxy>()
+ .environment()
+ .as<LexicalEnvironmentObject>();
+ } else {
+ // Environment cannot contain lexical bindings.
+ return true;
+ }
+
+ if (env->is<BlockLexicalEnvironmentObject>() &&
+ env->as<BlockLexicalEnvironmentObject>().scope().kind() ==
+ ScopeKind::SimpleCatch) {
+ // Annex B.3.5 allows redeclaring simple (non-destructured) catch parameters
+ // with var declarations.
+ return true;
+ }
+
+ Rooted<PropertyName*> name(cx);
+ for (BindingIter bi(script); bi; bi++) {
+ name = bi.name()->asPropertyName();
+ if (!CheckVarNameConflict(cx, env, name)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool CheckArgumentsRedeclaration(JSContext* cx, HandleScript script) {
+ for (BindingIter bi(script); bi; bi++) {
+ if (bi.name() == cx->names().arguments) {
+ ReportRuntimeRedeclaration(cx, cx->names().arguments, "let");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool CheckEvalDeclarationConflicts(JSContext* cx, HandleScript script,
+ HandleObject scopeChain,
+ HandleObject varObj) {
+ // Strict eval has its own call objects and we shouldn't end up here.
+ //
+ // Non-strict eval may introduce 'var' bindings that conflict with lexical
+ // bindings in an enclosing lexical scope.
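+  //
+  // For example (illustrative):
+  //   { let x; eval("var x;"); }   // throws: the var would hoist over x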
+ MOZ_ASSERT(!script->bodyScope()->hasEnvironment());
+ MOZ_ASSERT(!script->strict());
+
+ MOZ_ASSERT(script->bodyScope()->as<EvalScope>().hasBindings());
+
+ RootedObject obj(cx, scopeChain);
+
+ // ES 18.2.1.3.
+
+ // Step 5.
+ //
+ // Check that a direct eval will not hoist 'var' bindings over lexical
+ // bindings with the same name.
+ while (obj != varObj) {
+ if (!CheckVarNameConflictsInEnv(cx, script, obj)) {
+ return false;
+ }
+ obj = obj->enclosingEnvironment();
+ }
+
+ // Check for redeclared "arguments" in function parameter expressions.
+ //
+ // Direct eval in function parameter expressions isn't allowed to redeclare
+ // the implicit "arguments" bindings:
+ // function f(a = eval("var arguments;")) {}
+ //
+ // |varObj| isn't a CallObject when the direct eval occurs in the function
+ // body and the extra function body var scope is present. The extra var scope
+ // is present iff the function has parameter expressions. So when we test
+ // that |varObj| is a CallObject and function parameter expressions are
+ // present, we can pinpoint the direct eval location to be in a function
+ // parameter expression. Additionally we must ensure the function isn't an
+ // arrow function, because arrow functions don't have an implicit "arguments"
+ // binding.
+ if (script->isDirectEvalInFunction() && varObj->is<CallObject>()) {
+ JSFunction* fun = &varObj->as<CallObject>().callee();
+ JSScript* funScript = fun->nonLazyScript();
+ if (funScript->functionHasParameterExprs() && !fun->isArrow()) {
+ if (!CheckArgumentsRedeclaration(cx, script)) {
+ return false;
+ }
+ }
+ }
+
+ // Step 8.
+ //
+ // Check that global functions may be declared.
+ if (varObj->is<GlobalObject>()) {
+ Handle<GlobalObject*> global = varObj.as<GlobalObject>();
+ Rooted<PropertyName*> name(cx);
+ for (Rooted<BindingIter> bi(cx, BindingIter(script)); bi; bi++) {
+ name = bi.name()->asPropertyName();
+ if (!CheckCanDeclareGlobalBinding(cx, global, name,
+ bi.isTopLevelFunction())) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool js::GlobalOrEvalDeclInstantiation(JSContext* cx, HandleObject envChain,
+ HandleScript script,
+ GCThingIndex lastFun) {
+ MOZ_ASSERT(script->isGlobalCode() || script->isForEval());
+ MOZ_ASSERT(!script->selfHosted());
+
+ RootedObject varObj(cx, &GetVariablesObject(envChain));
+ Rooted<ExtensibleLexicalEnvironmentObject*> lexicalEnv(cx);
+
+ if (script->isForEval()) {
+ if (!CheckEvalDeclarationConflicts(cx, script, envChain, varObj)) {
+ return false;
+ }
+ } else {
+ lexicalEnv = &NearestEnclosingExtensibleLexicalEnvironment(envChain);
+ if (!CheckGlobalDeclarationConflicts(cx, script, lexicalEnv, varObj)) {
+ return false;
+ }
+ }
+
+ if (!InitGlobalOrEvalDeclarations(cx, script, lexicalEnv, varObj)) {
+ return false;
+ }
+
+ return InitHoistedFunctionDeclarations(cx, script, envChain, varObj, lastFun);
+}
+
+bool js::InitFunctionEnvironmentObjects(JSContext* cx, AbstractFramePtr frame) {
+ MOZ_ASSERT(frame.isFunctionFrame());
+ MOZ_ASSERT(frame.callee()->needsFunctionEnvironmentObjects());
+
+ RootedFunction callee(cx, frame.callee());
+
+  // A named lambda may have an environment that holds the function itself,
+  // so its body can refer to it recursively.
+ if (callee->needsNamedLambdaEnvironment()) {
+ NamedLambdaObject* declEnv = NamedLambdaObject::create(cx, frame);
+ if (!declEnv) {
+ return false;
+ }
+ frame.pushOnEnvironmentChain(*declEnv);
+ }
+
+ // If the function has parameter default expressions, there may be an
+ // extra environment to hold the parameters.
+ if (callee->needsCallObject()) {
+ CallObject* callObj = CallObject::create(cx, frame);
+ if (!callObj) {
+ return false;
+ }
+ frame.pushOnEnvironmentChain(*callObj);
+ }
+
+ return true;
+}
+
+bool js::PushVarEnvironmentObject(JSContext* cx, Handle<Scope*> scope,
+ AbstractFramePtr frame) {
+ auto* env = VarEnvironmentObject::createForFrame(cx, scope, frame);
+ if (!env) {
+ return false;
+ }
+ frame.pushOnEnvironmentChain(*env);
+ return true;
+}
+
+bool js::GetFrameEnvironmentAndScope(JSContext* cx, AbstractFramePtr frame,
+ const jsbytecode* pc,
+ MutableHandleObject env,
+ MutableHandle<Scope*> scope) {
+ env.set(frame.environmentChain());
+
+ if (frame.isWasmDebugFrame()) {
+ Rooted<WasmInstanceObject*> instance(cx, frame.wasmInstance()->object());
+ uint32_t funcIndex = frame.asWasmDebugFrame()->funcIndex();
+ scope.set(WasmInstanceObject::getFunctionScope(cx, instance, funcIndex));
+ if (!scope) {
+ return false;
+ }
+ } else {
+ scope.set(frame.script()->innermostScope(pc));
+ }
+ return true;
+}
+
+void js::GetSuspendedGeneratorEnvironmentAndScope(
+ AbstractGeneratorObject& genObj, JSScript* script, MutableHandleObject env,
+ MutableHandle<Scope*> scope) {
+ env.set(&genObj.environmentChain());
+
+ jsbytecode* pc =
+ script->offsetToPC(script->resumeOffsets()[genObj.resumeIndex()]);
+ scope.set(script->innermostScope(pc));
+}
+
+#ifdef DEBUG
+
+typedef HashSet<PropertyName*> PropertyNameSet;
+
+static bool RemoveReferencedNames(JSContext* cx, HandleScript script,
+ PropertyNameSet& remainingNames) {
+ // Remove from remainingNames --- the closure variables in some outer
+ // script --- any free variables in this script. This analysis isn't perfect:
+ //
+ // - It will not account for free variables in an inner script which are
+ // actually accessing some name in an intermediate script between the
+ // inner and outer scripts. This can cause remainingNames to be an
+ // underapproximation.
+ //
+ // - It will not account for new names introduced via eval. This can cause
+ // remainingNames to be an overapproximation. This would be easy to fix
+  //   but the overapproximation is tolerable: the eval will probably not
+  //   access these names, and putting an eval in an inner script is bad
+  //   news anyway if you care about entraining variables unnecessarily.
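+  //
+  // Illustrative underapproximation (first point above):
+  //   function outer() {
+  //     var a;
+  //     function mid() { var a; function inner() { return a; } }
+  //   }
+  // inner's 'a' really refers to mid's 'a', yet this analysis also removes
+  // 'a' from outer's remainingNames.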
+
+ AllBytecodesIterable iter(script);
+ for (BytecodeLocation loc : iter) {
+ PropertyName* name;
+
+ switch (loc.getOp()) {
+ case JSOp::GetName:
+ case JSOp::SetName:
+ case JSOp::StrictSetName:
+ name = script->getName(loc.toRawBytecode());
+ break;
+
+ case JSOp::GetAliasedVar:
+ case JSOp::SetAliasedVar:
+ name = EnvironmentCoordinateNameSlow(script, loc.toRawBytecode());
+ break;
+
+ default:
+ name = nullptr;
+ break;
+ }
+
+ if (name) {
+ remainingNames.remove(name);
+ }
+ }
+
+ RootedFunction fun(cx);
+ RootedScript innerScript(cx);
+ for (JS::GCCellPtr gcThing : script->gcthings()) {
+ if (!gcThing.is<JSObject>()) {
+ continue;
+ }
+ JSObject* obj = &gcThing.as<JSObject>();
+
+ if (!obj->is<JSFunction>()) {
+ continue;
+ }
+ fun = &obj->as<JSFunction>();
+
+ if (!fun->isInterpreted()) {
+ continue;
+ }
+
+ innerScript = JSFunction::getOrCreateScript(cx, fun);
+ if (!innerScript) {
+ return false;
+ }
+
+ if (!RemoveReferencedNames(cx, innerScript, remainingNames)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool AnalyzeEntrainedVariablesInScript(JSContext* cx,
+ HandleScript script,
+ HandleScript innerScript) {
+ PropertyNameSet remainingNames(cx);
+
+ for (BindingIter bi(script); bi; bi++) {
+ if (bi.closedOver()) {
+ PropertyName* name = bi.name()->asPropertyName();
+ PropertyNameSet::AddPtr p = remainingNames.lookupForAdd(name);
+ if (!p && !remainingNames.add(p, name)) {
+ return false;
+ }
+ }
+ }
+
+ if (!RemoveReferencedNames(cx, innerScript, remainingNames)) {
+ return false;
+ }
+
+ if (!remainingNames.empty()) {
+ Sprinter buf(cx);
+ if (!buf.init()) {
+ return false;
+ }
+
+ buf.printf("Script ");
+
+ if (JSAtom* name = script->function()->displayAtom()) {
+ buf.putString(name);
+ buf.printf(" ");
+ }
+
+ buf.printf("(%s:%u) has variables entrained by ", script->filename(),
+ script->lineno());
+
+ if (JSAtom* name = innerScript->function()->displayAtom()) {
+ buf.putString(name);
+ buf.printf(" ");
+ }
+
+ buf.printf("(%s:%u) ::", innerScript->filename(), innerScript->lineno());
+
+ for (PropertyNameSet::Range r = remainingNames.all(); !r.empty();
+ r.popFront()) {
+ buf.printf(" ");
+ buf.putString(r.front());
+ }
+
+ printf("%s\n", buf.string());
+ }
+
+ RootedFunction fun(cx);
+ RootedScript innerInnerScript(cx);
+ for (JS::GCCellPtr gcThing : script->gcthings()) {
+ if (!gcThing.is<JSObject>()) {
+ continue;
+ }
+ JSObject* obj = &gcThing.as<JSObject>();
+
+ if (!obj->is<JSFunction>()) {
+ continue;
+ }
+ fun = &obj->as<JSFunction>();
+
+ if (!fun->isInterpreted()) {
+ continue;
+ }
+
+ innerInnerScript = JSFunction::getOrCreateScript(cx, fun);
+ if (!innerInnerScript) {
+ return false;
+ }
+
+ if (!AnalyzeEntrainedVariablesInScript(cx, script, innerInnerScript)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Look for local variables in this script, or in any script nested within it,
+// that are part of the script's call object and are unnecessarily entrained by
+// inner scripts that do not refer to those variables. An example is:
+//
+// function foo() {
+// var a, b;
+// function bar() { return a; }
+// function baz() { return b; }
+// }
+//
+// |bar| unnecessarily entrains |b|, and |baz| unnecessarily entrains |a|.
+bool js::AnalyzeEntrainedVariables(JSContext* cx, HandleScript script) {
+ RootedFunction fun(cx);
+ RootedScript innerScript(cx);
+ for (JS::GCCellPtr gcThing : script->gcthings()) {
+ if (!gcThing.is<JSObject>()) {
+ continue;
+ }
+ JSObject* obj = &gcThing.as<JSObject>();
+
+ if (!obj->is<JSFunction>()) {
+ continue;
+ }
+ fun = &obj->as<JSFunction>();
+
+ if (!fun->isInterpreted()) {
+ continue;
+ }
+
+ innerScript = JSFunction::getOrCreateScript(cx, fun);
+ if (!innerScript) {
+ return false;
+ }
+
+ if (fun->needsCallObject()) {
+ if (!AnalyzeEntrainedVariablesInScript(cx, script, innerScript)) {
+ return false;
+ }
+ }
+
+ if (!AnalyzeEntrainedVariables(cx, innerScript)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+#endif
+
+JSObject* js::MaybeOptimizeBindGlobalName(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<PropertyName*> name) {
+ // We can bind name to the global lexical scope if the binding already
+ // exists, is initialized, and is writable (i.e., an initialized
+ // 'let') at compile time.
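+  //
+  // For example (illustrative): after a previous script has run
+  //   let counter = 0;
+  // a later script's assignment |counter = 1| can be bound directly to the
+  // global lexical environment. A 'const' or a still-uninitialized (TDZ)
+  // 'let' does not qualify, since such accesses must still be checked at
+  // runtime.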
+ Rooted<GlobalLexicalEnvironmentObject*> env(cx,
+ &global->lexicalEnvironment());
+ mozilla::Maybe<PropertyInfo> prop = env->lookup(cx, name);
+ if (prop.isSome()) {
+ if (prop->writable() &&
+ !env->getSlot(prop->slot()).isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ return env;
+ }
+ return nullptr;
+ }
+
+ prop = global->lookup(cx, name);
+ if (prop.isSome()) {
+ // If the property does not currently exist on the global lexical
+ // scope, we can bind name to the global object if the property
+ // exists on the global and is non-configurable, as then it cannot
+ // be shadowed.
+ if (!prop->configurable()) {
+ return global;
+ }
+ }
+
+ return nullptr;
+}
+
+const char* EnvironmentObject::typeString() const {
+ if (is<CallObject>()) {
+ return "CallObject";
+ }
+ if (is<VarEnvironmentObject>()) {
+ return "VarEnvironmentObject";
+ }
+ if (is<ModuleEnvironmentObject>()) {
+ return "ModuleEnvironmentObject";
+ }
+ if (is<WasmInstanceEnvironmentObject>()) {
+ return "WasmInstanceEnvironmentObject";
+ }
+ if (is<WasmFunctionCallObject>()) {
+ return "WasmFunctionCallObject";
+ }
+ if (is<LexicalEnvironmentObject>()) {
+ if (is<ScopedLexicalEnvironmentObject>()) {
+ if (is<BlockLexicalEnvironmentObject>()) {
+ if (is<NamedLambdaObject>()) {
+ return "NamedLambdaObject";
+ }
+ return "BlockLexicalEnvironmentObject";
+ }
+ if (is<ClassBodyLexicalEnvironmentObject>()) {
+ return "ClassBodyLexicalEnvironmentObject";
+ }
+ return "ScopedLexicalEnvironmentObject";
+ }
+
+ if (is<ExtensibleLexicalEnvironmentObject>()) {
+ if (is<GlobalLexicalEnvironmentObject>()) {
+ return "GlobalLexicalEnvironmentObject";
+ }
+ if (is<NonSyntacticLexicalEnvironmentObject>()) {
+ return "NonSyntacticLexicalEnvironmentObject";
+ }
+ return "ExtensibleLexicalEnvironmentObject";
+ }
+
+ return "LexicalEnvironmentObject";
+ }
+ if (is<NonSyntacticVariablesObject>()) {
+ return "NonSyntacticVariablesObject";
+ }
+ if (is<WithEnvironmentObject>()) {
+ return "WithEnvironmentObject";
+ }
+ if (is<RuntimeLexicalErrorObject>()) {
+ return "RuntimeLexicalErrorObject";
+ }
+
+ return "EnvironmentObject";
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+static void DumpEnvironmentObject(JSObject* unrootedEnvObj) {
+ JSContext* cx = TlsContext.get();
+ if (!cx) {
+ fprintf(stderr, "*** can't get JSContext for current thread\n");
+ return;
+ }
+
+ Rooted<JSObject*> envObj(cx, unrootedEnvObj);
+ while (envObj) {
+ Rooted<EnvironmentObject*> env(cx);
+ if (envObj->is<EnvironmentObject>()) {
+ env = &envObj->as<EnvironmentObject>();
+ } else if (envObj->is<DebugEnvironmentProxy>()) {
+ fprintf(stderr, "[DebugProxy] ");
+ env = &envObj->as<DebugEnvironmentProxy>().environment();
+ } else {
+ MOZ_ASSERT(envObj->is<GlobalObject>());
+ fprintf(stderr, "global\n");
+ break;
+ }
+
+ fprintf(stderr, "%s", env->typeString());
+
+ Rooted<Scope*> scope(cx);
+ if (env->is<VarEnvironmentObject>()) {
+ scope = &env->as<VarEnvironmentObject>().scope();
+    } else if (env->is<WasmInstanceEnvironmentObject>()) {
+ scope = &env->as<WasmInstanceEnvironmentObject>().scope();
+ } else if (env->is<WasmFunctionCallObject>()) {
+ scope = &env->as<WasmFunctionCallObject>().scope();
+ } else if (env->is<ScopedLexicalEnvironmentObject>()) {
+ scope = &env->as<ScopedLexicalEnvironmentObject>().scope();
+ }
+
+ if (scope) {
+ fprintf(stderr, " {\n");
+ for (Rooted<BindingIter> bi(cx, BindingIter(scope)); bi; bi++) {
+ if (bi.location().kind() == BindingLocation::Kind::Environment) {
+ UniqueChars bytes = AtomToPrintableString(cx, bi.name());
+ if (!bytes) {
+ fprintf(stderr, " *** out of memory\n");
+ return;
+ }
+
+ fprintf(stderr, " %u: %s %s\n", bi.location().slot(),
+ BindingKindString(bi.kind()), bytes.get());
+ }
+ }
+ fprintf(stderr, "}");
+ }
+
+ fprintf(stderr, "\n");
+
+ if (envObj->is<DebugEnvironmentProxy>()) {
+ envObj = &envObj->as<DebugEnvironmentProxy>().enclosingEnvironment();
+ } else {
+ envObj = &env->enclosingEnvironment();
+ }
+
+ if (envObj) {
+ fprintf(stderr, "-> ");
+ }
+ }
+}
+
+void EnvironmentObject::dump() { DumpEnvironmentObject(this); }
+
+void DebugEnvironmentProxy::dump() { DumpEnvironmentObject(this); }
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
diff --git a/js/src/vm/EnvironmentObject.h b/js/src/vm/EnvironmentObject.h
new file mode 100644
index 0000000000..a5de7e6594
--- /dev/null
+++ b/js/src/vm/EnvironmentObject.h
@@ -0,0 +1,1512 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_EnvironmentObject_h
+#define vm_EnvironmentObject_h
+
+#include <type_traits>
+
+#include "frontend/NameAnalysisTypes.h"
+#include "gc/Barrier.h"
+#include "gc/WeakMap.h"
+#include "js/GCHashTable.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSObject.h"
+#include "vm/ProxyObject.h"
+#include "vm/Scope.h"
+#include "vm/ScopeKind.h" // ScopeKind
+
+namespace js {
+
+class AbstractGeneratorObject;
+class IndirectBindingMap;
+class ModuleObject;
+
+/*
+ * Return a shape representing the static scope containing the variable
+ * accessed by the ALIASEDVAR op at 'pc'.
+ */
+extern SharedShape* EnvironmentCoordinateToEnvironmentShape(JSScript* script,
+ jsbytecode* pc);
+
+// Return the name being accessed by the given ALIASEDVAR op. This function is
+// relatively slow so it should not be used on hot paths.
+extern PropertyName* EnvironmentCoordinateNameSlow(JSScript* script,
+ jsbytecode* pc);
+
+/*** Environment objects ****************************************************/
+
+// clang-format off
+/*
+ * [SMDOC] Environment Objects
+ *
+ * About environments
+ * ------------------
+ *
+ * See also: https://tc39.es/ecma262/#sec-environment-records
+ *
+ * Scoping in ES is specified in terms of "Environment Records". There's a
+ * global Environment Record per realm, and a new Environment Record is created
+ * whenever control enters a function, block, or other scope.
+ *
+ * A "Lexical Environment" is a list of nested Environment Records, innermost
+ * first: everything that's in scope. Throughout SpiderMonkey, "environment"
+ * means a Lexical Environment.
+ *
+ * N.B.: "Scope" means something different: a static scope, the compile-time
+ * analogue of an environment. See Scope.h.
+ *
+ * How SpiderMonkey represents environments
+ * ----------------------------------------
+ *
+ * Some environments are stored as JSObjects. Several kinds of objects
+ * represent environments:
+ *
+ * JSObject
+ * |
+ * +--NativeObject
+ * | |
+ * | +--EnvironmentObject Engine-internal environment
+ * | | |
+ * | | +--CallObject Environment of entire function
+ * | | |
+ * | | +--VarEnvironmentObject See VarScope in Scope.h.
+ * | | |
+ * | | +--ModuleEnvironmentObject
+ * | | | Module top-level environment
+ * | | |
+ * | | +--WasmInstanceEnvironmentObject
+ * | | |
+ * | | +--WasmFunctionCallObject
+ * | | |
+ * | | +--LexicalEnvironmentObject
+ * | | | |
+ * | | | +--ScopedLexicalEnvironmentObject
+ * | | | | | Non-extensible lexical environment
+ * | | | | |
+ * | | | | +--BlockLexicalEnvironmentObject
+ * | | | | | | Blocks and such: syntactic,
+ * | | | | | | non-extensible
+ * | | | | | |
+ * | | | | | +--NamedLambdaObject
+ * | | | | | Environment for `(function f(){...})`
+ * | | | | | containing only a binding for `f`
+ * | | | | |
+ * | | | | +--ClassBodyLexicalEnvironmentObject
+ * | | | | Environment for class body, containing
+ * | | | | private names, private brands, and
+ * | | | | static initializers list
+ * | | | |
+ * | | | +--ExtensibleLexicalEnvironmentObject
+ * | | | |
+ * | | | +--GlobalLexicalEnvironmentObject
+ * | | | | Top-level let/const/class in scripts
+ * | | | |
+ * | | | +--NonSyntacticLexicalEnvironmentObject
+ * | | | See "Non-syntactic environments" below
+ * | | |
+ * | | +--NonSyntacticVariablesObject
+ * | | | See "Non-syntactic environments" below
+ * | | |
+ * | | +--WithEnvironmentObject Presents object properties as bindings
+ * | | |
+ * | | +--RuntimeLexicalErrorObject
+ * | | Special value represents uninitialized
+ * | | lexical slots
+ * | |
+ * | +--GlobalObject The global environment (dynamically
+ * | presents its properties as bindings)
+ * +--ProxyObject
+ * |
+ * +--DebugEnvironmentProxy Environment for debugger eval-in-frame
+ *
+ * EnvironmentObjects are technically real JSObjects but only belong on the
+ * environment chain (that is, fp->environmentChain() or fun->environment()).
+ * They are never exposed to scripts.
+ *
+ * Note that reserved slots in any base classes shown above are fixed for all
+ * derived classes. So e.g. EnvironmentObject::enclosingEnvironment() can
+ * simply access a fixed slot without further dynamic type information.
+ *
+ * When the current environment is represented by an object, the stack frame
+ * has a pointer to that object (see AbstractFramePtr::environmentChain()).
+ * However, that isn't always the case. Where possible, we store binding values
+ * in JS stack slots. For block and function scopes where all bindings can be
+ * stored in stack slots, nothing is allocated in the heap; there is no
+ * environment object.
+ *
+ * Full information about the environment chain is always recoverable:
+ * EnvironmentIter can do it, and we construct a fake environment for debugger
+ * eval-in-frame (see "Debug environment objects" below).
+ *
+ * Syntactic Environments
+ * ----------------------
+ *
+ * Environments may be syntactic, i.e., corresponding to source text, or
+ * non-syntactic, i.e., specially created by embedding. The distinction is
+ * necessary to maintain invariants about the environment chain: non-syntactic
+ * environments may not occur in arbitrary positions in the chain.
+ *
+ * CallObject, ModuleEnvironmentObject, BlockLexicalEnvironmentObject, and
+ * GlobalLexicalEnvironmentObject always represent syntactic
+ * environments. (CallObject is considered syntactic even when it's used as the
+ * scope of strict eval code.) WithEnvironmentObject is syntactic when it's
+ * used to represent the scope of a `with` block.
+ *
+ *
+ * Non-syntactic Environments
+ * --------------------------
+ *
+ * A non-syntactic environment is one that was not created due to JS source
+ * code. On the scope chain, a single NonSyntactic GlobalScope maps to 0+
+ * non-syntactic environment objects. This is contrasted with syntactic
+ * environments, where each scope corresponds to 0 or 1 environment object.
+ *
+ * There are 3 kinds of dynamic environment objects:
+ *
+ * 1. WithEnvironmentObject
+ *
+ * When the embedding compiles or executes a script, it has the option to
+ * pass in a vector of objects to be used as the initial env chain, ordered
+ * from outermost env to innermost env. Each of those objects is wrapped by
+ * a WithEnvironmentObject.
+ *
+ * The innermost object passed in by the embedding becomes a qualified
+ * variables object that captures 'var' bindings. That is, it wraps the
+ * holder object of 'var' bindings.
+ *
+ * Does not hold 'let' or 'const' bindings.
+ *
+ * 2. NonSyntacticVariablesObject
+ *
+ * When the embedding wants qualified 'var' bindings and unqualified
+ * bareword assignments to go on a different object than the global
+ * object. While any object can be made into a qualified variables object,
+ * only the GlobalObject and NonSyntacticVariablesObject are considered
+ * unqualified variables objects.
+ *
+ * Unlike WithEnvironmentObjects that delegate to the object they wrap,
+ * this object is itself the holder of 'var' bindings.
+ *
+ * Does not hold 'let' or 'const' bindings.
+ *
+ * 3. NonSyntacticLexicalEnvironmentObject
+ *
+ * Each non-syntactic object used as a qualified variables object needs to
+ * enclose a non-syntactic lexical environment to hold 'let' and 'const'
+ * bindings. There is a bijection per realm between the non-syntactic
+ * variables objects and their non-syntactic LexicalEnvironmentObjects.
+ *
+ * Does not hold 'var' bindings.
+ *
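+ * As an informal sketch (an assumption about embedding usage, not part of
+ * this file), the WithEnvironmentObject wrapping in case 1 can be produced
+ * via the JS::Evaluate overload that accepts an env chain:
+ *
+ *   JS::RootedObjectVector envChain(cx);
+ *   if (!envChain.append(someObject)) {  // 'someObject' is hypothetical
+ *     return false;
+ *   }
+ *   JS::RootedValue rval(cx);
+ *   bool ok = JS::Evaluate(cx, envChain, options, srcBuf, &rval);
+ *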
+ * The embedding (Gecko) and the debugger use non-syntactic envs for various
+ * purposes, all of which are detailed below. All env chain listings below
+ * are ordered, from top to bottom, outermost to innermost.
+ *
+ * A. Component loading
+ *
+ * Components may be loaded in a shared global mode where most JSMs share a
+ * single global in order to save on memory and avoid CCWs. To support this, a
+ * NonSyntacticVariablesObject is used for each JSM to provide a basic form of
+ * isolation. NonSyntacticLexicalEnvironmentObject and
+ * NonSyntacticVariablesObject are allocated for each JSM, and
+ * NonSyntacticLexicalEnvironmentObject holds lexical variables and
+ * NonSyntacticVariablesObject holds qualified variables. JSMs cannot have
+ * unqualified names, but if unqualified names are used by subscript, they
+ * goes to NonSyntacticVariablesObject.
+ * They have the following env chain:
+ *
+ * BackstagePass global
+ * |
+ * GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * NonSyntacticVariablesObject (qualified 'var's and unqualified names)
+ * |
+ * NonSyntacticLexicalEnvironmentObject[this=nsvo] (lexical vars)
+ *
+ * B.1 Subscript loading
+ *
+ * Subscripts may be loaded into a target object and its associated global.
+ * A NonSyntacticLexicalEnvironmentObject holds lexical variables and a
+ * WithEnvironmentObject holds qualified variables. Unqualified names go
+ * to the target object's global.
+ * They have the following env chain:
+ *
+ * Target object's global (unqualified names)
+ * |
+ * GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * WithEnvironmentObject wrapping target (qualified 'var's)
+ * |
+ * NonSyntacticLexicalEnvironmentObject[this=target] (lexical vars)
+ *
+ * B.2 Subscript loading (Shared-global JSM)
+ *
+ * The target object of a subscript load may be in a JSM with a shared global,
+ * in which case we will also have the NonSyntacticVariablesObject on the
+ * chain.
+ * The NonSyntacticLexicalEnvironmentObject for the target object holds
+ * lexical variables and the WithEnvironmentObject holds qualified
+ * variables. Unqualified names go to the NonSyntacticVariablesObject.
+ *
+ * Target object's global
+ * |
+ * GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * NonSyntacticVariablesObject (unqualified names)
+ * |
+ * NonSyntacticLexicalEnvironmentObject[this=nsvo]
+ * |
+ * WithEnvironmentObject wrapping target (qualified 'var's)
+ * |
+ * NonSyntacticLexicalEnvironmentObject[this=target] (lexical vars)
+ *
+ * C.1. Frame scripts with unique scope
+ *
+ * XUL frame scripts with unique scope are loaded in the same global as
+ * components, with a NonSyntacticVariablesObject as a "polluting global"
+ * for both qualified 'var' variables and unqualified names, a with
+ * environment wrapping a message manager object, and a
+ * NonSyntacticLexicalEnvironmentObject, holding the message manager as
+ * `this`, that holds the lexical variables.
+ * These environment objects, except for the globals, are created for each
+ * run and are not shared across runs. This is done exclusively in
+ * js::ExecuteInScopeChainAndReturnNewScope.
+ *
+ * BackstagePass global
+ * |
+ * GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * NonSyntacticVariablesObject (qualified 'var's and unqualified names)
+ * |
+ * WithEnvironmentObject wrapping messageManager
+ * |
+ * NonSyntacticLexicalEnvironmentObject[this=messageManager] (lexical vars)
+ *
+ * C.2. Frame scripts without unique scope
+ *
+ * XUL frame scripts without unique scope are loaded in the same global as
+ * components, with a with environment wrapping a message manager object for
+ * qualified 'var' variables, and a NonSyntacticLexicalEnvironmentObject,
+ * holding the message manager as `this`, that holds the lexical variables.
+ * The environment chain is associated with the message manager object
+ * and cached for subsequent runs.
+ *
+ * BackstagePass global (unqualified names)
+ * |
+ * GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * WithEnvironmentObject wrapping messageManager (qualified names)
+ * |
+ * NonSyntacticLexicalEnvironmentObject[this=messageManager] (lexical vars)
+ *
+ * D.1. DOM event handlers without direct eval
+ *
+ * DOM event handlers are compiled as functions with HTML elements on the
+ * environment chain. For a chain of elements e0, e1, ..., eN, the innermost
+ * element e0 is the target element, the enclosing elements are containers
+ * such as forms, and the outermost one is the document.
+ * If the DOM event handler doesn't contain a direct eval, the function's
+ * scopes are optimized and frame slots are used for qualified 'var's and
+ * lexical vars. The NonSyntacticLexicalEnvironmentObject's `this` value is
+ * not used, given that the function's `this` value is used instead:
+ *
+ * global (unqualified names)
+ * |
+ * GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * WithEnvironmentObject wrapping eN
+ * |
+ * ...
+ * |
+ * WithEnvironmentObject wrapping e1
+ * |
+ * WithEnvironmentObject wrapping e0
+ * |
+ * NonSyntacticLexicalEnvironmentObject [this=*unused*]
+ *
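+ * As an informal example (hypothetical markup), a content event handler
+ * attribute like the following yields such a chain, with the <input> as
+ * e0 and its <form> as e1:
+ *
+ *   <form>
+ *     <input onclick="handle(event)">
+ *   </form>
+ *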
+ * D.2. DOM event handlers with direct eval
+ *
+ * If a DOM event handler contains a direct eval, the function's scopes are
+ * allocated as environment objects:
+ *
+ * global (unqualified names)
+ * |
+ * GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * ...
+ * |
+ * WithEnvironmentObject wrapping e1
+ * |
+ * WithEnvironmentObject wrapping e0
+ * |
+ * NonSyntacticLexicalEnvironmentObject [this=*unused*]
+ * |
+ * CallObject (qualified 'var's)
+ * |
+ * BlockLexicalEnvironmentObject (lexical vars)
+ *
+ * E.1. Debugger.Frame.prototype.evalWithBindings
+ *
+ * Debugger.Frame.prototype.evalWithBindings uses a WithEnvironmentObject
+ * for the given bindings, and the frame's enclosing scope.
+ *
+ * If the frame is a function frame, it has the following env chain.
+ * Lexical variables are optimized and use frame slots:
+ *
+ * global (unqualified names)
+ * |
+ * [DebugProxy] GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * [DebugProxy] CallObject (qualified 'var's)
+ * |
+ * WithEnvironmentObject wrapping bindings
+ *
+ * If the script contains a direct eval, a BlockLexicalEnvironmentObject is
+ * created for it:
+ *
+ * global (unqualified names)
+ * |
+ * [DebugProxy] GlobalLexicalEnvironmentObject[this=global]
+ * |
+ * [DebugProxy] CallObject (qualified 'var's)
+ * |
+ * BlockLexicalEnvironmentObject (lexical)
+ * |
+ * WithEnvironmentObject wrapping bindings
+ *
+ * NOTE: Debugger.Frame.prototype.eval uses the frame's enclosing scope only,
+ * and it doesn't use any dynamic environment, but it still uses a
+ * non-syntactic scope to perform the `eval` operation.
+ *
+ * E.2. Debugger.Object.prototype.executeInGlobalWithBindings
+ *
+ * Debugger.Object.prototype.executeInGlobalWithBindings uses a
+ * WithEnvironmentObject for the given bindings, and the object's global
+ * scope:
+ *
+ * global (qualified 'var's and unqualified names)
+ * |
+ * GlobalLexicalEnvironmentObject[this=global] (lexical)
+ * |
+ * WithEnvironmentObject wrapping bindings
+ *
+ * NOTE: Debugger.Object.prototype.executeInGlobal uses the object's global
+ * scope only, and it doesn't use any dynamic environment or
+ * non-syntactic scope.
+ *
+ */
+// clang-format on
+
+class EnvironmentObject : public NativeObject {
+ protected:
+ // The enclosing environment. Either another EnvironmentObject, a
+ // GlobalObject, or a non-syntactic environment object.
+ static const uint32_t ENCLOSING_ENV_SLOT = 0;
+
+ inline void setAliasedBinding(uint32_t slot, const Value& v);
+
+ public:
+ // Since every env chain terminates with a global object, whether
+ // GlobalObject or a non-syntactic one, and since those objects do not
+ // derive EnvironmentObject (they have completely different layouts), the
+ // enclosing environment of an EnvironmentObject is necessarily non-null.
+ JSObject& enclosingEnvironment() const {
+ return getReservedSlot(ENCLOSING_ENV_SLOT).toObject();
+ }
+
+ void initEnclosingEnvironment(JSObject* enclosing) {
+ initReservedSlot(ENCLOSING_ENV_SLOT, ObjectOrNullValue(enclosing));
+ }
+
+ static bool nonExtensibleIsFixedSlot(EnvironmentCoordinate ec) {
+ // For non-extensible environment objects isFixedSlot(slot) is equivalent to
+ // slot < MAX_FIXED_SLOTS.
+ return ec.slot() < MAX_FIXED_SLOTS;
+ }
+ static size_t nonExtensibleDynamicSlotIndex(EnvironmentCoordinate ec) {
+ MOZ_ASSERT(!nonExtensibleIsFixedSlot(ec));
+ return ec.slot() - MAX_FIXED_SLOTS;
+ }
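+  // Informal example: assuming MAX_FIXED_SLOTS is 16 (its current value in
+  // NativeObject), slot 3 is a fixed slot, while slot 20 maps to dynamic
+  // slot index 4.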
+
+ // Get or set a name contained in this environment.
+ inline const Value& aliasedBinding(EnvironmentCoordinate ec);
+
+ const Value& aliasedBinding(const BindingIter& bi) {
+ MOZ_ASSERT(bi.location().kind() == BindingLocation::Kind::Environment);
+ return getSlot(bi.location().slot());
+ }
+
+ inline void setAliasedBinding(EnvironmentCoordinate ec, const Value& v);
+
+ inline void setAliasedBinding(const BindingIter& bi, const Value& v);
+
+ // For JITs.
+ static size_t offsetOfEnclosingEnvironment() {
+ return getFixedSlotOffset(ENCLOSING_ENV_SLOT);
+ }
+
+ static uint32_t enclosingEnvironmentSlot() { return ENCLOSING_ENV_SLOT; }
+
+ const char* typeString() const;
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ void dump();
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+};
+
+class CallObject : public EnvironmentObject {
+ protected:
+ static constexpr uint32_t CALLEE_SLOT = 1;
+
+ static CallObject* create(JSContext* cx, HandleScript script,
+ HandleObject enclosing, gc::Heap heap);
+
+ public:
+ static const JSClass class_;
+
+ static constexpr uint32_t RESERVED_SLOTS = 2;
+ static constexpr ObjectFlags OBJECT_FLAGS = {ObjectFlag::QualifiedVarObj};
+
+ /* These functions are internal and are exposed only for JITs. */
+
+ /*
+ * Construct a bare-bones call object given a shape.
+ * The call object must be further initialized to be usable.
+ */
+ static CallObject* createWithShape(JSContext* cx, Handle<SharedShape*> shape);
+
+ static CallObject* createTemplateObject(JSContext* cx, HandleScript script,
+ HandleObject enclosing);
+
+ static CallObject* create(JSContext* cx, AbstractFramePtr frame);
+
+ static CallObject* createHollowForDebug(JSContext* cx, HandleFunction callee);
+
+ // If `env` or any enclosing environment is a CallObject, return that
+ // CallObject; else null.
+ //
+ // `env` may be a DebugEnvironmentProxy, but not a hollow environment.
+ static CallObject* find(JSObject* env);
+
+ /*
+ * When an aliased formal (var accessed by nested closures) is also
+ * aliased by the arguments object, it must of course exist in one
+ * canonical location and that location is always the CallObject. For this
+   * to work, the ArgumentsObject stores a special MagicValue in its array
+   * for forwarded-to-CallObject variables. This MagicValue's payload is the
+   * slot of the CallObject to access.
+ */
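+  //
+  // Informal example of such a function (illustrative only):
+  //
+  //   function f(a) {
+  //     void arguments;      // forces an arguments object
+  //     return () => a;      // closes over the formal 'a'
+  //   }
+  //
+  // Here 'a' lives in f's CallObject, and the arguments object's element
+  // for 'a' is the forwarding MagicValue described above.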
+ const Value& aliasedFormalFromArguments(const Value& argsValue) {
+ return getSlot(ArgumentsObject::SlotFromMagicScopeSlotValue(argsValue));
+ }
+ inline void setAliasedFormalFromArguments(const Value& argsValue,
+ const Value& v);
+
+ JSFunction& callee() const {
+ return getReservedSlot(CALLEE_SLOT).toObject().as<JSFunction>();
+ }
+
+ /* For jit access. */
+ static size_t offsetOfCallee() { return getFixedSlotOffset(CALLEE_SLOT); }
+
+ static size_t calleeSlot() { return CALLEE_SLOT; }
+};
+
+class VarEnvironmentObject : public EnvironmentObject {
+ static constexpr uint32_t SCOPE_SLOT = 1;
+
+ static VarEnvironmentObject* createInternal(JSContext* cx,
+ Handle<SharedShape*> shape,
+ HandleObject enclosing,
+ gc::Heap heap);
+
+ static VarEnvironmentObject* create(JSContext* cx, Handle<Scope*> scope,
+ HandleObject enclosing, gc::Heap heap);
+
+ void initScope(Scope* scope) {
+ initReservedSlot(SCOPE_SLOT, PrivateGCThingValue(scope));
+ }
+
+ public:
+ static const JSClass class_;
+
+ static constexpr uint32_t RESERVED_SLOTS = 2;
+ static constexpr ObjectFlags OBJECT_FLAGS = {ObjectFlag::QualifiedVarObj};
+
+ static VarEnvironmentObject* createForFrame(JSContext* cx,
+ Handle<Scope*> scope,
+ AbstractFramePtr frame);
+ static VarEnvironmentObject* createHollowForDebug(JSContext* cx,
+ Handle<Scope*> scope);
+ static VarEnvironmentObject* createTemplateObject(JSContext* cx,
+ Handle<VarScope*> scope);
+ static VarEnvironmentObject* createWithoutEnclosing(JSContext* cx,
+ Handle<VarScope*> scope);
+
+ Scope& scope() const {
+ Value v = getReservedSlot(SCOPE_SLOT);
+ MOZ_ASSERT(v.isPrivateGCThing());
+ Scope& s = *static_cast<Scope*>(v.toGCThing());
+ MOZ_ASSERT(s.is<VarScope>() || s.is<EvalScope>());
+ return s;
+ }
+
+ bool isForEval() const { return scope().is<EvalScope>(); }
+ bool isForNonStrictEval() const { return scope().kind() == ScopeKind::Eval; }
+};
+
+class ModuleEnvironmentObject : public EnvironmentObject {
+ static constexpr uint32_t MODULE_SLOT = 1;
+
+ static const ObjectOps objectOps_;
+ static const JSClassOps classOps_;
+
+ public:
+ static const JSClass class_;
+
+ static constexpr uint32_t RESERVED_SLOTS = 2;
+ static constexpr ObjectFlags OBJECT_FLAGS = {ObjectFlag::NotExtensible,
+ ObjectFlag::QualifiedVarObj};
+
+ static ModuleEnvironmentObject* create(JSContext* cx,
+ Handle<ModuleObject*> module);
+ ModuleObject& module() const;
+ IndirectBindingMap& importBindings() const;
+
+ bool createImportBinding(JSContext* cx, Handle<JSAtom*> importName,
+ Handle<ModuleObject*> module,
+ Handle<JSAtom*> exportName);
+
+ bool hasImportBinding(Handle<PropertyName*> name);
+
+ bool lookupImport(jsid name, ModuleEnvironmentObject** envOut,
+ mozilla::Maybe<PropertyInfo>* propOut);
+
+ // If `env` or any enclosing environment is a ModuleEnvironmentObject,
+ // return that ModuleEnvironmentObject; else null.
+ //
+ // `env` may be a DebugEnvironmentProxy, but not a hollow environment.
+ static ModuleEnvironmentObject* find(JSObject* env);
+
+ private:
+ static bool lookupProperty(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleObject objp, PropertyResult* propp);
+ static bool hasProperty(JSContext* cx, HandleObject obj, HandleId id,
+ bool* foundp);
+ static bool getProperty(JSContext* cx, HandleObject obj, HandleValue receiver,
+ HandleId id, MutableHandleValue vp);
+ static bool setProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, HandleValue receiver,
+ JS::ObjectOpResult& result);
+ static bool getOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc);
+ static bool deleteProperty(JSContext* cx, HandleObject obj, HandleId id,
+ ObjectOpResult& result);
+ static bool newEnumerate(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector properties,
+ bool enumerableOnly);
+};
+
+class WasmInstanceEnvironmentObject : public EnvironmentObject {
+  // Currently WasmInstanceEnvironmentObjects do not use their scopes in a
+ // meaningful way. However, it is an invariant of DebugEnvironments that
+ // environments kept in those maps have live scopes, thus this strong
+ // reference.
+ static constexpr uint32_t SCOPE_SLOT = 1;
+
+ public:
+ static const JSClass class_;
+
+ static constexpr uint32_t RESERVED_SLOTS = 2;
+ static constexpr ObjectFlags OBJECT_FLAGS = {ObjectFlag::NotExtensible};
+
+ static WasmInstanceEnvironmentObject* createHollowForDebug(
+ JSContext* cx, Handle<WasmInstanceScope*> scope);
+ WasmInstanceScope& scope() const {
+ Value v = getReservedSlot(SCOPE_SLOT);
+ MOZ_ASSERT(v.isPrivateGCThing());
+ return *static_cast<WasmInstanceScope*>(v.toGCThing());
+ }
+};
+
+class WasmFunctionCallObject : public EnvironmentObject {
+ // Currently WasmFunctionCallObjects do not use their scopes in a
+ // meaningful way. However, it is an invariant of DebugEnvironments that
+ // environments kept in those maps have live scopes, thus this strong
+ // reference.
+ static constexpr uint32_t SCOPE_SLOT = 1;
+
+ public:
+ static const JSClass class_;
+
+ // TODO Check what Debugger behavior should be when it evaluates a
+ // var declaration.
+ static constexpr uint32_t RESERVED_SLOTS = 2;
+ static constexpr ObjectFlags OBJECT_FLAGS = {ObjectFlag::NotExtensible};
+
+ static WasmFunctionCallObject* createHollowForDebug(
+ JSContext* cx, HandleObject enclosing, Handle<WasmFunctionScope*> scope);
+ WasmFunctionScope& scope() const {
+ Value v = getReservedSlot(SCOPE_SLOT);
+ MOZ_ASSERT(v.isPrivateGCThing());
+ return *static_cast<WasmFunctionScope*>(v.toGCThing());
+ }
+};
+
+// Abstract base class for environments that can contain let/const bindings,
+// plus a few other kinds of environments, such as `catch` blocks, that have
+// similar behavior.
+class LexicalEnvironmentObject : public EnvironmentObject {
+ protected:
+ // Global and non-syntactic lexical environments need to store a 'this'
+ // object and all other lexical environments have a fixed shape and store a
+ // backpointer to the LexicalScope.
+ //
+ // Since the two sets are disjoint, we only use one slot to save space.
+ static constexpr uint32_t THIS_VALUE_OR_SCOPE_SLOT = 1;
+
+ public:
+ static const JSClass class_;
+
+ static constexpr uint32_t RESERVED_SLOTS = 2;
+
+ protected:
+ static LexicalEnvironmentObject* create(JSContext* cx,
+ Handle<SharedShape*> shape,
+ HandleObject enclosing,
+ gc::Heap heap);
+
+ public:
+ // Is this the global lexical scope?
+ bool isGlobal() const { return enclosingEnvironment().is<GlobalObject>(); }
+
+ // Global and non-syntactic lexical scopes are extensible. All other
+ // lexical scopes are not.
+ bool isExtensible() const;
+
+ // Is this a syntactic (i.e. corresponds to a source text) lexical
+ // environment?
+ bool isSyntactic() const { return !isExtensible() || isGlobal(); }
+};
+
+// A non-extensible lexical environment.
+//
+// Used for blocks (ScopeKind::Lexical) and several other scope kinds,
+// including Catch, NamedLambda, FunctionLexical, and ClassBody.
+class ScopedLexicalEnvironmentObject : public LexicalEnvironmentObject {
+ public:
+ static constexpr ObjectFlags OBJECT_FLAGS = {ObjectFlag::NotExtensible};
+
+ Scope& scope() const {
+ Value v = getReservedSlot(THIS_VALUE_OR_SCOPE_SLOT);
+ MOZ_ASSERT(!isExtensible() && v.isPrivateGCThing());
+ return *static_cast<Scope*>(v.toGCThing());
+ }
+
+ bool isClassBody() const { return scope().kind() == ScopeKind::ClassBody; }
+
+ void initScope(Scope* scope) {
+ initReservedSlot(THIS_VALUE_OR_SCOPE_SLOT, PrivateGCThingValue(scope));
+ }
+};
+
+class BlockLexicalEnvironmentObject : public ScopedLexicalEnvironmentObject {
+ protected:
+ static BlockLexicalEnvironmentObject* create(JSContext* cx,
+ Handle<LexicalScope*> scope,
+ HandleObject enclosing,
+ gc::Heap heap);
+
+ public:
+ static constexpr ObjectFlags OBJECT_FLAGS = {ObjectFlag::NotExtensible};
+
+ static BlockLexicalEnvironmentObject* createForFrame(
+ JSContext* cx, Handle<LexicalScope*> scope, AbstractFramePtr frame);
+
+ static BlockLexicalEnvironmentObject* createHollowForDebug(
+ JSContext* cx, Handle<LexicalScope*> scope);
+
+ static BlockLexicalEnvironmentObject* createTemplateObject(
+ JSContext* cx, Handle<LexicalScope*> scope);
+
+ static BlockLexicalEnvironmentObject* createWithoutEnclosing(
+ JSContext* cx, Handle<LexicalScope*> scope);
+
+ // Create a new BlockLexicalEnvironmentObject with the same enclosing env and
+ // variable values as this.
+ static BlockLexicalEnvironmentObject* clone(
+ JSContext* cx, Handle<BlockLexicalEnvironmentObject*> env);
+
+ // Create a new BlockLexicalEnvironmentObject with the same enclosing env as
+ // this, with all variables uninitialized.
+ static BlockLexicalEnvironmentObject* recreate(
+ JSContext* cx, Handle<BlockLexicalEnvironmentObject*> env);
+
+ // The LexicalScope that created this environment.
+ LexicalScope& scope() const {
+ return ScopedLexicalEnvironmentObject::scope().as<LexicalScope>();
+ }
+};
+
+class NamedLambdaObject : public BlockLexicalEnvironmentObject {
+ static NamedLambdaObject* create(JSContext* cx, HandleFunction callee,
+ HandleObject enclosing, gc::Heap heap);
+
+ public:
+ static NamedLambdaObject* createTemplateObject(JSContext* cx,
+ HandleFunction callee);
+
+ static NamedLambdaObject* createWithoutEnclosing(JSContext* cx,
+ HandleFunction callee);
+
+ static NamedLambdaObject* create(JSContext* cx, AbstractFramePtr frame);
+
+ // For JITs.
+ static size_t lambdaSlot();
+
+ static size_t offsetOfLambdaSlot() {
+ return getFixedSlotOffset(lambdaSlot());
+ }
+};
+
+class ClassBodyLexicalEnvironmentObject
+ : public ScopedLexicalEnvironmentObject {
+ static ClassBodyLexicalEnvironmentObject* create(
+ JSContext* cx, Handle<ClassBodyScope*> scope, HandleObject enclosing,
+ gc::Heap heap);
+
+ public:
+ static ClassBodyLexicalEnvironmentObject* createForFrame(
+ JSContext* cx, Handle<ClassBodyScope*> scope, AbstractFramePtr frame);
+
+ static ClassBodyLexicalEnvironmentObject* createTemplateObject(
+ JSContext* cx, Handle<ClassBodyScope*> scope);
+
+ static ClassBodyLexicalEnvironmentObject* createWithoutEnclosing(
+ JSContext* cx, Handle<ClassBodyScope*> scope);
+
+ // The ClassBodyScope that created this environment.
+ ClassBodyScope& scope() const {
+ return ScopedLexicalEnvironmentObject::scope().as<ClassBodyScope>();
+ }
+
+ static uint32_t privateBrandSlot() { return JSSLOT_FREE(&class_); }
+};
+
+// Global and non-syntactic lexical environments are extensible.
+class ExtensibleLexicalEnvironmentObject : public LexicalEnvironmentObject {
+ public:
+ JSObject* thisObject() const;
+
+ // For a given global object or JSMEnvironment `obj`, return the associated
+ // global lexical or non-syntactic lexical environment, where top-level `let`
+ // bindings are added.
+ static ExtensibleLexicalEnvironmentObject* forVarEnvironment(JSObject* obj);
+
+ protected:
+ void initThisObject(JSObject* obj) {
+ MOZ_ASSERT(isGlobal() || !isSyntactic());
+ JSObject* thisObj = GetThisObject(obj);
+ initReservedSlot(THIS_VALUE_OR_SCOPE_SLOT, ObjectValue(*thisObj));
+ }
+};
+
+// The global lexical environment, where global let/const/class bindings are
+// added.
+class GlobalLexicalEnvironmentObject
+ : public ExtensibleLexicalEnvironmentObject {
+ public:
+ static GlobalLexicalEnvironmentObject* create(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ GlobalObject& global() const {
+ return enclosingEnvironment().as<GlobalObject>();
+ }
+
+ void setWindowProxyThisObject(JSObject* obj);
+
+ static constexpr size_t offsetOfThisValueSlot() {
+ return getFixedSlotOffset(THIS_VALUE_OR_SCOPE_SLOT);
+ }
+};
+
+// Non-standard. See "Non-syntactic Environments" above.
+class NonSyntacticLexicalEnvironmentObject
+ : public ExtensibleLexicalEnvironmentObject {
+ public:
+ static NonSyntacticLexicalEnvironmentObject* create(JSContext* cx,
+ HandleObject enclosing,
+ HandleObject thisv);
+};
+
+// A non-syntactic dynamic scope object that captures non-lexical
+// bindings. That is, a scope object that captures both qualified var
+// assignments and unqualified bareword assignments. Its parent is always the
+// global lexical environment.
+//
+// This is used in ExecuteInGlobalAndReturnScope and sits in front of the
+// global scope to store 'var' bindings, and to store fresh properties created
+// by assignments to undeclared variables that otherwise would have gone on
+// the global object.
+class NonSyntacticVariablesObject : public EnvironmentObject {
+ public:
+ static const JSClass class_;
+
+ static constexpr uint32_t RESERVED_SLOTS = 1;
+ static constexpr ObjectFlags OBJECT_FLAGS = {};
+
+ static NonSyntacticVariablesObject* create(JSContext* cx);
+};
+
+extern bool CreateNonSyntacticEnvironmentChain(JSContext* cx,
+ JS::HandleObjectVector envChain,
+ MutableHandleObject env);
+
+// With environment objects on the run-time environment chain.
+class WithEnvironmentObject : public EnvironmentObject {
+ static constexpr uint32_t OBJECT_SLOT = 1;
+ static constexpr uint32_t THIS_SLOT = 2;
+ static constexpr uint32_t SCOPE_SLOT = 3;
+
+ public:
+ static const JSClass class_;
+
+ static constexpr uint32_t RESERVED_SLOTS = 4;
+ static constexpr ObjectFlags OBJECT_FLAGS = {};
+
+ static WithEnvironmentObject* create(JSContext* cx, HandleObject object,
+ HandleObject enclosing,
+ Handle<WithScope*> scope);
+ static WithEnvironmentObject* createNonSyntactic(JSContext* cx,
+ HandleObject object,
+ HandleObject enclosing);
+
+ /* Return the 'o' in 'with (o)'. */
+ JSObject& object() const;
+
+ /* Return object for GetThisValue. */
+ JSObject* withThis() const;
+
+ /*
+ * Return whether this object is a syntactic with object. If not, this is
+ * a With object we inserted between the outermost syntactic scope and the
+ * global object to wrap the environment chain someone explicitly passed
+ * via JSAPI to CompileFunction or script evaluation.
+ */
+ bool isSyntactic() const;
+
+ // For syntactic with environment objects, the with scope.
+ WithScope& scope() const;
+
+ static inline size_t objectSlot() { return OBJECT_SLOT; }
+
+ static inline size_t thisSlot() { return THIS_SLOT; }
+};
+
+// Internal scope object used by JSOp::BindName upon encountering an
+// uninitialized lexical slot or an assignment to a 'const' binding.
+//
+// ES6 lexical bindings cannot be accessed in any way (throwing
+// ReferenceErrors) until initialized. Normally, NAME operations
+// unconditionally check for uninitialized lexical slots. When getting or
+// looking up names, this can be done without slowing down normal operations
+// on the return value. When setting names, however, we do not want to pollute
+// all set-property paths with uninitialized lexical checks. For setting names
+// (i.e. JSOp::SetName), we emit an accompanying, preceding JSOp::BindName which
+// finds the right scope on which to set the name. Moreover, when the name on
+// the scope is an uninitialized lexical, we cannot throw eagerly, as the spec
+// demands that the error be thrown after evaluating the RHS of
+// assignments. Instead, this sentinel scope object is pushed on the stack.
+// Attempting to access anything on this scope throws the appropriate
+// ReferenceError.
+//
+// ES6 'const' bindings induce a runtime error when assigned to outside
+// of initialization, regardless of strictness.
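+//
+// Informal example of the deferred throw described above ('sideEffect' is
+// a hypothetical function):
+//
+//   {
+//     x = sideEffect();  // the RHS is evaluated first; the write to the
+//     let x;             // still-uninitialized 'x' then throws a
+//   }                    // ReferenceError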
+class RuntimeLexicalErrorObject : public EnvironmentObject {
+ static const unsigned ERROR_SLOT = 1;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 2;
+ static const JSClass class_;
+
+ static RuntimeLexicalErrorObject* create(JSContext* cx,
+ HandleObject enclosing,
+ unsigned errorNumber);
+
+ unsigned errorNumber() { return getReservedSlot(ERROR_SLOT).toInt32(); }
+};
+
+/****************************************************************************/
+
+// An environment iterator describes the active environments starting from an
+// environment, scope pair. This pair may be derived from the current point of
+// execution in a frame. If derived in such a fashion, the EnvironmentIter
+// tracks whether the current scope is within the extent of this initial
+// frame. Here, "frame" means a single activation of: a function, eval, or
+// global code.
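+//
+// Informal usage sketch, mirroring the accessors below:
+//
+//   for (EnvironmentIter ei(cx, env, scope); ei; ei++) {
+//     if (ei.hasAnyEnvironmentObject()) {
+//       EnvironmentObject& envObj = ei.environment();
+//       // ... inspect envObj ...
+//     }
+//   }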
+class MOZ_RAII EnvironmentIter {
+ Rooted<ScopeIter> si_;
+ RootedObject env_;
+ AbstractFramePtr frame_;
+
+ void incrementScopeIter();
+ void settle();
+
+ // No value semantics.
+ EnvironmentIter(const EnvironmentIter& ei) = delete;
+
+ public:
+ // Constructing from a copy of an existing EnvironmentIter.
+ EnvironmentIter(JSContext* cx, const EnvironmentIter& ei);
+
+  // Constructing from an environment, scope pair. All environments are
+  // considered not to be withinInitialFrame, since no frame is given.
+ EnvironmentIter(JSContext* cx, JSObject* env, Scope* scope);
+
+ // Constructing from a frame. Places the EnvironmentIter on the innermost
+ // environment at pc.
+ EnvironmentIter(JSContext* cx, AbstractFramePtr frame, const jsbytecode* pc);
+
+  // Constructing from an environment, scope, and frame. The frame is given
+  // in order to initialize the proper enclosing environment/scope.
+ EnvironmentIter(JSContext* cx, JSObject* env, Scope* scope,
+ AbstractFramePtr frame);
+
+ bool done() const { return si_.done(); }
+
+ explicit operator bool() const { return !done(); }
+
+ void operator++(int) {
+ if (hasAnyEnvironmentObject()) {
+ env_ = &env_->as<EnvironmentObject>().enclosingEnvironment();
+ }
+ incrementScopeIter();
+ settle();
+ }
+
+ EnvironmentIter& operator++() {
+ operator++(1);
+ return *this;
+ }
+
+ // If done():
+ JSObject& enclosingEnvironment() const;
+
+ // If !done():
+ bool hasNonSyntacticEnvironmentObject() const;
+
+ bool hasSyntacticEnvironment() const { return si_.hasSyntacticEnvironment(); }
+
+ bool hasAnyEnvironmentObject() const {
+ return hasNonSyntacticEnvironmentObject() || hasSyntacticEnvironment();
+ }
+
+ EnvironmentObject& environment() const {
+ MOZ_ASSERT(hasAnyEnvironmentObject());
+ return env_->as<EnvironmentObject>();
+ }
+
+ Scope& scope() const { return *si_.scope(); }
+
+ Scope* maybeScope() const {
+ if (si_) {
+ return si_.scope();
+ }
+ return nullptr;
+ }
+
+ JSFunction& callee() const { return env_->as<CallObject>().callee(); }
+
+ bool withinInitialFrame() const { return !!frame_; }
+
+ AbstractFramePtr initialFrame() const {
+ MOZ_ASSERT(withinInitialFrame());
+ return frame_;
+ }
+
+ AbstractFramePtr maybeInitialFrame() const { return frame_; }
+};
+
+// The key in MissingEnvironmentMap. For live frames, maps the live frame to
+// its synthesized environment. For completely optimized-out environments,
+// maps the Scope to its synthesized environment. The envs we synthesize for
+// Scopes are read-only, and we never use their parent links, so they don't
+// need to be distinct.
+//
+// That is, completely optimized out environments can't be distinguished by
+// frame. Note that even if the frame corresponding to the Scope is live on
+// the stack, it is unsound to synthesize an environment from that live
+// frame. In other words, the provenance of the environment chain is from
+// allocated closures (i.e., allocation sites) and is irrecoverable from
+// simple stack inspection (i.e., call sites).
+class MissingEnvironmentKey {
+ friend class LiveEnvironmentVal;
+
+ AbstractFramePtr frame_;
+ Scope* scope_;
+
+ public:
+ explicit MissingEnvironmentKey(const EnvironmentIter& ei)
+ : frame_(ei.maybeInitialFrame()), scope_(ei.maybeScope()) {}
+
+ MissingEnvironmentKey(AbstractFramePtr frame, Scope* scope)
+ : frame_(frame), scope_(scope) {}
+
+ AbstractFramePtr frame() const { return frame_; }
+ Scope* scope() const { return scope_; }
+
+ void updateScope(Scope* scope) { scope_ = scope; }
+ void updateFrame(AbstractFramePtr frame) { frame_ = frame; }
+
+ // For use as hash policy.
+ using Lookup = MissingEnvironmentKey;
+ static HashNumber hash(MissingEnvironmentKey sk);
+ static bool match(MissingEnvironmentKey sk1, MissingEnvironmentKey sk2);
+ bool operator!=(const MissingEnvironmentKey& other) const {
+ return frame_ != other.frame_ || scope_ != other.scope_;
+ }
+ static void rekey(MissingEnvironmentKey& k,
+ const MissingEnvironmentKey& newKey) {
+ k = newKey;
+ }
+};
+
+// The value in LiveEnvironmentMap, mapped from by live environment objects.
+class LiveEnvironmentVal {
+ friend class DebugEnvironments;
+ friend class MissingEnvironmentKey;
+
+ AbstractFramePtr frame_;
+ HeapPtr<Scope*> scope_;
+
+ static void staticAsserts();
+
+ public:
+ explicit LiveEnvironmentVal(const EnvironmentIter& ei)
+ : frame_(ei.initialFrame()), scope_(ei.maybeScope()) {}
+
+ AbstractFramePtr frame() const { return frame_; }
+
+ void updateFrame(AbstractFramePtr frame) { frame_ = frame; }
+
+ bool traceWeak(JSTracer* trc);
+};
+
+/****************************************************************************/
+
+/*
+ * [SMDOC] Debug environment objects
+ *
+ * The frontend optimizes unaliased variables into stack slots and can optimize
+ * away whole EnvironmentObjects. So when the debugger wants to perform an
+ * unexpected eval-in-frame (or otherwise access the environment),
+ * `fp->environmentChain` is often incomplete. This is a problem: a major use
+ * case for eval-in-frame is to access the local variables in debuggee code.
+ *
+ * Even when all EnvironmentObjects exist, giving complete information for all
+ * bindings, stack and heap, there's another issue: eval-in-frame code can
+ * create closures that capture stack locals. The variable slots go away when
+ * the frame is popped, but the closure, which uses them, may survive.
+ *
+ * To solve both problems, eval-in-frame code is compiled and run against a
+ * "debug environment chain" of DebugEnvironmentProxy objects rather than real
+ * EnvironmentObjects. The `GetDebugEnvironmentFor` functions below create
+ * these proxies, one to sit in front of each existing EnvironmentObject. They
+ * also create bogus "hollow" EnvironmentObjects to stand in for environments
+ * that were optimized away; and proxies for those. The frontend sees these
+ * environments as something like `with` scopes, and emits deoptimized bytecode
+ * instructions for all variable accesses.
+ *
+ * When eval-in-frame code runs, `fp->environmentChain` points to this chain of
+ * proxies. On each variable access, the proxy laboriously figures out what to
+ * do. See e.g. `DebuggerEnvironmentProxyHandler::handleUnaliasedAccess`.
+ *
+ * There's a limit to what the proxies can manage, since they're proxying
+ * environments that are already optimized. Some debugger operations, like
+ * redefining a lexical binding, can fail when a true direct eval would
+ * succeed. Even plain variable accesses can throw, if the variable has been
+ * optimized away.
+ *
+ * To support accessing stack variables after they've gone out of scope, we
+ * copy the variables to the heap as they leave scope. See
+ * `DebugEnvironments::onPopCall` and `onPopLexical`.
+ *
+ * `GetDebugEnvironmentFor*` guarantees that the same DebugEnvironmentProxy is
+ * always produced for the same underlying environment (optimized or not!).
+ * This is maintained by some bookkeeping information stored in
+ * `DebugEnvironments`.
+ */
+
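+// As an informal example (assumed debugger-side usage), the machinery
+// above is what backs an eval-in-frame like:
+//
+//   dbg.getNewestFrame().eval("someLocal");  // 'dbg' is a hypothetical
+//                                            // Debugger instance
+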
+extern JSObject* GetDebugEnvironmentForFunction(JSContext* cx,
+ HandleFunction fun);
+
+extern JSObject* GetDebugEnvironmentForSuspendedGenerator(
+ JSContext* cx, JSScript* script, AbstractGeneratorObject& genObj);
+
+extern JSObject* GetDebugEnvironmentForFrame(JSContext* cx,
+ AbstractFramePtr frame,
+ jsbytecode* pc);
+
+extern JSObject* GetDebugEnvironmentForGlobalLexicalEnvironment(JSContext* cx);
+extern Scope* GetEnvironmentScope(const JSObject& env);
+
+/* Provides debugger access to an environment. */
+class DebugEnvironmentProxy : public ProxyObject {
+ /*
+ * The enclosing environment on the dynamic environment chain. This slot is
+ * analogous to the ENCLOSING_ENV_SLOT of a EnvironmentObject.
+ */
+ static const unsigned ENCLOSING_SLOT = 0;
+
+ /*
+ * NullValue or a dense array holding the unaliased variables of a function
+ * frame that has been popped.
+ */
+ static const unsigned SNAPSHOT_SLOT = 1;
+
+ public:
+ static DebugEnvironmentProxy* create(JSContext* cx, EnvironmentObject& env,
+ HandleObject enclosing);
+
+ // NOTE: The environment may be a debug hollow with invalid
+ // enclosingEnvironment. Always use the enclosingEnvironment accessor on
+ // the DebugEnvironmentProxy in order to walk the environment chain.
+ EnvironmentObject& environment() const;
+ JSObject& enclosingEnvironment() const;
+
+ // May only be called for proxies to function call objects or modules
+ // with top-level-await.
+ ArrayObject* maybeSnapshot() const;
+ void initSnapshot(ArrayObject& snapshot);
+
+ // Currently, the 'declarative' environments are function, module, and
+ // lexical environments.
+ bool isForDeclarative() const;
+
+ // Get a property by 'id', but returns sentinel values instead of throwing
+ // on exceptional cases.
+ static bool getMaybeSentinelValue(JSContext* cx,
+ Handle<DebugEnvironmentProxy*> env,
+ HandleId id, MutableHandleValue vp);
+
+ // Returns true iff this is a function environment with its own this-binding
+ // (all functions except arrow functions).
+ bool isFunctionEnvironmentWithThis();
+
+ // Does this debug environment not have a real counterpart or was never
+ // live (and thus does not have a synthesized EnvironmentObject or a
+ // snapshot)?
+ bool isOptimizedOut() const;
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ void dump();
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+};
+
+/* Maintains per-realm debug environment bookkeeping information. */
+class DebugEnvironments {
+ Zone* zone_;
+
+ /* The map from (non-debug) environments to debug environments. */
+ ObjectWeakMap proxiedEnvs;
+
+ /*
+ * The map from live frames which have optimized-away environments to the
+ * corresponding debug environments.
+ */
+ typedef HashMap<MissingEnvironmentKey, WeakHeapPtr<DebugEnvironmentProxy*>,
+ MissingEnvironmentKey, ZoneAllocPolicy>
+ MissingEnvironmentMap;
+ MissingEnvironmentMap missingEnvs;
+
+ /*
+ * The map from environment objects of live frames to the live frame. This
+ * map updated lazily whenever the debugger needs the information. In
+ * between two lazy updates, liveEnvs becomes incomplete (but not invalid,
+ * onPop* removes environments as they are popped). Thus, two consecutive
+ * debugger lazy updates of liveEnvs need only fill in the new
+ * environments.
+ */
+ typedef GCHashMap<WeakHeapPtr<JSObject*>, LiveEnvironmentVal,
+ StableCellHasher<WeakHeapPtr<JSObject*>>, ZoneAllocPolicy>
+ LiveEnvironmentMap;
+ LiveEnvironmentMap liveEnvs;
+
+ public:
+ DebugEnvironments(JSContext* cx, Zone* zone);
+ ~DebugEnvironments();
+
+ Zone* zone() const { return zone_; }
+
+ private:
+ static DebugEnvironments* ensureRealmData(JSContext* cx);
+
+ template <typename Environment, typename Scope>
+ static void onPopGeneric(JSContext* cx, const EnvironmentIter& ei);
+
+ public:
+ void trace(JSTracer* trc);
+ void traceWeak(JSTracer* trc);
+ void finish();
+#ifdef JS_GC_ZEAL
+ void checkHashTablesAfterMovingGC();
+#endif
+
+ // If a live frame has a synthesized entry in missingEnvs, make sure it's not
+ // collected.
+ void traceLiveFrame(JSTracer* trc, AbstractFramePtr frame);
+
+ static DebugEnvironmentProxy* hasDebugEnvironment(JSContext* cx,
+ EnvironmentObject& env);
+ static bool addDebugEnvironment(JSContext* cx, Handle<EnvironmentObject*> env,
+ Handle<DebugEnvironmentProxy*> debugEnv);
+
+ static DebugEnvironmentProxy* hasDebugEnvironment(JSContext* cx,
+ const EnvironmentIter& ei);
+ static bool addDebugEnvironment(JSContext* cx, const EnvironmentIter& ei,
+ Handle<DebugEnvironmentProxy*> debugEnv);
+
+ static bool updateLiveEnvironments(JSContext* cx);
+ static LiveEnvironmentVal* hasLiveEnvironment(EnvironmentObject& env);
+ static void unsetPrevUpToDateUntil(JSContext* cx, AbstractFramePtr frame);
+
+ // When a frame bails out from Ion to Baseline, there might be missing
+ // envs keyed on, and live envs containing, the old
+ // RematerializedFrame. Forward those values to the new BaselineFrame.
+ static void forwardLiveFrame(JSContext* cx, AbstractFramePtr from,
+ AbstractFramePtr to);
+
+ // When an environment is popped, we store a snapshot of its bindings that
+ // live on the frame.
+ //
+ // This is done during frame unwinding, which cannot handle errors
+ // gracefully. Errors result in no snapshot being set on the
+ // DebugEnvironmentProxy.
+ static void takeFrameSnapshot(JSContext* cx,
+ Handle<DebugEnvironmentProxy*> debugEnv,
+ AbstractFramePtr frame);
+
+ // In debug-mode, these must be called whenever exiting a scope that might
+ // have stack-allocated locals.
+ static void onPopCall(JSContext* cx, AbstractFramePtr frame);
+ static void onPopVar(JSContext* cx, const EnvironmentIter& ei);
+ static void onPopLexical(JSContext* cx, const EnvironmentIter& ei);
+ static void onPopLexical(JSContext* cx, AbstractFramePtr frame,
+ const jsbytecode* pc);
+ static void onPopWith(AbstractFramePtr frame);
+ static void onPopModule(JSContext* cx, const EnvironmentIter& ei);
+ static void onRealmUnsetIsDebuggee(Realm* realm);
+};
+
+} /* namespace js */
+
+template <>
+inline bool JSObject::is<js::EnvironmentObject>() const {
+ return is<js::CallObject>() || is<js::VarEnvironmentObject>() ||
+ is<js::ModuleEnvironmentObject>() ||
+ is<js::WasmInstanceEnvironmentObject>() ||
+ is<js::WasmFunctionCallObject>() ||
+ is<js::LexicalEnvironmentObject>() ||
+ is<js::WithEnvironmentObject>() ||
+ is<js::NonSyntacticVariablesObject>() ||
+ is<js::RuntimeLexicalErrorObject>();
+}
+
+template <>
+inline bool JSObject::is<js::ScopedLexicalEnvironmentObject>() const {
+ return is<js::LexicalEnvironmentObject>() &&
+ !as<js::LexicalEnvironmentObject>().isExtensible();
+}
+
+template <>
+inline bool JSObject::is<js::BlockLexicalEnvironmentObject>() const {
+ return is<js::ScopedLexicalEnvironmentObject>() &&
+ !as<js::ScopedLexicalEnvironmentObject>().isClassBody();
+}
+
+template <>
+inline bool JSObject::is<js::ClassBodyLexicalEnvironmentObject>() const {
+ return is<js::ScopedLexicalEnvironmentObject>() &&
+ as<js::ScopedLexicalEnvironmentObject>().isClassBody();
+}
+
+template <>
+inline bool JSObject::is<js::ExtensibleLexicalEnvironmentObject>() const {
+ return is<js::LexicalEnvironmentObject>() &&
+ as<js::LexicalEnvironmentObject>().isExtensible();
+}
+
+template <>
+inline bool JSObject::is<js::GlobalLexicalEnvironmentObject>() const {
+ return is<js::LexicalEnvironmentObject>() &&
+ as<js::LexicalEnvironmentObject>().isGlobal();
+}
+
+template <>
+inline bool JSObject::is<js::NonSyntacticLexicalEnvironmentObject>() const {
+ return is<js::LexicalEnvironmentObject>() &&
+ !as<js::LexicalEnvironmentObject>().isSyntactic();
+}
+
+template <>
+inline bool JSObject::is<js::NamedLambdaObject>() const {
+ return is<js::BlockLexicalEnvironmentObject>() &&
+ as<js::BlockLexicalEnvironmentObject>().scope().isNamedLambda();
+}
+
+template <>
+bool JSObject::is<js::DebugEnvironmentProxy>() const;
+
+namespace js {
+
+inline bool IsSyntacticEnvironment(JSObject* env) {
+ if (!env->is<EnvironmentObject>()) {
+ return false;
+ }
+
+ if (env->is<WithEnvironmentObject>()) {
+ return env->as<WithEnvironmentObject>().isSyntactic();
+ }
+
+ if (env->is<LexicalEnvironmentObject>()) {
+ return env->as<LexicalEnvironmentObject>().isSyntactic();
+ }
+
+ if (env->is<NonSyntacticVariablesObject>()) {
+ return false;
+ }
+
+ return true;
+}
+
+inline bool IsExtensibleLexicalEnvironment(JSObject* env) {
+ return env->is<ExtensibleLexicalEnvironmentObject>();
+}
+
+inline bool IsGlobalLexicalEnvironment(JSObject* env) {
+ return env->is<GlobalLexicalEnvironmentObject>();
+}
+
+inline bool IsNSVOLexicalEnvironment(JSObject* env) {
+ return env->is<LexicalEnvironmentObject>() &&
+ env->as<LexicalEnvironmentObject>()
+ .enclosingEnvironment()
+ .is<NonSyntacticVariablesObject>();
+}
+
+inline JSObject* MaybeUnwrapWithEnvironment(JSObject* env) {
+ if (env->is<WithEnvironmentObject>()) {
+ return &env->as<WithEnvironmentObject>().object();
+ }
+ return env;
+}
+
+template <typename SpecificEnvironment>
+inline bool IsFrameInitialEnvironment(AbstractFramePtr frame,
+ SpecificEnvironment& env) {
+ // A frame's initial environment is the innermost environment
+ // corresponding to the scope chain from frame.script()->bodyScope() to
+ // frame.script()->outermostScope(). This environment must be on the chain
+ // for the frame to be considered initialized. That is, it must be on the
+ // chain for the environment chain to fully match the scope chain at the
+ // start of execution in the frame.
+ //
+ // This logic must be in sync with the HAS_INITIAL_ENV logic in
+ // BaselineStackBuilder::buildBaselineFrame.
+
+ // A function frame's CallObject, if present, is always the initial
+ // environment.
+ if constexpr (std::is_same_v<SpecificEnvironment, CallObject>) {
+ return true;
+ }
+
+ // For an eval frame, the VarEnvironmentObject, if present, is always the
+ // initial environment.
+ if constexpr (std::is_same_v<SpecificEnvironment, VarEnvironmentObject>) {
+ if (frame.isEvalFrame()) {
+ return true;
+ }
+ }
+
+ // For named lambda frames without CallObjects (i.e., no binding in the
+ // body of the function was closed over), the NamedLambdaObject
+ // corresponding to the named lambda scope is the initial environment.
+ if constexpr (std::is_same_v<SpecificEnvironment, NamedLambdaObject>) {
+ if (frame.isFunctionFrame() &&
+ frame.callee()->needsNamedLambdaEnvironment() &&
+ !frame.callee()->needsCallObject()) {
+ LexicalScope* namedLambdaScope = frame.script()->maybeNamedLambdaScope();
+ return &env.scope() == namedLambdaScope;
+ }
+ }
+
+ return false;
+}
+
+extern bool CreateObjectsForEnvironmentChain(JSContext* cx,
+ HandleObjectVector chain,
+ HandleObject terminatingEnv,
+ MutableHandleObject envObj);
+
+ModuleObject* GetModuleObjectForScript(JSScript* script);
+
+ModuleEnvironmentObject* GetModuleEnvironmentForScript(JSScript* script);
+
+[[nodiscard]] bool GetThisValueForDebuggerFrameMaybeOptimizedOut(
+ JSContext* cx, AbstractFramePtr frame, const jsbytecode* pc,
+ MutableHandleValue res);
+[[nodiscard]] bool GetThisValueForDebuggerSuspendedGeneratorMaybeOptimizedOut(
+ JSContext* cx, AbstractGeneratorObject& genObj, JSScript* script,
+ MutableHandleValue res);
+
+[[nodiscard]] bool CheckCanDeclareGlobalBinding(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<PropertyName*> name,
+ bool isFunction);
+
+[[nodiscard]] bool CheckLexicalNameConflict(
+ JSContext* cx, Handle<ExtensibleLexicalEnvironmentObject*> lexicalEnv,
+ HandleObject varObj, Handle<PropertyName*> name);
+
+[[nodiscard]] bool CheckGlobalDeclarationConflicts(
+ JSContext* cx, HandleScript script,
+ Handle<ExtensibleLexicalEnvironmentObject*> lexicalEnv,
+ HandleObject varObj);
+
+[[nodiscard]] bool GlobalOrEvalDeclInstantiation(JSContext* cx,
+ HandleObject envChain,
+ HandleScript script,
+ GCThingIndex lastFun);
+
+[[nodiscard]] bool InitFunctionEnvironmentObjects(JSContext* cx,
+ AbstractFramePtr frame);
+
+[[nodiscard]] bool PushVarEnvironmentObject(JSContext* cx, Handle<Scope*> scope,
+ AbstractFramePtr frame);
+
+[[nodiscard]] bool GetFrameEnvironmentAndScope(JSContext* cx,
+ AbstractFramePtr frame,
+ const jsbytecode* pc,
+ MutableHandleObject env,
+ MutableHandle<Scope*> scope);
+
+void GetSuspendedGeneratorEnvironmentAndScope(AbstractGeneratorObject& genObj,
+ JSScript* script,
+ MutableHandleObject env,
+ MutableHandle<Scope*> scope);
+
+#ifdef DEBUG
+bool AnalyzeEntrainedVariables(JSContext* cx, HandleScript script);
+#endif
+
+extern JSObject* MaybeOptimizeBindGlobalName(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<PropertyName*> name);
+} // namespace js
+
+#endif /* vm_EnvironmentObject_h */
diff --git a/js/src/vm/EqualityOperations.cpp b/js/src/vm/EqualityOperations.cpp
new file mode 100644
index 0000000000..66438879ce
--- /dev/null
+++ b/js/src/vm/EqualityOperations.cpp
@@ -0,0 +1,360 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/EqualityOperations.h" // js::LooselyEqual, js::StrictlyEqual, js::SameValue
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF
+
+#include "jsnum.h" // js::StringToNumber
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "js/Context.h" // js::AssertHeapIsIdle
+#include "js/Equality.h" // JS::LooselyEqual, JS::StrictlyEqual, JS::SameValue
+#include "js/Result.h" // JS_TRY_VAR_OR_RETURN_FALSE
+#include "js/RootingAPI.h" // JS::Rooted
+#include "js/Value.h" // JS::Int32Value, JS::SameType, JS::Value
+#include "vm/BigIntType.h" // JS::BigInt
+#include "vm/JSContext.h" // CHECK_THREAD
+#include "vm/JSObject.h" // js::ToPrimitive
+#include "vm/StringType.h" // js::EqualStrings
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordType.h"
+# include "vm/TupleType.h"
+#endif
+
+#include "builtin/Boolean-inl.h" // js::EmulatesUndefined
+#include "vm/JSContext-inl.h" // JSContext::check
+
+static bool EqualGivenSameType(JSContext* cx, JS::Handle<JS::Value> lval,
+ JS::Handle<JS::Value> rval, bool* equal) {
+ MOZ_ASSERT(JS::SameType(lval, rval));
+
+ if (lval.isString()) {
+ return js::EqualStrings(cx, lval.toString(), rval.toString(), equal);
+ }
+
+ if (lval.isDouble()) {
+ *equal = (lval.toDouble() == rval.toDouble());
+ return true;
+ }
+
+ if (lval.isBigInt()) {
+ *equal = JS::BigInt::equal(lval.toBigInt(), rval.toBigInt());
+ return true;
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ // Record & Tuple proposal, section 3.2.6 (Strict Equality Comparison), step 3
+ // - https://tc39.es/proposal-record-tuple/#sec-strict-equality-comparison
+ //
+ // When computing equality, records and tuples are compared using the
+ // SameValueZero algorithm.
+ //
+  // NOTE: Since Records and Tuples are implemented as ExtendedPrimitives,
+ // "SameType" refers to the fact that both lval and rval are
+ // ExtendedPrimitives. They can still be different types (for example, a
+ // Record and a Tuple).
+ if (lval.isExtendedPrimitive()) {
+ JSObject* lobj = &lval.toExtendedPrimitive();
+ JSObject* robj = &rval.toExtendedPrimitive();
+
+ if (lobj->getClass() != robj->getClass()) {
+ *equal = false;
+ return true;
+ }
+
+ if (lobj->is<js::RecordType>()) {
+ return js::RecordType::sameValueZero(cx, &lobj->as<js::RecordType>(),
+ &robj->as<js::RecordType>(), equal);
+ }
+ if (lobj->is<js::TupleType>()) {
+ return js::TupleType::sameValueZero(cx, &lobj->as<js::TupleType>(),
+ &robj->as<js::TupleType>(), equal);
+ }
+ MOZ_CRASH("Unknown ExtendedPrimitive type");
+ }
+#endif
+
+ // Note: we can do a bitwise comparison even for Int32Value because both
+ // Values have the same type.
+ MOZ_ASSERT(CanUseBitwiseCompareForStrictlyEqual(lval) || lval.isInt32());
+
+ *equal = (lval.asRawBits() == rval.asRawBits());
+ MOZ_ASSERT_IF(lval.isUndefined() || lval.isNull(), *equal);
+ return true;
+}
+
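+// Compares a boolean |lval| with a non-boolean |rval| by first coercing the
+// boolean to a number; for example, |true == 1| and |true == "1"| both hold.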
+static bool LooselyEqualBooleanAndOther(JSContext* cx,
+ JS::Handle<JS::Value> lval,
+ JS::Handle<JS::Value> rval,
+ bool* result) {
+ MOZ_ASSERT(!rval.isBoolean());
+
+ JS::Rooted<JS::Value> lvalue(cx, JS::Int32Value(lval.toBoolean() ? 1 : 0));
+
+ // The tail-call would end up in Step 3.
+ if (rval.isNumber()) {
+ *result = (lvalue.toNumber() == rval.toNumber());
+ return true;
+ }
+ // The tail-call would end up in Step 6.
+ if (rval.isString()) {
+ double num;
+ if (!StringToNumber(cx, rval.toString(), &num)) {
+ return false;
+ }
+ *result = (lvalue.toNumber() == num);
+ return true;
+ }
+
+ return js::LooselyEqual(cx, lvalue, rval, result);
+}
+
+// ES6 draft rev32 7.2.12 Abstract Equality Comparison
+bool js::LooselyEqual(JSContext* cx, JS::Handle<JS::Value> lval,
+ JS::Handle<JS::Value> rval, bool* result) {
+ // Step 3.
+ if (JS::SameType(lval, rval)) {
+ return EqualGivenSameType(cx, lval, rval, result);
+ }
+
+ // Handle int32 x double.
+ if (lval.isNumber() && rval.isNumber()) {
+ *result = (lval.toNumber() == rval.toNumber());
+ return true;
+ }
+
+  // Step 4. This is a bit more complex, because of objects that emulate
+  // undefined.
+ if (lval.isNullOrUndefined()) {
+    // We can return early here, because null and undefined are loosely equal
+    // only to each other and to objects that emulate undefined.
+ *result = rval.isNullOrUndefined() ||
+ (rval.isObject() && EmulatesUndefined(&rval.toObject()));
+ return true;
+ }
+
+ // Step 5.
+ if (rval.isNullOrUndefined()) {
+ MOZ_ASSERT(!lval.isNullOrUndefined());
+ *result = lval.isObject() && EmulatesUndefined(&lval.toObject());
+ return true;
+ }
+
+ // Step 6.
+ if (lval.isNumber() && rval.isString()) {
+ double num;
+ if (!StringToNumber(cx, rval.toString(), &num)) {
+ return false;
+ }
+ *result = (lval.toNumber() == num);
+ return true;
+ }
+
+ // Step 7.
+ if (lval.isString() && rval.isNumber()) {
+ double num;
+ if (!StringToNumber(cx, lval.toString(), &num)) {
+ return false;
+ }
+ *result = (num == rval.toNumber());
+ return true;
+ }
+
+ // Step 8.
+ if (lval.isBoolean()) {
+ return LooselyEqualBooleanAndOther(cx, lval, rval, result);
+ }
+
+ // Step 9.
+ if (rval.isBoolean()) {
+ return LooselyEqualBooleanAndOther(cx, rval, lval, result);
+ }
+
+ // Step 10.
+ if ((lval.isString() || lval.isNumber() || lval.isSymbol()) &&
+ rval.isObject()) {
+ JS::Rooted<JS::Value> rvalue(cx, rval);
+ if (!ToPrimitive(cx, &rvalue)) {
+ return false;
+ }
+ return js::LooselyEqual(cx, lval, rvalue, result);
+ }
+
+ // Step 11.
+ if (lval.isObject() &&
+ (rval.isString() || rval.isNumber() || rval.isSymbol())) {
+ JS::Rooted<JS::Value> lvalue(cx, lval);
+ if (!ToPrimitive(cx, &lvalue)) {
+ return false;
+ }
+ return js::LooselyEqual(cx, lvalue, rval, result);
+ }
+
+ if (lval.isBigInt()) {
+ JS::Rooted<JS::BigInt*> lbi(cx, lval.toBigInt());
+ bool tmpResult;
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, tmpResult,
+ JS::BigInt::looselyEqual(cx, lbi, rval));
+ *result = tmpResult;
+ return true;
+ }
+
+ if (rval.isBigInt()) {
+ JS::Rooted<JS::BigInt*> rbi(cx, rval.toBigInt());
+ bool tmpResult;
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, tmpResult,
+ JS::BigInt::looselyEqual(cx, rbi, lval));
+ *result = tmpResult;
+ return true;
+ }
+
+ // Step 12.
+ *result = false;
+ return true;
+}
+
+JS_PUBLIC_API bool JS::LooselyEqual(JSContext* cx, Handle<Value> value1,
+ Handle<Value> value2, bool* equal) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(value1, value2);
+ MOZ_ASSERT(equal);
+ return js::LooselyEqual(cx, value1, value2, equal);
+}
+
+bool js::StrictlyEqual(JSContext* cx, JS::Handle<JS::Value> lval,
+ JS::Handle<JS::Value> rval, bool* equal) {
+ if (SameType(lval, rval)) {
+ return EqualGivenSameType(cx, lval, rval, equal);
+ }
+
+ if (lval.isNumber() && rval.isNumber()) {
+ *equal = (lval.toNumber() == rval.toNumber());
+ return true;
+ }
+
+ *equal = false;
+ return true;
+}
+
+JS_PUBLIC_API bool JS::StrictlyEqual(JSContext* cx, Handle<Value> value1,
+ Handle<Value> value2, bool* equal) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(value1, value2);
+ MOZ_ASSERT(equal);
+ return js::StrictlyEqual(cx, value1, value2, equal);
+}
+
+static inline bool IsNegativeZero(const JS::Value& v) {
+ return v.isDouble() && mozilla::IsNegativeZero(v.toDouble());
+}
+
+static inline bool IsNaN(const JS::Value& v) {
+ return v.isDouble() && std::isnan(v.toDouble());
+}
+
+bool js::SameValue(JSContext* cx, JS::Handle<JS::Value> v1,
+ JS::Handle<JS::Value> v2, bool* same) {
+ if (IsNegativeZero(v1)) {
+ *same = IsNegativeZero(v2);
+ return true;
+ }
+
+ if (IsNegativeZero(v2)) {
+ *same = false;
+ return true;
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ if (v1.isExtendedPrimitive()) {
+ JSObject* lobj = &v1.toExtendedPrimitive();
+ JSObject* robj = &v2.toExtendedPrimitive();
+
+ if (lobj->getClass() != robj->getClass()) {
+ *same = false;
+ return true;
+ }
+
+ if (lobj->is<js::RecordType>()) {
+ return js::RecordType::sameValue(cx, &lobj->as<js::RecordType>(),
+ &robj->as<js::RecordType>(), same);
+ }
+ if (lobj->is<js::TupleType>()) {
+ return js::TupleType::sameValue(cx, &lobj->as<js::TupleType>(),
+ &robj->as<js::TupleType>(), same);
+ }
+ MOZ_CRASH("Unknown ExtendedPrimitive type");
+ }
+#endif
+
+ return js::SameValueZero(cx, v1, v2, same);
+}
+
+#ifdef ENABLE_RECORD_TUPLE
+bool js::SameValueZeroLinear(const JS::Value& lval, const JS::Value& rval) {
+ if (lval.isNumber() && rval.isNumber()) {
+ return IsNaN(lval) ? IsNaN(rval) : lval.toNumber() == rval.toNumber();
+ }
+
+ if (lval.type() != rval.type()) {
+ return false;
+ }
+
+ switch (lval.type()) {
+ case ValueType::Double:
+ return IsNaN(lval) ? IsNaN(rval) : lval.toDouble() == rval.toDouble();
+
+ case ValueType::BigInt:
+ // BigInt values are considered equal if they represent the same
+ // mathematical value.
+ return BigInt::equal(lval.toBigInt(), rval.toBigInt());
+
+ case ValueType::String:
+ MOZ_ASSERT(lval.toString()->isLinear() && rval.toString()->isLinear());
+ return EqualStrings(&lval.toString()->asLinear(),
+ &rval.toString()->asLinear());
+
+ case ValueType::ExtendedPrimitive: {
+ JSObject& lobj = lval.toExtendedPrimitive();
+ JSObject& robj = rval.toExtendedPrimitive();
+ if (lobj.getClass() != robj.getClass()) {
+ return false;
+ }
+ if (lobj.is<RecordType>()) {
+ return RecordType::sameValueZero(&lobj.as<RecordType>(),
+ &robj.as<RecordType>());
+ }
+ MOZ_ASSERT(lobj.is<TupleType>());
+ return TupleType::sameValueZero(&lobj.as<TupleType>(),
+ &robj.as<TupleType>());
+ }
+
+ default:
+ MOZ_ASSERT(CanUseBitwiseCompareForStrictlyEqual(lval));
+ return lval.asRawBits() == rval.asRawBits();
+ }
+}
+#endif
+
+JS_PUBLIC_API bool JS::SameValue(JSContext* cx, Handle<Value> value1,
+ Handle<Value> value2, bool* same) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(value1, value2);
+ MOZ_ASSERT(same);
+ return js::SameValue(cx, value1, value2, same);
+}
+
+bool js::SameValueZero(JSContext* cx, Handle<Value> v1, Handle<Value> v2,
+ bool* same) {
+ if (IsNaN(v1) && IsNaN(v2)) {
+ *same = true;
+ return true;
+ }
+
+ return js::StrictlyEqual(cx, v1, v2, same);
+}
diff --git a/js/src/vm/EqualityOperations.h b/js/src/vm/EqualityOperations.h
new file mode 100644
index 0000000000..f08f179730
--- /dev/null
+++ b/js/src/vm/EqualityOperations.h
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * The equality comparisons of js/Equality.h, but with extra efficiency for
+ * SpiderMonkey-internal callers.
+ *
+ * These functions, assuming they're passed C++-valid arguments, are identical
+ * to the same-named JS::-namespaced functions -- just with hidden linkage (so
+ * they're more efficient to call), and without various external-caller-focused
+ * JSAPI-usage assertions that SpiderMonkey-internal callers never come close
+ * to failing.
+ */
+
+#ifndef vm_EqualityOperations_h
+#define vm_EqualityOperations_h
+
+#include "jstypes.h" // JS_PUBLIC_API
+#include "js/RootingAPI.h" // JS::Handle
+#include "js/Value.h" // JS::Value
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+
+/** Computes |lval === rval|. */
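+// For example, |1 === 1.0| and |-0 === 0| hold, while |NaN === NaN| and
+// |1 === "1"| do not.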
+extern bool StrictlyEqual(JSContext* cx, JS::Handle<JS::Value> lval,
+ JS::Handle<JS::Value> rval, bool* equal);
+
+/** Computes |lval == rval|. */
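+// For example, |1 == "1"|, |null == undefined|, and |0n == 0| hold, while
+// |NaN == NaN| does not.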
+extern bool LooselyEqual(JSContext* cx, JS::Handle<JS::Value> lval,
+ JS::Handle<JS::Value> rval, bool* equal);
+
+/**
+ * Computes |SameValue(v1, v2)| -- strict equality except that NaNs are
+ * considered equal and opposite-signed zeroes are considered unequal.
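+ * For example, |SameValue(NaN, NaN)| is true while |SameValue(-0, +0)| is
+ * false.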
+ */
+extern bool SameValue(JSContext* cx, JS::Handle<JS::Value> v1,
+ JS::Handle<JS::Value> v2, bool* same);
+
+#ifdef ENABLE_RECORD_TUPLE
+/**
+ * Computes |SameValueZero(v1, v2)|, but expects that strings, records, and
+ * tuples are all linear.
+ */
+extern bool SameValueZeroLinear(const JS::Value& lval, const JS::Value& rval);
+#endif
+
+/**
+ * Computes |SameValueZero(v1, v2)| -- strict equality except that NaNs are
+ * considered equal. Opposite-signed zeroes are considered equal.
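+ * For example, |SameValueZero(NaN, NaN)| and |SameValueZero(-0, +0)| are both
+ * true.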
+ */
+extern bool SameValueZero(JSContext* cx, JS::Handle<JS::Value> v1,
+ JS::Handle<JS::Value> v2, bool* same);
+
+/*
+ * Whether strict equality of a JS::Value (with any other JS::Value) can be
+ * implemented by comparing the raw bits, Value::asRawBits().
+ *
+ * Note that this does not include Int32Value, because DoubleValue can store
+ * integers too.
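+ * For example, Int32Value(1) and DoubleValue(1.0) are strictly equal but
+ * have different raw bits.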
+ */
+inline bool CanUseBitwiseCompareForStrictlyEqual(const JS::Value& v) {
+ return v.isObject() || v.isSymbol() || v.isNullOrUndefined() || v.isBoolean();
+}
+
+} // namespace js
+
+#endif // vm_EqualityOperations_h
diff --git a/js/src/vm/ErrorMessages.cpp b/js/src/vm/ErrorMessages.cpp
new file mode 100644
index 0000000000..9f65798956
--- /dev/null
+++ b/js/src/vm/ErrorMessages.cpp
@@ -0,0 +1,29 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* SpiderMonkey-internal error-reporting formatting functionality. */
+
+#include "js/friend/ErrorMessages.h"
+
+#include "jsexn.h" // js_ErrorFormatString
+
+#include "js/ErrorReport.h" // JSErrorFormatString
+
+const JSErrorFormatString js_ErrorFormatString[JSErr_Limit] = {
+#define MSG_DEF(name, count, exception, format) \
+ {#name, format, count, exception},
+#include "js/friend/ErrorNumbers.msg"
+#undef MSG_DEF
+};
+
+const JSErrorFormatString* js::GetErrorMessage(void* userRef,
+ unsigned errorNumber) {
+ if (errorNumber > 0 && errorNumber < JSErr_Limit) {
+ return &js_ErrorFormatString[errorNumber];
+ }
+
+ return nullptr;
+}
diff --git a/js/src/vm/ErrorObject-inl.h b/js/src/vm/ErrorObject-inl.h
new file mode 100644
index 0000000000..9da8e3fc7a
--- /dev/null
+++ b/js/src/vm/ErrorObject-inl.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ErrorObject_inl_h
+#define vm_ErrorObject_inl_h
+
+#include "vm/ErrorObject.h"
+
+#include "vm/JSAtomState.h"
+#include "vm/JSContext.h"
+
+inline JSString* js::ErrorObject::fileName(JSContext* cx) const {
+ Value val = getReservedSlot(FILENAME_SLOT);
+ return val.isString() ? val.toString() : cx->names().empty;
+}
+
+inline uint32_t js::ErrorObject::sourceId() const {
+ Value val = getReservedSlot(SOURCEID_SLOT);
+ return val.isInt32() ? val.toInt32() : 0;
+}
+
+inline uint32_t js::ErrorObject::lineNumber() const {
+ Value val = getReservedSlot(LINENUMBER_SLOT);
+ return val.isInt32() ? val.toInt32() : 0;
+}
+
+inline uint32_t js::ErrorObject::columnNumber() const {
+ Value val = getReservedSlot(COLUMNNUMBER_SLOT);
+ return val.isInt32() ? val.toInt32() : 0;
+}
+
+inline JSObject* js::ErrorObject::stack() const {
+ return getReservedSlot(STACK_SLOT).toObjectOrNull();
+}
+
+#endif /* vm_ErrorObject_inl_h */
diff --git a/js/src/vm/ErrorObject.cpp b/js/src/vm/ErrorObject.cpp
new file mode 100644
index 0000000000..5c9b9018a5
--- /dev/null
+++ b/js/src/vm/ErrorObject.cpp
@@ -0,0 +1,814 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=80:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/ErrorObject-inl.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include <utility>
+
+#include "jsexn.h"
+#include "jspubtd.h"
+#include "NamespaceImports.h"
+
+#include "gc/AllocKind.h"
+#include "gc/GCContext.h"
+#include "js/CallArgs.h"
+#include "js/CallNonGenericMethod.h"
+#include "js/CharacterEncoding.h"
+#include "js/Class.h"
+#include "js/Conversions.h"
+#include "js/ErrorReport.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/PropertySpec.h"
+#include "js/RootingAPI.h"
+#include "js/Stack.h"
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+#include "js/Value.h"
+#include "js/Wrapper.h"
+#include "util/StringBuffer.h"
+#include "vm/GlobalObject.h"
+#include "vm/Iteration.h"
+#include "vm/JSAtom.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h"
+#include "vm/ObjectOperations.h"
+#include "vm/SavedStacks.h"
+#include "vm/SelfHosting.h"
+#include "vm/Shape.h"
+#include "vm/Stack.h"
+#include "vm/StringType.h"
+#include "vm/ToSource.h" // js::ValueToSource
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#include "vm/JSContext-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/ObjectOperations-inl.h"
+#include "vm/SavedStacks-inl.h"
+#include "vm/Shape-inl.h"
+
+using namespace js;
+
+#define IMPLEMENT_ERROR_PROTO_CLASS(name) \
+ { \
+ #name ".prototype", JSCLASS_HAS_CACHED_PROTO(JSProto_##name), \
+ JS_NULL_CLASS_OPS, \
+ &ErrorObject::classSpecs[JSProto_##name - JSProto_Error] \
+ }
+
+const JSClass ErrorObject::protoClasses[JSEXN_ERROR_LIMIT] = {
+ IMPLEMENT_ERROR_PROTO_CLASS(Error),
+
+ IMPLEMENT_ERROR_PROTO_CLASS(InternalError),
+ IMPLEMENT_ERROR_PROTO_CLASS(AggregateError),
+ IMPLEMENT_ERROR_PROTO_CLASS(EvalError),
+ IMPLEMENT_ERROR_PROTO_CLASS(RangeError),
+ IMPLEMENT_ERROR_PROTO_CLASS(ReferenceError),
+ IMPLEMENT_ERROR_PROTO_CLASS(SyntaxError),
+ IMPLEMENT_ERROR_PROTO_CLASS(TypeError),
+ IMPLEMENT_ERROR_PROTO_CLASS(URIError),
+
+ IMPLEMENT_ERROR_PROTO_CLASS(DebuggeeWouldRun),
+ IMPLEMENT_ERROR_PROTO_CLASS(CompileError),
+ IMPLEMENT_ERROR_PROTO_CLASS(LinkError),
+ IMPLEMENT_ERROR_PROTO_CLASS(RuntimeError)};
+
+static bool exn_toSource(JSContext* cx, unsigned argc, Value* vp);
+
+static const JSFunctionSpec error_methods[] = {
+ JS_FN(js_toSource_str, exn_toSource, 0, 0),
+ JS_SELF_HOSTED_FN(js_toString_str, "ErrorToString", 0, 0), JS_FS_END};
+
+// Error.prototype and NativeError.prototype have own .message and .name
+// properties.
+#define COMMON_ERROR_PROPERTIES(name) \
+ JS_STRING_PS("message", "", 0), JS_STRING_PS("name", #name, 0)
+
+static const JSPropertySpec error_properties[] = {
+ COMMON_ERROR_PROPERTIES(Error),
+ // Only Error.prototype has .stack!
+ JS_PSGS("stack", ErrorObject::getStack, ErrorObject::setStack, 0),
+ JS_PS_END};
+
+#define IMPLEMENT_NATIVE_ERROR_PROPERTIES(name) \
+ static const JSPropertySpec name##_properties[] = { \
+ COMMON_ERROR_PROPERTIES(name), JS_PS_END};
+
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(InternalError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(AggregateError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(EvalError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(RangeError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(ReferenceError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(SyntaxError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(TypeError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(URIError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(DebuggeeWouldRun)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(CompileError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(LinkError)
+IMPLEMENT_NATIVE_ERROR_PROPERTIES(RuntimeError)
+
+#define IMPLEMENT_NATIVE_ERROR_SPEC(name) \
+ { \
+ ErrorObject::createConstructor, ErrorObject::createProto, nullptr, \
+ nullptr, nullptr, name##_properties, nullptr, JSProto_Error \
+ }
+
+#define IMPLEMENT_NONGLOBAL_ERROR_SPEC(name) \
+ { \
+ ErrorObject::createConstructor, ErrorObject::createProto, nullptr, \
+ nullptr, nullptr, name##_properties, nullptr, \
+ JSProto_Error | ClassSpec::DontDefineConstructor \
+ }
+
+const ClassSpec ErrorObject::classSpecs[JSEXN_ERROR_LIMIT] = {
+ {ErrorObject::createConstructor, ErrorObject::createProto, nullptr, nullptr,
+ error_methods, error_properties},
+
+ IMPLEMENT_NATIVE_ERROR_SPEC(InternalError),
+ IMPLEMENT_NATIVE_ERROR_SPEC(AggregateError),
+ IMPLEMENT_NATIVE_ERROR_SPEC(EvalError),
+ IMPLEMENT_NATIVE_ERROR_SPEC(RangeError),
+ IMPLEMENT_NATIVE_ERROR_SPEC(ReferenceError),
+ IMPLEMENT_NATIVE_ERROR_SPEC(SyntaxError),
+ IMPLEMENT_NATIVE_ERROR_SPEC(TypeError),
+ IMPLEMENT_NATIVE_ERROR_SPEC(URIError),
+
+ IMPLEMENT_NONGLOBAL_ERROR_SPEC(DebuggeeWouldRun),
+ IMPLEMENT_NONGLOBAL_ERROR_SPEC(CompileError),
+ IMPLEMENT_NONGLOBAL_ERROR_SPEC(LinkError),
+ IMPLEMENT_NONGLOBAL_ERROR_SPEC(RuntimeError)};
+
+#define IMPLEMENT_ERROR_CLASS_CORE(name, reserved_slots) \
+ { \
+ #name, \
+ JSCLASS_HAS_CACHED_PROTO(JSProto_##name) | \
+ JSCLASS_HAS_RESERVED_SLOTS(reserved_slots) | \
+ JSCLASS_BACKGROUND_FINALIZE, \
+ &ErrorObjectClassOps, \
+ &ErrorObject::classSpecs[JSProto_##name - JSProto_Error] \
+ }
+
+#define IMPLEMENT_ERROR_CLASS(name) \
+ IMPLEMENT_ERROR_CLASS_CORE(name, ErrorObject::RESERVED_SLOTS)
+
+// Only used for classes that could be a Wasm trap. Classes that use this
+// macro should be kept in sync with the exception types that mightBeWasmTrap()
+// will return true for.
+#define IMPLEMENT_ERROR_CLASS_MAYBE_WASM_TRAP(name) \
+ IMPLEMENT_ERROR_CLASS_CORE(name, ErrorObject::RESERVED_SLOTS_MAYBE_WASM_TRAP)
+
+static void exn_finalize(JS::GCContext* gcx, JSObject* obj);
+
+static const JSClassOps ErrorObjectClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ exn_finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass ErrorObject::classes[JSEXN_ERROR_LIMIT] = {
+ IMPLEMENT_ERROR_CLASS(Error),
+ IMPLEMENT_ERROR_CLASS_MAYBE_WASM_TRAP(InternalError),
+ IMPLEMENT_ERROR_CLASS(AggregateError), IMPLEMENT_ERROR_CLASS(EvalError),
+ IMPLEMENT_ERROR_CLASS(RangeError), IMPLEMENT_ERROR_CLASS(ReferenceError),
+ IMPLEMENT_ERROR_CLASS(SyntaxError), IMPLEMENT_ERROR_CLASS(TypeError),
+ IMPLEMENT_ERROR_CLASS(URIError),
+ // These Error subclasses are not accessible via the global object:
+ IMPLEMENT_ERROR_CLASS(DebuggeeWouldRun),
+ IMPLEMENT_ERROR_CLASS(CompileError), IMPLEMENT_ERROR_CLASS(LinkError),
+ IMPLEMENT_ERROR_CLASS_MAYBE_WASM_TRAP(RuntimeError)};
+
+static void exn_finalize(JS::GCContext* gcx, JSObject* obj) {
+ if (JSErrorReport* report = obj->as<ErrorObject>().getErrorReport()) {
+ // Bug 1560019: This allocation is not currently tracked.
+ gcx->deleteUntracked(report);
+ }
+}
+
+static ErrorObject* CreateErrorObject(JSContext* cx, const CallArgs& args,
+ unsigned messageArg, JSExnType exnType,
+ HandleObject proto) {
+ // Compute the error message, if any.
+ RootedString message(cx, nullptr);
+ if (args.hasDefined(messageArg)) {
+ message = ToString<CanGC>(cx, args[messageArg]);
+ if (!message) {
+ return nullptr;
+ }
+ }
+
+ // Don't interpret the two parameters following the message parameter as the
+ // non-standard fileName and lineNumber arguments when we have an options
+ // object argument.
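+  // For example, |new Error("m", {cause: v})| takes the options path below,
+  // while |new Error("m", "file.js", 3)| takes the legacy path.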
+ bool hasOptions = args.get(messageArg + 1).isObject();
+
+ Rooted<mozilla::Maybe<Value>> cause(cx, mozilla::Nothing());
+ if (hasOptions) {
+ RootedObject options(cx, &args[messageArg + 1].toObject());
+
+ bool hasCause = false;
+ if (!HasProperty(cx, options, cx->names().cause, &hasCause)) {
+ return nullptr;
+ }
+
+ if (hasCause) {
+ RootedValue causeValue(cx);
+ if (!GetProperty(cx, options, options, cx->names().cause, &causeValue)) {
+ return nullptr;
+ }
+ cause = mozilla::Some(causeValue.get());
+ }
+ }
+
+  // Find the scripted caller, considering only frames we're allowed to know
+  // about.
+ NonBuiltinFrameIter iter(cx, cx->realm()->principals());
+
+ RootedString fileName(cx);
+ uint32_t sourceId = 0;
+ if (!hasOptions && args.length() > messageArg + 1) {
+ fileName = ToString<CanGC>(cx, args[messageArg + 1]);
+ } else {
+ fileName = cx->runtime()->emptyString;
+ if (!iter.done()) {
+ if (const char* cfilename = iter.filename()) {
+ fileName = JS_NewStringCopyUTF8Z(
+ cx, JS::ConstUTF8CharsZ(cfilename, strlen(cfilename)));
+ }
+ if (iter.hasScript()) {
+ sourceId = iter.script()->scriptSource()->id();
+ }
+ }
+ }
+ if (!fileName) {
+ return nullptr;
+ }
+
+ uint32_t lineNumber, columnNumber = 0;
+ if (!hasOptions && args.length() > messageArg + 2) {
+ if (!ToUint32(cx, args[messageArg + 2], &lineNumber)) {
+ return nullptr;
+ }
+ } else {
+ lineNumber = iter.done() ? 0 : iter.computeLine(&columnNumber);
+ columnNumber = FixupColumnForDisplay(columnNumber);
+ }
+
+ RootedObject stack(cx);
+ if (!CaptureStack(cx, &stack)) {
+ return nullptr;
+ }
+
+ return ErrorObject::create(cx, exnType, stack, fileName, sourceId, lineNumber,
+ columnNumber, nullptr, message, cause, proto);
+}
+
+static bool Error(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // ECMA ed. 3, 15.11.1 requires Error, etc., to construct even when
+ // called as functions, without operator new. But as we do not give
+ // each constructor a distinct JSClass, we must get the exception type
+ // ourselves.
+ JSExnType exnType =
+ JSExnType(args.callee().as<JSFunction>().getExtendedSlot(0).toInt32());
+
+ MOZ_ASSERT(exnType != JSEXN_AGGREGATEERR,
+ "AggregateError has its own constructor function");
+
+ JSProtoKey protoKey =
+ JSCLASS_CACHED_PROTO_KEY(&ErrorObject::classes[exnType]);
+
+  // ES6 19.5.1.1 mandates that the .prototype lookup happens before the
+  // ToString of the message argument.
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, protoKey, &proto)) {
+ return false;
+ }
+
+ auto* obj = CreateErrorObject(cx, args, 0, exnType, proto);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+// AggregateError ( errors, message )
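+// For example, |new AggregateError([a, b], "m")| creates an error whose
+// .errors property is a new array containing |a| and |b|.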
+static bool AggregateError(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ mozilla::DebugOnly<JSExnType> exnType =
+ JSExnType(args.callee().as<JSFunction>().getExtendedSlot(0).toInt32());
+
+ MOZ_ASSERT(exnType == JSEXN_AGGREGATEERR);
+
+ // Steps 1-2. (9.1.13 OrdinaryCreateFromConstructor, steps 1-2).
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_AggregateError,
+ &proto)) {
+ return false;
+ }
+
+  // Iterating a missing |errors| argument would throw a TypeError anyway, but
+  // requiring it here gives a better error message.
+ if (!args.requireAtLeast(cx, "AggregateError", 1)) {
+ return false;
+ }
+
+ // 9.1.13 OrdinaryCreateFromConstructor, step 3.
+ // Step 3.
+ Rooted<ErrorObject*> obj(
+ cx, CreateErrorObject(cx, args, 1, JSEXN_AGGREGATEERR, proto));
+ if (!obj) {
+ return false;
+ }
+
+ // Step 4.
+
+ Rooted<ArrayObject*> errorsList(cx);
+ if (!IterableToArray(cx, args.get(0), &errorsList)) {
+ return false;
+ }
+
+ // Step 5.
+ RootedValue errorsVal(cx, JS::ObjectValue(*errorsList));
+ if (!NativeDefineDataProperty(cx, obj, cx->names().errors, errorsVal, 0)) {
+ return false;
+ }
+
+ // Step 6.
+ args.rval().setObject(*obj);
+ return true;
+}
+
+/* static */
+JSObject* ErrorObject::createProto(JSContext* cx, JSProtoKey key) {
+ JSExnType type = ExnTypeFromProtoKey(key);
+
+ if (type == JSEXN_ERR) {
+ return GlobalObject::createBlankPrototype(
+ cx, cx->global(), &ErrorObject::protoClasses[JSEXN_ERR]);
+ }
+
+ RootedObject protoProto(
+ cx, GlobalObject::getOrCreateErrorPrototype(cx, cx->global()));
+ if (!protoProto) {
+ return nullptr;
+ }
+
+ return GlobalObject::createBlankPrototypeInheriting(
+ cx, &ErrorObject::protoClasses[type], protoProto);
+}
+
+/* static */
+JSObject* ErrorObject::createConstructor(JSContext* cx, JSProtoKey key) {
+ JSExnType type = ExnTypeFromProtoKey(key);
+ RootedObject ctor(cx);
+
+ if (type == JSEXN_ERR) {
+ ctor = GenericCreateConstructor<Error, 1, gc::AllocKind::FUNCTION_EXTENDED>(
+ cx, key);
+ } else {
+ RootedFunction proto(
+ cx, GlobalObject::getOrCreateErrorConstructor(cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+
+ Native native;
+ unsigned nargs;
+ if (type == JSEXN_AGGREGATEERR) {
+ native = AggregateError;
+ nargs = 2;
+ } else {
+ native = Error;
+ nargs = 1;
+ }
+
+ ctor =
+ NewFunctionWithProto(cx, native, nargs, FunctionFlags::NATIVE_CTOR,
+ nullptr, ClassName(key, cx), proto,
+ gc::AllocKind::FUNCTION_EXTENDED, TenuredObject);
+ }
+
+ if (!ctor) {
+ return nullptr;
+ }
+
+ ctor->as<JSFunction>().setExtendedSlot(0, Int32Value(type));
+ return ctor;
+}
+
+/* static */
+SharedShape* js::ErrorObject::assignInitialShape(JSContext* cx,
+ Handle<ErrorObject*> obj) {
+ MOZ_ASSERT(obj->empty());
+
+ constexpr PropertyFlags propFlags = {PropertyFlag::Configurable,
+ PropertyFlag::Writable};
+
+ if (!NativeObject::addPropertyInReservedSlot(cx, obj, cx->names().fileName,
+ FILENAME_SLOT, propFlags)) {
+ return nullptr;
+ }
+
+ if (!NativeObject::addPropertyInReservedSlot(cx, obj, cx->names().lineNumber,
+ LINENUMBER_SLOT, propFlags)) {
+ return nullptr;
+ }
+
+ if (!NativeObject::addPropertyInReservedSlot(
+ cx, obj, cx->names().columnNumber, COLUMNNUMBER_SLOT, propFlags)) {
+ return nullptr;
+ }
+
+ return obj->sharedShape();
+}
+
+/* static */
+bool js::ErrorObject::init(JSContext* cx, Handle<ErrorObject*> obj,
+ JSExnType type, UniquePtr<JSErrorReport> errorReport,
+ HandleString fileName, HandleObject stack,
+ uint32_t sourceId, uint32_t lineNumber,
+ uint32_t columnNumber, HandleString message,
+ Handle<mozilla::Maybe<JS::Value>> cause) {
+ MOZ_ASSERT(JSEXN_ERR <= type && type < JSEXN_ERROR_LIMIT);
+ AssertObjectIsSavedFrameOrWrapper(cx, stack);
+ cx->check(obj, stack);
+
+ // Null out early in case of error, for exn_finalize's sake.
+ obj->initReservedSlot(ERROR_REPORT_SLOT, PrivateValue(nullptr));
+
+ if (!SharedShape::ensureInitialCustomShape<ErrorObject>(cx, obj)) {
+ return false;
+ }
+
+ // The .message property isn't part of the initial shape because it's
+ // present in some error objects -- |Error.prototype|, |new Error("f")|,
+ // |new Error("")| -- but not in others -- |new Error(undefined)|,
+ // |new Error()|.
+ if (message) {
+ constexpr PropertyFlags propFlags = {PropertyFlag::Configurable,
+ PropertyFlag::Writable};
+ if (!NativeObject::addPropertyInReservedSlot(cx, obj, cx->names().message,
+ MESSAGE_SLOT, propFlags)) {
+ return false;
+ }
+ }
+
+ // Similar to the .message property, .cause is present only in some error
+  // objects -- |new Error("f", {cause: cause})| -- but not in others --
+ // |Error.prototype|, |new Error()|, |new Error("f")|.
+ if (cause.isSome()) {
+ constexpr PropertyFlags propFlags = {PropertyFlag::Configurable,
+ PropertyFlag::Writable};
+ if (!NativeObject::addPropertyInReservedSlot(cx, obj, cx->names().cause,
+ CAUSE_SLOT, propFlags)) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(obj->lookupPure(NameToId(cx->names().fileName))->slot() ==
+ FILENAME_SLOT);
+ MOZ_ASSERT(obj->lookupPure(NameToId(cx->names().lineNumber))->slot() ==
+ LINENUMBER_SLOT);
+ MOZ_ASSERT(obj->lookupPure(NameToId(cx->names().columnNumber))->slot() ==
+ COLUMNNUMBER_SLOT);
+ MOZ_ASSERT_IF(
+ message,
+ obj->lookupPure(NameToId(cx->names().message))->slot() == MESSAGE_SLOT);
+ MOZ_ASSERT_IF(
+ cause.isSome(),
+ obj->lookupPure(NameToId(cx->names().cause))->slot() == CAUSE_SLOT);
+
+ JSErrorReport* report = errorReport.release();
+ obj->initReservedSlot(STACK_SLOT, ObjectOrNullValue(stack));
+ obj->setReservedSlot(ERROR_REPORT_SLOT, PrivateValue(report));
+ obj->initReservedSlot(FILENAME_SLOT, StringValue(fileName));
+ obj->initReservedSlot(LINENUMBER_SLOT, Int32Value(lineNumber));
+ obj->initReservedSlot(COLUMNNUMBER_SLOT, Int32Value(columnNumber));
+ if (message) {
+ obj->initReservedSlot(MESSAGE_SLOT, StringValue(message));
+ }
+ if (cause.isSome()) {
+ obj->initReservedSlot(CAUSE_SLOT, *cause.get());
+ } else {
+ obj->initReservedSlot(CAUSE_SLOT, MagicValue(JS_ERROR_WITHOUT_CAUSE));
+ }
+ obj->initReservedSlot(SOURCEID_SLOT, Int32Value(sourceId));
+ if (obj->mightBeWasmTrap()) {
+ MOZ_ASSERT(JSCLASS_RESERVED_SLOTS(obj->getClass()) > WASM_TRAP_SLOT);
+ obj->initReservedSlot(WASM_TRAP_SLOT, BooleanValue(false));
+ }
+
+ return true;
+}
+
+/* static */
+ErrorObject* js::ErrorObject::create(JSContext* cx, JSExnType errorType,
+ HandleObject stack, HandleString fileName,
+ uint32_t sourceId, uint32_t lineNumber,
+ uint32_t columnNumber,
+ UniquePtr<JSErrorReport> report,
+ HandleString message,
+ Handle<mozilla::Maybe<JS::Value>> cause,
+ HandleObject protoArg /* = nullptr */) {
+ AssertObjectIsSavedFrameOrWrapper(cx, stack);
+
+ RootedObject proto(cx, protoArg);
+ if (!proto) {
+ proto = GlobalObject::getOrCreateCustomErrorPrototype(cx, cx->global(),
+ errorType);
+ if (!proto) {
+ return nullptr;
+ }
+ }
+
+ Rooted<ErrorObject*> errObject(cx);
+ {
+ const JSClass* clasp = ErrorObject::classForType(errorType);
+ JSObject* obj = NewObjectWithGivenProto(cx, clasp, proto);
+ if (!obj) {
+ return nullptr;
+ }
+ errObject = &obj->as<ErrorObject>();
+ }
+
+ if (!ErrorObject::init(cx, errObject, errorType, std::move(report), fileName,
+ stack, sourceId, lineNumber, columnNumber, message,
+ cause)) {
+ return nullptr;
+ }
+
+ return errObject;
+}
+
+JSErrorReport* js::ErrorObject::getOrCreateErrorReport(JSContext* cx) {
+ if (JSErrorReport* r = getErrorReport()) {
+ return r;
+ }
+
+ // We build an error report on the stack and then use CopyErrorReport to do
+ // the nitty-gritty malloc stuff.
+ JSErrorReport report;
+
+ // Type.
+ JSExnType type_ = type();
+ report.exnType = type_;
+
+ // Filename.
+ RootedString filename(cx, fileName(cx));
+ UniqueChars filenameStr = JS_EncodeStringToUTF8(cx, filename);
+ if (!filenameStr) {
+ return nullptr;
+ }
+ report.filename = filenameStr.get();
+
+ // Coordinates.
+ report.sourceId = sourceId();
+ report.lineno = lineNumber();
+ report.column = columnNumber();
+
+ // Message. Note that |new Error()| will result in an undefined |message|
+ // slot, so we need to explicitly substitute the empty string in that case.
+ RootedString message(cx, getMessage());
+ if (!message) {
+ message = cx->runtime()->emptyString;
+ }
+
+ UniqueChars utf8 = StringToNewUTF8CharsZ(cx, *message);
+ if (!utf8) {
+ return nullptr;
+ }
+ report.initOwnedMessage(utf8.release());
+
+ // Cache and return.
+ UniquePtr<JSErrorReport> copy = CopyErrorReport(cx, &report);
+ if (!copy) {
+ return nullptr;
+ }
+ setReservedSlot(ERROR_REPORT_SLOT, PrivateValue(copy.get()));
+ return copy.release();
+}
+
+static bool FindErrorInstanceOrPrototype(JSContext* cx, HandleObject obj,
+ MutableHandleObject result) {
+ // Walk up the prototype chain until we find an error object instance or
+ // prototype object. This allows code like:
+ // Object.create(Error.prototype).stack
+ // or
+ // function NYI() { }
+ // NYI.prototype = new Error;
+ // (new NYI).stack
+ // to continue returning stacks that are useless, but at least don't throw.
+
+ RootedObject curr(cx, obj);
+ RootedObject target(cx);
+ do {
+ target = CheckedUnwrapStatic(curr);
+ if (!target) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+ if (IsErrorProtoKey(StandardProtoKeyOrNull(target))) {
+ result.set(target);
+ return true;
+ }
+
+ if (!GetPrototype(cx, curr, &curr)) {
+ return false;
+ }
+ } while (curr);
+
+ // We walked the whole prototype chain and did not find an Error
+ // object.
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_INCOMPATIBLE_PROTO, js_Error_str,
+ "(get stack)", obj->getClass()->name);
+ return false;
+}
+
+static MOZ_ALWAYS_INLINE bool IsObject(HandleValue v) { return v.isObject(); }
+
+/* static */
+bool js::ErrorObject::getStack(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ // We accept any object here, because of poor-man's subclassing of Error.
+ return CallNonGenericMethod<IsObject, getStack_impl>(cx, args);
+}
+
+/* static */
+bool js::ErrorObject::getStack_impl(JSContext* cx, const CallArgs& args) {
+ RootedObject thisObj(cx, &args.thisv().toObject());
+
+ RootedObject obj(cx);
+ if (!FindErrorInstanceOrPrototype(cx, thisObj, &obj)) {
+ return false;
+ }
+
+ if (!obj->is<ErrorObject>()) {
+ args.rval().setString(cx->runtime()->emptyString);
+ return true;
+ }
+
+ // Do frame filtering based on the ErrorObject's principals. This ensures we
+ // don't see chrome frames when chrome code accesses .stack over Xrays.
+ JSPrincipals* principals = obj->as<ErrorObject>().realm()->principals();
+
+ RootedObject savedFrameObj(cx, obj->as<ErrorObject>().stack());
+ RootedString stackString(cx);
+ if (!BuildStackString(cx, principals, savedFrameObj, &stackString)) {
+ return false;
+ }
+
+ if (cx->runtime()->stackFormat() == js::StackFormat::V8) {
+ // When emulating V8 stack frames, we also need to prepend the
+ // stringified Error to the stack string.
+ Handle<PropertyName*> name = cx->names().ErrorToStringWithTrailingNewline;
+ FixedInvokeArgs<0> args2(cx);
+ RootedValue rval(cx);
+ if (!CallSelfHostedFunction(cx, name, args.thisv(), args2, &rval)) {
+ return false;
+ }
+
+ if (!rval.isString()) {
+ args.rval().setString(cx->runtime()->emptyString);
+ return true;
+ }
+
+ RootedString stringified(cx, rval.toString());
+ stackString = ConcatStrings<CanGC>(cx, stringified, stackString);
+ }
+
+ args.rval().setString(stackString);
+ return true;
+}
+
+/* static */
+bool js::ErrorObject::setStack(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ // We accept any object here, because of poor-man's subclassing of Error.
+ return CallNonGenericMethod<IsObject, setStack_impl>(cx, args);
+}
+
+/* static */
+bool js::ErrorObject::setStack_impl(JSContext* cx, const CallArgs& args) {
+ RootedObject thisObj(cx, &args.thisv().toObject());
+
+ if (!args.requireAtLeast(cx, "(set stack)", 1)) {
+ return false;
+ }
+ RootedValue val(cx, args[0]);
+
+ return DefineDataProperty(cx, thisObj, cx->names().stack, val);
+}
+
+void js::ErrorObject::setFromWasmTrap() {
+ MOZ_ASSERT(mightBeWasmTrap());
+ MOZ_ASSERT(JSCLASS_RESERVED_SLOTS(getClass()) > WASM_TRAP_SLOT);
+ setReservedSlot(WASM_TRAP_SLOT, BooleanValue(true));
+}
+
+JSString* js::ErrorToSource(JSContext* cx, HandleObject obj) {
+ RootedValue nameVal(cx);
+ RootedString name(cx);
+ if (!GetProperty(cx, obj, obj, cx->names().name, &nameVal) ||
+ !(name = ToString<CanGC>(cx, nameVal))) {
+ return nullptr;
+ }
+
+ RootedValue messageVal(cx);
+ RootedString message(cx);
+ if (!GetProperty(cx, obj, obj, cx->names().message, &messageVal) ||
+ !(message = ValueToSource(cx, messageVal))) {
+ return nullptr;
+ }
+
+ RootedValue filenameVal(cx);
+ RootedString filename(cx);
+ if (!GetProperty(cx, obj, obj, cx->names().fileName, &filenameVal) ||
+ !(filename = ValueToSource(cx, filenameVal))) {
+ return nullptr;
+ }
+
+ RootedValue linenoVal(cx);
+ uint32_t lineno;
+ if (!GetProperty(cx, obj, obj, cx->names().lineNumber, &linenoVal) ||
+ !ToUint32(cx, linenoVal, &lineno)) {
+ return nullptr;
+ }
+
+ JSStringBuilder sb(cx);
+ if (!sb.append("(new ") || !sb.append(name) || !sb.append("(")) {
+ return nullptr;
+ }
+
+ if (!sb.append(message)) {
+ return nullptr;
+ }
+
+ if (!filename->empty()) {
+ if (!sb.append(", ") || !sb.append(filename)) {
+ return nullptr;
+ }
+ }
+ if (lineno != 0) {
+    /* We have a line but no filename; add an empty string for it. */
+ if (filename->empty() && !sb.append(", \"\"")) {
+ return nullptr;
+ }
+
+ JSString* linenumber = ToString<CanGC>(cx, linenoVal);
+ if (!linenumber) {
+ return nullptr;
+ }
+ if (!sb.append(", ") || !sb.append(linenumber)) {
+ return nullptr;
+ }
+ }
+
+ if (!sb.append("))")) {
+ return nullptr;
+ }
+
+ return sb.finishString();
+}
+
+/*
+ * Return a string that may eval to something similar to the original object.
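+ * For example, an error with message "m", fileName "f.js", and lineNumber 3
+ * yields |(new Error("m", "f.js", 3))|.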
+ */
+static bool exn_toSource(JSContext* cx, unsigned argc, Value* vp) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ RootedObject obj(cx, ToObject(cx, args.thisv()));
+ if (!obj) {
+ return false;
+ }
+
+ JSString* str = ErrorToSource(cx, obj);
+ if (!str) {
+ return false;
+ }
+
+ args.rval().setString(str);
+ return true;
+}
diff --git a/js/src/vm/ErrorObject.h b/js/src/vm/ErrorObject.h
new file mode 100644
index 0000000000..d0424fe429
--- /dev/null
+++ b/js/src/vm/ErrorObject.h
@@ -0,0 +1,167 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ErrorObject_h_
+#define vm_ErrorObject_h_
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Maybe.h"
+
+#include <iterator>
+#include <stdint.h>
+
+#include "jspubtd.h"
+#include "NamespaceImports.h"
+
+#include "js/Class.h"
+#include "js/ErrorReport.h"
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Value.h"
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class ErrorObject : public NativeObject {
+ static JSObject* createProto(JSContext* cx, JSProtoKey key);
+
+ static JSObject* createConstructor(JSContext* cx, JSProtoKey key);
+
+ static bool init(JSContext* cx, Handle<ErrorObject*> obj, JSExnType type,
+ UniquePtr<JSErrorReport> errorReport, HandleString fileName,
+ HandleObject stack, uint32_t sourceId, uint32_t lineNumber,
+ uint32_t columnNumber, HandleString message,
+ Handle<mozilla::Maybe<JS::Value>> cause);
+
+ static const ClassSpec classSpecs[JSEXN_ERROR_LIMIT];
+ static const JSClass protoClasses[JSEXN_ERROR_LIMIT];
+
+ protected:
+ static const uint32_t STACK_SLOT = 0;
+ static const uint32_t ERROR_REPORT_SLOT = STACK_SLOT + 1;
+ static const uint32_t FILENAME_SLOT = ERROR_REPORT_SLOT + 1;
+ static const uint32_t LINENUMBER_SLOT = FILENAME_SLOT + 1;
+ static const uint32_t COLUMNNUMBER_SLOT = LINENUMBER_SLOT + 1;
+ static const uint32_t MESSAGE_SLOT = COLUMNNUMBER_SLOT + 1;
+ static const uint32_t CAUSE_SLOT = MESSAGE_SLOT + 1;
+ static const uint32_t SOURCEID_SLOT = CAUSE_SLOT + 1;
+
+ static const uint32_t RESERVED_SLOTS = SOURCEID_SLOT + 1;
+
+ // This slot is only used for errors that could be Wasm traps.
+ static const uint32_t WASM_TRAP_SLOT = SOURCEID_SLOT + 1;
+ static const uint32_t RESERVED_SLOTS_MAYBE_WASM_TRAP = WASM_TRAP_SLOT + 1;
+
+ public:
+ static const JSClass classes[JSEXN_ERROR_LIMIT];
+
+ static const JSClass* classForType(JSExnType type) {
+ MOZ_ASSERT(type < JSEXN_ERROR_LIMIT);
+ return &classes[type];
+ }
+
+ static bool isErrorClass(const JSClass* clasp) {
+ return &classes[0] <= clasp && clasp < &classes[0] + std::size(classes);
+ }
+
+ // Create an error of the given type corresponding to the provided location
+ // info. If |message| is non-null, then the error will have a .message
+ // property with that value; otherwise the error will have no .message
+ // property.
+ static ErrorObject* create(JSContext* cx, JSExnType type, HandleObject stack,
+ HandleString fileName, uint32_t sourceId,
+ uint32_t lineNumber, uint32_t columnNumber,
+ UniquePtr<JSErrorReport> report,
+ HandleString message,
+ Handle<mozilla::Maybe<JS::Value>> cause,
+ HandleObject proto = nullptr);
+
+ /*
+ * Assign the initial error shape to the empty object. (This shape does
+ * *not* include .message, which must be added separately if needed; see
+ * ErrorObject::init.)
+ */
+ static SharedShape* assignInitialShape(JSContext* cx,
+ Handle<ErrorObject*> obj);
+
+ JSExnType type() const {
+ MOZ_ASSERT(isErrorClass(getClass()));
+ return static_cast<JSExnType>(getClass() - &classes[0]);
+ }
+
+ JSErrorReport* getErrorReport() const {
+ const Value& slot = getReservedSlot(ERROR_REPORT_SLOT);
+ if (slot.isUndefined()) {
+ return nullptr;
+ }
+ return static_cast<JSErrorReport*>(slot.toPrivate());
+ }
+
+ JSErrorReport* getOrCreateErrorReport(JSContext* cx);
+
+ inline JSString* fileName(JSContext* cx) const;
+ inline uint32_t sourceId() const;
+ inline uint32_t lineNumber() const;
+ inline uint32_t columnNumber() const;
+ inline JSObject* stack() const;
+
+ JSString* getMessage() const {
+ Value val = getReservedSlot(MESSAGE_SLOT);
+ return val.isString() ? val.toString() : nullptr;
+ }
+
+ mozilla::Maybe<Value> getCause() const {
+ const auto& value = getReservedSlot(CAUSE_SLOT);
+ if (value.isMagic(JS_ERROR_WITHOUT_CAUSE)) {
+ return mozilla::Nothing();
+ }
+ return mozilla::Some(value);
+ }
+
+ void setStackSlot(const Value& stack) {
+ MOZ_ASSERT(stack.isObjectOrNull());
+ setReservedSlot(STACK_SLOT, stack);
+ }
+
+ void setCauseSlot(const Value& cause) {
+ MOZ_ASSERT(!cause.isMagic());
+ MOZ_ASSERT(getCause().isSome());
+ setReservedSlot(CAUSE_SLOT, cause);
+ }
+
+ // Getter and setter for the Error.prototype.stack accessor.
+ static bool getStack(JSContext* cx, unsigned argc, Value* vp);
+ static bool getStack_impl(JSContext* cx, const CallArgs& args);
+ static bool setStack(JSContext* cx, unsigned argc, Value* vp);
+ static bool setStack_impl(JSContext* cx, const CallArgs& args);
+
+ // Used to distinguish errors created from Wasm traps.
+ bool mightBeWasmTrap() const {
+ return type() == JSEXN_WASMRUNTIMEERROR || type() == JSEXN_INTERNALERR;
+ }
+ bool fromWasmTrap() const {
+ if (!mightBeWasmTrap()) {
+ return false;
+ } else {
+ MOZ_ASSERT(JSCLASS_RESERVED_SLOTS(getClass()) > WASM_TRAP_SLOT);
+ return getReservedSlot(WASM_TRAP_SLOT).toBoolean();
+ }
+ }
+ void setFromWasmTrap();
+};
+
+JSString* ErrorToSource(JSContext* cx, HandleObject obj);
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<js::ErrorObject>() const {
+ return js::ErrorObject::isErrorClass(getClass());
+}
+
+#endif // vm_ErrorObject_h_
diff --git a/js/src/vm/ErrorReporting.cpp b/js/src/vm/ErrorReporting.cpp
new file mode 100644
index 0000000000..cac51e7e0a
--- /dev/null
+++ b/js/src/vm/ErrorReporting.cpp
@@ -0,0 +1,585 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/ErrorReporting.h"
+
+#include <stdarg.h>
+#include <utility>
+
+#include "jsexn.h"
+#include "jsfriendapi.h"
+
+#include "frontend/FrontendContext.h" // AutoReportFrontendContext
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Printf.h" // JS_vsmprintf
+#include "js/Warnings.h" // JS::WarningReporter
+#include "vm/FrameIter.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+
+using namespace js;
+
+using JS::HandleObject;
+using JS::HandleValue;
+using JS::UniqueTwoByteChars;
+
+void js::CallWarningReporter(JSContext* cx, JSErrorReport* reportp) {
+ MOZ_ASSERT(reportp->isWarning());
+
+ if (JS::WarningReporter warningReporter = cx->runtime()->warningReporter) {
+ warningReporter(cx, reportp);
+ }
+}
+
+bool js::CompileError::throwError(JSContext* cx) {
+ if (isWarning()) {
+ CallWarningReporter(cx, this);
+ return true;
+ }
+
+ // If there's a runtime exception type associated with this error
+ // number, set that as the pending exception. For errors occurring at
+ // compile time, this is very likely to be a JSEXN_SYNTAXERR.
+ return ErrorToException(cx, this, nullptr, nullptr);
+}
+
+bool js::ReportExceptionClosure::operator()(JSContext* cx) {
+ cx->setPendingException(exn_, ShouldCaptureStack::Always);
+ return false;
+}
+
+bool js::ReportCompileWarning(FrontendContext* fc, ErrorMetadata&& metadata,
+ UniquePtr<JSErrorNotes> notes,
+ unsigned errorNumber, va_list* args) {
+ // On the main thread, report the error immediately. When compiling off
+ // thread, save the error so that the thread finishing the parse can report
+ // it later.
+ CompileError err;
+
+ err.notes = std::move(notes);
+ err.isWarning_ = true;
+ err.errorNumber = errorNumber;
+
+ err.filename = metadata.filename;
+ err.lineno = metadata.lineNumber;
+ err.column = metadata.columnNumber;
+ err.isMuted = metadata.isMuted;
+
+ if (UniqueTwoByteChars lineOfContext = std::move(metadata.lineOfContext)) {
+ err.initOwnedLinebuf(lineOfContext.release(), metadata.lineLength,
+ metadata.tokenOffset);
+ }
+
+ if (!ExpandErrorArgumentsVA(fc, GetErrorMessage, nullptr, errorNumber,
+ ArgumentsAreLatin1, &err, *args)) {
+ return false;
+ }
+
+ return fc->reportWarning(std::move(err));
+}
+
+static void ReportCompileErrorImpl(FrontendContext* fc,
+ js::ErrorMetadata&& metadata,
+ js::UniquePtr<JSErrorNotes> notes,
+ unsigned errorNumber, va_list* args,
+ ErrorArgumentsType argumentsType) {
+ js::CompileError err;
+
+ err.notes = std::move(notes);
+ err.isWarning_ = false;
+ err.errorNumber = errorNumber;
+
+ err.filename = metadata.filename;
+ err.lineno = metadata.lineNumber;
+ err.column = metadata.columnNumber;
+ err.isMuted = metadata.isMuted;
+
+ if (UniqueTwoByteChars lineOfContext = std::move(metadata.lineOfContext)) {
+ err.initOwnedLinebuf(lineOfContext.release(), metadata.lineLength,
+ metadata.tokenOffset);
+ }
+
+ if (!js::ExpandErrorArgumentsVA(fc, js::GetErrorMessage, nullptr, errorNumber,
+ argumentsType, &err, *args)) {
+ return;
+ }
+
+ fc->reportError(std::move(err));
+}
+
+void js::ReportCompileErrorLatin1(FrontendContext* fc, ErrorMetadata&& metadata,
+ UniquePtr<JSErrorNotes> notes,
+ unsigned errorNumber, va_list* args) {
+ ReportCompileErrorImpl(fc, std::move(metadata), std::move(notes), errorNumber,
+ args, ArgumentsAreLatin1);
+}
+
+void js::ReportCompileErrorUTF8(FrontendContext* fc, ErrorMetadata&& metadata,
+ UniquePtr<JSErrorNotes> notes,
+ unsigned errorNumber, va_list* args) {
+ ReportCompileErrorImpl(fc, std::move(metadata), std::move(notes), errorNumber,
+ args, ArgumentsAreUTF8);
+}
+
+void js::ReportErrorToGlobal(JSContext* cx, Handle<GlobalObject*> global,
+ HandleValue error) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+#ifdef DEBUG
+ // No assertSameCompartment version that doesn't take JSContext...
+ if (error.isObject()) {
+ AssertSameCompartment(global, &error.toObject());
+ }
+#endif // DEBUG
+ js::ReportExceptionClosure report(error);
+ PrepareScriptEnvironmentAndInvoke(cx, global, report);
+}
+
+static bool ReportError(JSContext* cx, JSErrorReport* reportp,
+ JSErrorCallback callback, void* userRef) {
+ if (reportp->isWarning()) {
+ CallWarningReporter(cx, reportp);
+ return true;
+ }
+
+ // Check the error report, and set a JavaScript-catchable exception
+ // if the error is defined to have an associated exception.
+ return ErrorToException(cx, reportp, callback, userRef);
+}
+
+/*
+ * The given JSErrorReport object has been zeroed and must not outlive
+ * cx->fp() (otherwise owned fields may become invalid).
+ */
+static void PopulateReportBlame(JSContext* cx, JSErrorReport* report) {
+ JS::Realm* realm = cx->realm();
+ if (!realm) {
+ return;
+ }
+
+ /*
+   * Walk the stack until we find a frame associated with a non-builtin
+   * script and that we're allowed to know about.
+ */
+ NonBuiltinFrameIter iter(cx, realm->principals());
+ if (iter.done()) {
+ return;
+ }
+
+ report->filename = iter.filename();
+ if (iter.hasScript()) {
+ report->sourceId = iter.script()->scriptSource()->id();
+ }
+ uint32_t column;
+ report->lineno = iter.computeLine(&column);
+ report->column = FixupColumnForDisplay(column);
+ report->isMuted = iter.mutedErrors();
+}
+
+class MOZ_RAII AutoMessageArgs {
+ size_t totalLength_;
+ /* only {0} thru {9} supported */
+ mozilla::Array<const char*, JS::MaxNumErrorArguments> args_;
+ mozilla::Array<size_t, JS::MaxNumErrorArguments> lengths_;
+ uint16_t count_;
+ bool allocatedElements_ : 1;
+
+ public:
+ AutoMessageArgs() : totalLength_(0), count_(0), allocatedElements_(false) {
+ PodArrayZero(args_);
+ }
+
+ ~AutoMessageArgs() {
+ /* free the arguments only if we allocated them */
+ if (allocatedElements_) {
+ uint16_t i = 0;
+ while (i < count_) {
+ if (args_[i]) {
+ js_free((void*)args_[i]);
+ }
+ i++;
+ }
+ }
+ }
+
+ const char* args(size_t i) const {
+ MOZ_ASSERT(i < count_);
+ return args_[i];
+ }
+
+ size_t totalLength() const { return totalLength_; }
+
+ size_t lengths(size_t i) const {
+ MOZ_ASSERT(i < count_);
+ return lengths_[i];
+ }
+
+ uint16_t count() const { return count_; }
+
+ /* Gather the arguments into an array, and accumulate their sizes.
+ *
+ * We could template on the type of argsArg, but we're already trusting people
+ * to do the right thing with varargs, so might as well trust them on this
+ * part too. Upstream consumers do assert that it's the right thing. Also,
+ * if argsArg were strongly typed we'd still need casting below for this to
+ * compile, because typeArg is not known at compile-time here.
+ */
+ template <typename Allocator>
+ bool init(Allocator* alloc, void* argsArg, uint16_t countArg,
+ ErrorArgumentsType typeArg, va_list ap) {
+ MOZ_ASSERT(countArg > 0);
+
+ count_ = countArg;
+
+ for (uint16_t i = 0; i < count_; i++) {
+ switch (typeArg) {
+ case ArgumentsAreASCII:
+ case ArgumentsAreUTF8: {
+ const char* c = argsArg ? static_cast<const char**>(argsArg)[i]
+ : va_arg(ap, const char*);
+ args_[i] = c;
+ MOZ_ASSERT_IF(typeArg == ArgumentsAreASCII,
+ JS::StringIsASCII(args_[i]));
+ lengths_[i] = strlen(args_[i]);
+ break;
+ }
+ case ArgumentsAreLatin1: {
+ MOZ_ASSERT(!argsArg);
+ const Latin1Char* latin1 = va_arg(ap, Latin1Char*);
+ size_t len = strlen(reinterpret_cast<const char*>(latin1));
+ mozilla::Range<const Latin1Char> range(latin1, len);
+ char* utf8 = JS::CharsToNewUTF8CharsZ(alloc, range).c_str();
+ if (!utf8) {
+ return false;
+ }
+
+ args_[i] = utf8;
+ lengths_[i] = strlen(utf8);
+ allocatedElements_ = true;
+ break;
+ }
+ case ArgumentsAreUnicode: {
+ const char16_t* uc = argsArg
+ ? static_cast<const char16_t**>(argsArg)[i]
+ : va_arg(ap, const char16_t*);
+ size_t len = js_strlen(uc);
+ mozilla::Range<const char16_t> range(uc, len);
+ char* utf8 = JS::CharsToNewUTF8CharsZ(alloc, range).c_str();
+ if (!utf8) {
+ return false;
+ }
+
+ args_[i] = utf8;
+ lengths_[i] = strlen(utf8);
+ allocatedElements_ = true;
+ break;
+ }
+ }
+ totalLength_ += lengths_[i];
+ }
+ return true;
+ }
+};
+
+/*
+ * The arguments from ap need to be packaged up into an array and stored
+ * into the report struct.
+ *
+ * The format string addressed by the error number may contain operands
+ * identified by the format {N}, where N is a decimal digit. Each of these
+ * is to be replaced by the Nth argument from the va_list. The complete
+ * message is placed into reportp->message_.
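+ * For example, the format "{0} is not a function" with the single argument
+ * "f" expands to "f is not a function".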
+ *
+ * Returns true if the expansion succeeds (can fail if out of memory).
+ *
+ * messageArgs is a `const char**` or a `const char16_t**` but templating on
+ * that is not worth it here because AutoMessageArgs takes a void* anyway, and
+ * using void* here simplifies our callers a bit.
+ */
+template <typename T>
+static bool ExpandErrorArgumentsHelper(FrontendContext* fc,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ void* messageArgs,
+ ErrorArgumentsType argumentsType,
+ T* reportp, va_list ap) {
+ const JSErrorFormatString* efs;
+
+ if (!callback) {
+ callback = GetErrorMessage;
+ }
+
+ efs = fc->gcSafeCallback(callback, userRef, errorNumber);
+
+ if (efs) {
+ if constexpr (std::is_same_v<T, JSErrorReport>) {
+ reportp->exnType = efs->exnType;
+ }
+
+ MOZ_ASSERT(reportp->errorNumber == errorNumber);
+ reportp->errorMessageName = efs->name;
+
+ MOZ_ASSERT_IF(argumentsType == ArgumentsAreASCII,
+ JS::StringIsASCII(efs->format));
+
+ uint16_t argCount = efs->argCount;
+ MOZ_RELEASE_ASSERT(argCount <= JS::MaxNumErrorArguments);
+ if (argCount > 0) {
+ /*
+ * Parse the error format, substituting the argument X
+ * for {X} in the format.
+ */
+ if (efs->format) {
+ const char* fmt;
+ char* out;
+#ifdef DEBUG
+ int expandedArgs = 0;
+#endif
+ size_t expandedLength;
+ size_t len = strlen(efs->format);
+
+ AutoMessageArgs args;
+ if (!args.init(fc->getAllocator(), messageArgs, argCount, argumentsType,
+ ap)) {
+ return false;
+ }
+
+ expandedLength = len - (3 * args.count()) /* exclude the {n} */
+ + args.totalLength();
+
+ /*
+ * Note - the above calculation assumes that each argument
+ * is used once and only once in the expansion !!!
+ */
+ char* utf8 = out =
+ fc->getAllocator()->pod_malloc<char>(expandedLength + 1);
+ if (!out) {
+ return false;
+ }
+
+ fmt = efs->format;
+ while (*fmt) {
+ if (*fmt == '{') {
+ if (mozilla::IsAsciiDigit(fmt[1])) {
+ int d = AsciiDigitToNumber(fmt[1]);
+ MOZ_RELEASE_ASSERT(d < args.count());
+ strncpy(out, args.args(d), args.lengths(d));
+ out += args.lengths(d);
+ fmt += 3;
+#ifdef DEBUG
+ expandedArgs++;
+#endif
+ continue;
+ }
+ }
+ *out++ = *fmt++;
+ }
+ MOZ_ASSERT(expandedArgs == args.count());
+ *out = 0;
+
+ reportp->initOwnedMessage(utf8);
+ }
+ } else {
+      /* Zero arguments implies no messageArgs array: a non-null
+         messageArgs would have to contain at least one argument. */
+ MOZ_ASSERT(!messageArgs);
+ /*
+ * Zero arguments: the format string (if it exists) is the
+ * entire message.
+ */
+ if (efs->format) {
+ reportp->initBorrowedMessage(efs->format);
+ }
+ }
+ }
+ if (!reportp->message()) {
+    /* Fallback when no message is registered for this error number. */
+ const char* defaultErrorMessage =
+ "No error message available for error number %d";
+ size_t nbytes = strlen(defaultErrorMessage) + 16;
+ char* message = fc->getAllocator()->pod_malloc<char>(nbytes);
+ if (!message) {
+ return false;
+ }
+ snprintf(message, nbytes, defaultErrorMessage, errorNumber);
+ reportp->initOwnedMessage(message);
+ }
+ return true;
+}
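+
+// Illustrative sketch (not part of this patch): the same single-pass "{N}"
+// expansion over plain C strings, assuming single-digit indices and each
+// argument used exactly once -- the assumption the expandedLength
+// precomputation above relies on. ExpandFormat is a hypothetical name;
+// assumes <cctype>, <cstdlib>, and <cstring>.
+static char* ExpandFormat(const char* fmt, const char** args,
+                          const size_t* lens, size_t count) {
+  size_t total = 0;
+  for (size_t i = 0; i < count; i++) {
+    total += lens[i];
+  }
+  // Each "{N}" occupies three chars of |fmt| and is replaced by args[N].
+  size_t outLen = strlen(fmt) - 3 * count + total;
+  char* out = static_cast<char*>(malloc(outLen + 1));
+  if (!out) {
+    return nullptr;
+  }
+  char* dst = out;
+  while (*fmt) {
+    if (fmt[0] == '{' && isdigit(static_cast<unsigned char>(fmt[1])) &&
+        fmt[2] == '}') {
+      size_t d = static_cast<size_t>(fmt[1] - '0');
+      memcpy(dst, args[d], lens[d]);  // Same effect as the strncpy above.
+      dst += lens[d];
+      fmt += 3;
+      continue;
+    }
+    *dst++ = *fmt++;
+  }
+  *dst = '\0';
+  return out;
+}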
+
+bool js::ExpandErrorArgumentsVA(FrontendContext* fc, JSErrorCallback callback,
+ void* userRef, const unsigned errorNumber,
+ const char16_t** messageArgs,
+ ErrorArgumentsType argumentsType,
+ JSErrorReport* reportp, va_list ap) {
+ MOZ_ASSERT(argumentsType == ArgumentsAreUnicode);
+ return ExpandErrorArgumentsHelper(fc, callback, userRef, errorNumber,
+ messageArgs, argumentsType, reportp, ap);
+}
+
+bool js::ExpandErrorArgumentsVA(FrontendContext* fc, JSErrorCallback callback,
+ void* userRef, const unsigned errorNumber,
+ const char** messageArgs,
+ ErrorArgumentsType argumentsType,
+ JSErrorReport* reportp, va_list ap) {
+ MOZ_ASSERT(argumentsType != ArgumentsAreUnicode);
+ return ExpandErrorArgumentsHelper(fc, callback, userRef, errorNumber,
+ messageArgs, argumentsType, reportp, ap);
+}
+
+bool js::ExpandErrorArgumentsVA(FrontendContext* fc, JSErrorCallback callback,
+ void* userRef, const unsigned errorNumber,
+ ErrorArgumentsType argumentsType,
+ JSErrorReport* reportp, va_list ap) {
+ return ExpandErrorArgumentsHelper(fc, callback, userRef, errorNumber, nullptr,
+ argumentsType, reportp, ap);
+}
+
+bool js::ExpandErrorArgumentsVA(FrontendContext* fc, JSErrorCallback callback,
+ void* userRef, const unsigned errorNumber,
+ const char16_t** messageArgs,
+ ErrorArgumentsType argumentsType,
+ JSErrorNotes::Note* notep, va_list ap) {
+ return ExpandErrorArgumentsHelper(fc, callback, userRef, errorNumber,
+ messageArgs, argumentsType, notep, ap);
+}
+
+bool js::ReportErrorNumberVA(JSContext* cx, IsWarning isWarning,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ ErrorArgumentsType argumentsType, va_list ap) {
+ JSErrorReport report;
+ report.isWarning_ = isWarning == IsWarning::Yes;
+ report.errorNumber = errorNumber;
+ PopulateReportBlame(cx, &report);
+
+ AutoReportFrontendContext fc(cx);
+ if (!ExpandErrorArgumentsVA(&fc, callback, userRef, errorNumber,
+ argumentsType, &report, ap)) {
+ return false;
+ }
+
+ if (!ReportError(cx, &report, callback, userRef)) {
+ return false;
+ }
+
+ return report.isWarning();
+}
+
+template <typename CharT>
+static bool ExpandErrorArguments(FrontendContext* fc, JSErrorCallback callback,
+ void* userRef, const unsigned errorNumber,
+ const CharT** messageArgs,
+ js::ErrorArgumentsType argumentsType,
+ JSErrorReport* reportp, ...) {
+ va_list ap;
+ va_start(ap, reportp);
+ bool expanded =
+ js::ExpandErrorArgumentsVA(fc, callback, userRef, errorNumber,
+ messageArgs, argumentsType, reportp, ap);
+ va_end(ap);
+ return expanded;
+}
+
+template <js::ErrorArgumentsType argType, typename CharT>
+static bool ReportErrorNumberArray(JSContext* cx, IsWarning isWarning,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ const CharT** args) {
+ static_assert(
+ (argType == ArgumentsAreUnicode && std::is_same_v<CharT, char16_t>) ||
+ (argType != ArgumentsAreUnicode && std::is_same_v<CharT, char>),
+ "Mismatch between character type and argument type");
+
+ JSErrorReport report;
+ report.isWarning_ = isWarning == IsWarning::Yes;
+ report.errorNumber = errorNumber;
+ PopulateReportBlame(cx, &report);
+
+ AutoReportFrontendContext fc(cx);
+ if (!ExpandErrorArguments(&fc, callback, userRef, errorNumber, args, argType,
+ &report)) {
+ return false;
+ }
+
+ if (!ReportError(cx, &report, callback, userRef)) {
+ return false;
+ }
+
+ return report.isWarning();
+}
+
+bool js::ReportErrorNumberUCArray(JSContext* cx, IsWarning isWarning,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ const char16_t** args) {
+ return ReportErrorNumberArray<ArgumentsAreUnicode>(
+ cx, isWarning, callback, userRef, errorNumber, args);
+}
+
+bool js::ReportErrorNumberUTF8Array(JSContext* cx, IsWarning isWarning,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ const char** args) {
+ return ReportErrorNumberArray<ArgumentsAreUTF8>(cx, isWarning, callback,
+ userRef, errorNumber, args);
+}
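+
+// Illustrative sketch (hypothetical call site, not part of this patch):
+// reporting through the UTF-8 array entry point. The error number must name
+// a message whose argCount matches the array length; JSMSG_EXAMPLE is a
+// placeholder. Note the return value is true only for a reported warning.
+static bool ReportTwoArgExample(JSContext* cx) {
+  const char* args[] = {"first", "second"};
+  return js::ReportErrorNumberUTF8Array(cx, js::IsWarning::No,
+                                        js::GetErrorMessage, nullptr,
+                                        JSMSG_EXAMPLE, args);
+}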
+
+bool js::ReportErrorVA(JSContext* cx, IsWarning isWarning, const char* format,
+ js::ErrorArgumentsType argumentsType, va_list ap) {
+ JSErrorReport report;
+
+ UniqueChars message(JS_vsmprintf(format, ap));
+ if (!message) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ MOZ_ASSERT_IF(argumentsType == ArgumentsAreASCII,
+ JS::StringIsASCII(message.get()));
+
+ report.isWarning_ = isWarning == IsWarning::Yes;
+ report.errorNumber = JSMSG_USER_DEFINED_ERROR;
+ if (argumentsType == ArgumentsAreASCII || argumentsType == ArgumentsAreUTF8) {
+ report.initOwnedMessage(message.release());
+ } else {
+ MOZ_ASSERT(argumentsType == ArgumentsAreLatin1);
+ JS::Latin1Chars latin1(message.get(), strlen(message.get()));
+ JS::UTF8CharsZ utf8(JS::CharsToNewUTF8CharsZ(cx, latin1));
+ if (!utf8) {
+ return false;
+ }
+ report.initOwnedMessage(reinterpret_cast<const char*>(utf8.get()));
+ }
+ PopulateReportBlame(cx, &report);
+
+ if (!ReportError(cx, &report, nullptr, nullptr)) {
+ return false;
+ }
+
+ return report.isWarning();
+}
+
+void js::MaybePrintAndClearPendingException(JSContext* cx) {
+ if (!cx->isExceptionPending()) {
+ return;
+ }
+
+ AutoClearPendingException acpe(cx);
+
+ JS::ExceptionStack exnStack(cx);
+ if (!JS::StealPendingExceptionStack(cx, &exnStack)) {
+ fprintf(stderr, "error getting pending exception\n");
+ return;
+ }
+
+ JS::ErrorReportBuilder report(cx);
+ if (!report.init(cx, exnStack, JS::ErrorReportBuilder::WithSideEffects)) {
+ fprintf(stderr, "out of memory initializing JS::ErrorReportBuilder\n");
+ return;
+ }
+
+ MOZ_ASSERT(!report.report()->isWarning());
+ JS::PrintError(stderr, report, true);
+}
diff --git a/js/src/vm/ErrorReporting.h b/js/src/vm/ErrorReporting.h
new file mode 100644
index 0000000000..292ae2d47e
--- /dev/null
+++ b/js/src/vm/ErrorReporting.h
@@ -0,0 +1,190 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ErrorReporting_h
+#define vm_ErrorReporting_h
+
+#include <stdarg.h>
+
+#include "jsfriendapi.h" // for ScriptEnvironmentPreparer
+
+#include "js/ErrorReport.h" // for JSErrorNotes, JSErrorReport
+#include "js/UniquePtr.h" // for UniquePtr
+#include "js/Utility.h" // for UniqueTwoByteChars
+
+namespace js {
+
+class FrontendContext;
+
+/**
+ * Use this type instead of JSContext when the object is only used for its
+ * ability to allocate memory (via its MallocProvider methods).
+ */
+using JSAllocator = JSContext;
+
+/**
+ * Metadata for a compilation error (or warning) at a particular offset, or at
+ * no offset (i.e. with respect to a script overall).
+ */
+struct ErrorMetadata {
+ // The file/URL where the error occurred.
+ const char* filename;
+
+ // The line and column numbers where the error occurred. If the error
+ // is with respect to the entire script and not with respect to a
+ // particular location, these will both be zero.
+ uint32_t lineNumber;
+ uint32_t columnNumber;
+
+ // If the error occurs at a particular location, context surrounding the
+ // location of the error: the line that contained the error, or a small
+ // portion of it if the line is long. (If the error occurs within a
+ // regular expression, this context is based upon its pattern characters.)
+ //
+ // This information is provided on a best-effort basis: code populating
+ // ErrorMetadata instances isn't obligated to supply this.
+ JS::UniqueTwoByteChars lineOfContext;
+
+ // If |lineOfContext| is provided, we show only a portion (a "window") of
+ // the line around the erroneous token -- the first char in the token, plus
+ // |lineOfContextRadius| chars before it and |lineOfContextRadius - 1|
+ // chars after it. This is because for a very long line, the full line is
+ // (a) not that helpful, and (b) wastes a lot of memory. See bug 634444.
+ static constexpr size_t lineOfContextRadius = 60;
+
+ // If |lineOfContext| is non-null, its length.
+ size_t lineLength;
+
+ // If |lineOfContext| is non-null, the offset within it of the token that
+ // triggered the error.
+ size_t tokenOffset;
+
+ // Whether the error is "muted" because it derives from a cross-origin
+ // load. See the comment in TransitiveCompileOptions in jsapi.h for
+ // details.
+ bool isMuted;
+};
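+
+// Illustrative sketch (hypothetical helper, not part of this header): the
+// windowing documented above -- clamp a window of 2 * lineOfContextRadius
+// chars around the token at |tokenOffset| in a line of |lineLength| chars.
+static inline void ContextWindow(size_t tokenOffset, size_t lineLength,
+                                 size_t radius, size_t* start, size_t* end) {
+  *start = tokenOffset > radius ? tokenOffset - radius : 0;
+  // The window holds the token's first char plus |radius - 1| chars after it.
+  size_t stop = tokenOffset + radius;
+  *end = stop < lineLength ? stop : lineLength;
+}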
+
+class CompileError : public JSErrorReport {
+ public:
+ bool throwError(JSContext* cx);
+};
+
+class MOZ_STACK_CLASS ReportExceptionClosure final
+ : public ScriptEnvironmentPreparer::Closure {
+ JS::HandleValue exn_;
+
+ public:
+ explicit ReportExceptionClosure(JS::HandleValue exn) : exn_(exn) {}
+
+ bool operator()(JSContext* cx) override;
+};
+
+/** Send a JSErrorReport to the warningReporter callback. */
+extern void CallWarningReporter(JSContext* cx, JSErrorReport* report);
+
+/**
+ * Report a compile error during script processing prior to execution of the
+ * script.
+ */
+extern void ReportCompileErrorLatin1(FrontendContext* fc,
+ ErrorMetadata&& metadata,
+ UniquePtr<JSErrorNotes> notes,
+ unsigned errorNumber, va_list* args);
+
+extern void ReportCompileErrorUTF8(FrontendContext* fc,
+ ErrorMetadata&& metadata,
+ UniquePtr<JSErrorNotes> notes,
+ unsigned errorNumber, va_list* args);
+
+/**
+ * Report a compile warning during script processing prior to execution of the
+ * script. Returns true if the warning was successfully reported, false if an
+ * error occurred.
+ */
+[[nodiscard]] extern bool ReportCompileWarning(FrontendContext* fc,
+ ErrorMetadata&& metadata,
+ UniquePtr<JSErrorNotes> notes,
+ unsigned errorNumber,
+ va_list* args);
+
+class GlobalObject;
+
+/**
+ * Report the given error Value to the given global. The JSContext is not
+ * assumed to be in any particular realm, but the global and error are
+ * expected to be same-compartment.
+ */
+extern void ReportErrorToGlobal(JSContext* cx,
+ JS::Handle<js::GlobalObject*> global,
+ JS::HandleValue error);
+
+enum class IsWarning { No, Yes };
+
+/**
+ * Report an exception, using printf-style APIs to generate the error
+ * message.
+ */
+extern bool ReportErrorVA(JSContext* cx, IsWarning isWarning,
+ const char* format, ErrorArgumentsType argumentsType,
+ va_list ap) MOZ_FORMAT_PRINTF(3, 0);
+
+extern bool ReportErrorNumberVA(JSContext* cx, IsWarning isWarning,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ ErrorArgumentsType argumentsType, va_list ap);
+
+extern bool ReportErrorNumberUCArray(JSContext* cx, IsWarning isWarning,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ const char16_t** args);
+
+extern bool ReportErrorNumberUTF8Array(JSContext* cx, IsWarning isWarning,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ const char** args);
+
+extern bool ExpandErrorArgumentsVA(FrontendContext* fc,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ const char16_t** messageArgs,
+ ErrorArgumentsType argumentsType,
+ JSErrorReport* reportp, va_list ap);
+
+extern bool ExpandErrorArgumentsVA(FrontendContext* fc,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ const char** messageArgs,
+ ErrorArgumentsType argumentsType,
+ JSErrorReport* reportp, va_list ap);
+
+/*
+ * For cases when we do not have an arguments array.
+ */
+extern bool ExpandErrorArgumentsVA(FrontendContext* fc,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ ErrorArgumentsType argumentsType,
+ JSErrorReport* reportp, va_list ap);
+
+extern bool ExpandErrorArgumentsVA(FrontendContext* fc,
+ JSErrorCallback callback, void* userRef,
+ const unsigned errorNumber,
+ const char16_t** messageArgs,
+ ErrorArgumentsType argumentsType,
+ JSErrorNotes::Note* notep, va_list ap);
+
+/*
+ * If there is a pending exception, print it to stderr and clear it. Otherwise
+ * do nothing.
+ *
+ * For reporting bugs or unexpected errors in testing functions.
+ */
+extern void MaybePrintAndClearPendingException(JSContext* cx);
+
+} // namespace js
+
+#endif /* vm_ErrorReporting_h */
diff --git a/js/src/vm/Exception.cpp b/js/src/vm/Exception.cpp
new file mode 100644
index 0000000000..d169e9e4a1
--- /dev/null
+++ b/js/src/vm/Exception.cpp
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/Exception.h"
+
+#include "js/Context.h" // js::AssertHeapIsIdle
+#include "vm/JSContext.h"
+#include "vm/SavedFrame.h"
+
+using namespace js;
+
+bool JS::StealPendingExceptionStack(JSContext* cx,
+ JS::ExceptionStack* exceptionStack) {
+ if (!GetPendingExceptionStack(cx, exceptionStack)) {
+ return false;
+ }
+
+ // "Steal" exception by clearing it.
+ cx->clearPendingException();
+ return true;
+}
+
+bool JS::GetPendingExceptionStack(JSContext* cx,
+ JS::ExceptionStack* exceptionStack) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ MOZ_ASSERT(exceptionStack);
+ MOZ_ASSERT(cx->isExceptionPending());
+
+ RootedValue exception(cx);
+ if (!cx->getPendingException(&exception)) {
+ return false;
+ }
+
+ RootedObject stack(cx, cx->getPendingExceptionStack());
+ exceptionStack->init(exception, stack);
+ return true;
+}
+
+void JS::SetPendingExceptionStack(JSContext* cx,
+ const JS::ExceptionStack& exceptionStack) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ // We don't check the compartments of `exception` and `stack` here,
+ // because we're not doing anything with them other than storing
+  // them, and stored exception values can be in an arbitrary
+ // compartment while stored stack values are always the unwrapped
+ // object anyway.
+
+ Rooted<SavedFrame*> nstack(cx);
+ if (exceptionStack.stack()) {
+ nstack = &UncheckedUnwrap(exceptionStack.stack())->as<SavedFrame>();
+ }
+ cx->setPendingException(exceptionStack.exception(), nstack);
+}
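+
+// Illustrative sketch (hypothetical helper, not part of this file): the
+// steal/restore pattern these entry points enable. Assuming an exception is
+// pending, preserve it across a side operation that needs a clean slate.
+static bool WithPendingExceptionSaved(JSContext* cx, bool (*op)(JSContext*)) {
+  JS::ExceptionStack saved(cx);
+  if (!JS::StealPendingExceptionStack(cx, &saved)) {
+    return false;
+  }
+  bool ok = op(cx);  // Runs with no exception pending.
+  JS::SetPendingExceptionStack(cx, saved);  // Restore the original.
+  return ok;
+}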
diff --git a/js/src/vm/ForOfIterator.cpp b/js/src/vm/ForOfIterator.cpp
new file mode 100644
index 0000000000..44b6a6dd4f
--- /dev/null
+++ b/js/src/vm/ForOfIterator.cpp
@@ -0,0 +1,211 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/ForOfIterator.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "vm/Interpreter.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/PIC.h"
+
+#include "vm/JSContext-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using JS::ForOfIterator;
+
+bool ForOfIterator::init(HandleValue iterable,
+ NonIterableBehavior nonIterableBehavior) {
+ JSContext* cx = cx_;
+ RootedObject iterableObj(cx, ToObject(cx, iterable));
+ if (!iterableObj) {
+ return false;
+ }
+
+ MOZ_ASSERT(index == NOT_ARRAY);
+
+ // Check the PIC first for a match.
+ if (iterableObj->is<ArrayObject>()) {
+ ForOfPIC::Chain* stubChain = ForOfPIC::getOrCreate(cx);
+ if (!stubChain) {
+ return false;
+ }
+
+ bool optimized;
+ if (!stubChain->tryOptimizeArray(cx, iterableObj.as<ArrayObject>(),
+ &optimized)) {
+ return false;
+ }
+
+ if (optimized) {
+ // Got optimized stub. Array is optimizable.
+ index = 0;
+ iterator = iterableObj;
+ nextMethod.setUndefined();
+ return true;
+ }
+ }
+
+ MOZ_ASSERT(index == NOT_ARRAY);
+
+ RootedValue callee(cx);
+ RootedId iteratorId(cx, PropertyKey::Symbol(cx->wellKnownSymbols().iterator));
+ if (!GetProperty(cx, iterableObj, iterable, iteratorId, &callee)) {
+ return false;
+ }
+
+ // If obj[@@iterator] is undefined and we were asked to allow non-iterables,
+ // bail out now without setting iterator. This will make valueIsIterable(),
+ // which our caller should check, return false.
+ if (nonIterableBehavior == AllowNonIterable && callee.isUndefined()) {
+ return true;
+ }
+
+ // Throw if obj[@@iterator] isn't callable.
+  // js::Call is about to check for this kind of error anyway, but it would
+ // throw an inscrutable error message about |method| rather than this nice
+ // one about |obj|.
+ if (!callee.isObject() || !callee.toObject().isCallable()) {
+ UniqueChars bytes =
+ DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, iterable, nullptr);
+ if (!bytes) {
+ return false;
+ }
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_NOT_ITERABLE,
+ bytes.get());
+ return false;
+ }
+
+ RootedValue res(cx);
+ if (!js::Call(cx, callee, iterable, &res)) {
+ return false;
+ }
+
+ if (!res.isObject()) {
+ return ThrowCheckIsObject(cx, CheckIsObjectKind::GetIterator);
+ }
+
+ RootedObject iteratorObj(cx, &res.toObject());
+ if (!GetProperty(cx, iteratorObj, iteratorObj, cx->names().next, &res)) {
+ return false;
+ }
+
+ iterator = iteratorObj;
+ nextMethod = res;
+ return true;
+}
+
+inline bool ForOfIterator::nextFromOptimizedArray(MutableHandleValue vp,
+ bool* done) {
+ MOZ_ASSERT(index != NOT_ARRAY);
+
+ if (!CheckForInterrupt(cx_)) {
+ return false;
+ }
+
+ ArrayObject* arr = &iterator->as<ArrayObject>();
+
+ if (index >= arr->length()) {
+ vp.setUndefined();
+ *done = true;
+ return true;
+ }
+ *done = false;
+
+ // Try to get array element via direct access.
+ if (index < arr->getDenseInitializedLength()) {
+ vp.set(arr->getDenseElement(index));
+ if (!vp.isMagic(JS_ELEMENTS_HOLE)) {
+ ++index;
+ return true;
+ }
+ }
+
+ return GetElement(cx_, iterator, iterator, index++, vp);
+}
+
+bool ForOfIterator::next(MutableHandleValue vp, bool* done) {
+ MOZ_ASSERT(iterator);
+ if (index != NOT_ARRAY) {
+ return nextFromOptimizedArray(vp, done);
+ }
+
+ RootedValue v(cx_);
+ if (!js::Call(cx_, nextMethod, iterator, &v)) {
+ return false;
+ }
+
+ if (!v.isObject()) {
+ return ThrowCheckIsObject(cx_, CheckIsObjectKind::IteratorNext);
+ }
+
+ RootedObject resultObj(cx_, &v.toObject());
+ if (!GetProperty(cx_, resultObj, resultObj, cx_->names().done, &v)) {
+ return false;
+ }
+
+ *done = ToBoolean(v);
+ if (*done) {
+ vp.setUndefined();
+ return true;
+ }
+
+ return GetProperty(cx_, resultObj, resultObj, cx_->names().value, vp);
+}
+
+// ES 2017 draft 0f10dba4ad18de92d47d421f378233a2eae8f077 7.4.6.
+// When completion.[[Type]] is throw.
+void ForOfIterator::closeThrow() {
+ MOZ_ASSERT(iterator);
+
+ RootedValue completionException(cx_);
+ Rooted<SavedFrame*> completionExceptionStack(cx_);
+ if (cx_->isExceptionPending()) {
+ if (!GetAndClearExceptionAndStack(cx_, &completionException,
+ &completionExceptionStack)) {
+ completionException.setUndefined();
+ completionExceptionStack = nullptr;
+ }
+ }
+
+ // Steps 1-2 (implicit)
+
+ // Step 3 (partial).
+ RootedValue returnVal(cx_);
+ if (!GetProperty(cx_, iterator, iterator, cx_->names().return_, &returnVal)) {
+ return;
+ }
+
+ // Step 4.
+ if (returnVal.isUndefined()) {
+ cx_->setPendingException(completionException, completionExceptionStack);
+ return;
+ }
+
+ // Step 3 (remaining part)
+ if (!returnVal.isObject()) {
+ JS_ReportErrorNumberASCII(cx_, GetErrorMessage, nullptr,
+ JSMSG_RETURN_NOT_CALLABLE);
+ return;
+ }
+ RootedObject returnObj(cx_, &returnVal.toObject());
+ if (!returnObj->isCallable()) {
+ JS_ReportErrorNumberASCII(cx_, GetErrorMessage, nullptr,
+ JSMSG_RETURN_NOT_CALLABLE);
+ return;
+ }
+
+ // Step 5.
+ RootedValue innerResultValue(cx_);
+ if (!js::Call(cx_, returnVal, iterator, &innerResultValue)) {
+ if (cx_->isExceptionPending()) {
+ cx_->clearPendingException();
+ }
+ }
+
+ // Step 6.
+ cx_->setPendingException(completionException, completionExceptionStack);
+}
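+
+// Illustrative sketch (hypothetical caller, not part of this file): the
+// typical loop driving ForOfIterator -- init(), then next() until done.
+// JS::ToNumber (js/Conversions.h) stands in for an arbitrary consumer.
+static bool SumIterable(JSContext* cx, JS::HandleValue iterable, double* sum) {
+  JS::ForOfIterator iter(cx);
+  if (!iter.init(iterable)) {
+    return false;
+  }
+  *sum = 0;
+  while (true) {
+    bool done;
+    JS::RootedValue v(cx);
+    if (!iter.next(&v, &done)) {
+      return false;  // An exception is pending.
+    }
+    if (done) {
+      return true;
+    }
+    double d;
+    if (!JS::ToNumber(cx, v, &d)) {
+      return false;
+    }
+    *sum += d;
+  }
+}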
diff --git a/js/src/vm/FrameIter-inl.h b/js/src/vm/FrameIter-inl.h
new file mode 100644
index 0000000000..d5c85c416b
--- /dev/null
+++ b/js/src/vm/FrameIter-inl.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_FrameIter_inl_h
+#define vm_FrameIter_inl_h
+
+#include "vm/FrameIter.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_CRASH
+
+#include "jit/JSJitFrameIter.h" // js::jit::{InlineFrameIterator,MaybeReadFallback,ReadFrame_Actuals}
+
+#include "vm/Stack-inl.h" // js::InterpreterFrame::unaliasedForEachActual
+
+namespace js {
+
+template <class Op>
+inline void FrameIter::unaliasedForEachActual(JSContext* cx, Op op) {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ interpFrame()->unaliasedForEachActual(op);
+ return;
+ case JIT:
+ MOZ_ASSERT(isJSJit());
+ if (jsJitFrame().isIonJS()) {
+ jit::MaybeReadFallback recover(cx, activation()->asJit(),
+ &jsJitFrame());
+ ionInlineFrames_.unaliasedForEachActual(cx, op, recover);
+ } else if (jsJitFrame().isBailoutJS()) {
+ // :TODO: (Bug 1070962) If we are introspecting the frame which is
+ // being bailed, then we might be in the middle of recovering
+ // instructions. Stacking computeInstructionResults implies that we
+ // might be recovering result twice. In the mean time, to avoid
+ // that, we just return Undefined values for instruction results
+ // which are not yet recovered.
+ jit::MaybeReadFallback fallback;
+ ionInlineFrames_.unaliasedForEachActual(cx, op, fallback);
+ } else {
+ MOZ_ASSERT(jsJitFrame().isBaselineJS());
+ jsJitFrame().unaliasedForEachActual(op);
+ }
+ return;
+ }
+ MOZ_CRASH("Unexpected state");
+}
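+
+// Illustrative sketch (hypothetical functor, not part of this header): an Op
+// as consumed above. unaliasedForEachActual invokes op once per actual
+// argument Value, so a counting Op writes through a pointer it captures.
+struct CountActualsOp {
+  size_t* count;
+  explicit CountActualsOp(size_t* c) : count(c) {}
+  void operator()(const JS::Value&) { ++*count; }
+};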
+
+} // namespace js
+
+#endif // vm_FrameIter_inl_h
diff --git a/js/src/vm/FrameIter.cpp b/js/src/vm/FrameIter.cpp
new file mode 100644
index 0000000000..b317168ede
--- /dev/null
+++ b/js/src/vm/FrameIter.cpp
@@ -0,0 +1,1060 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/FrameIter-inl.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_CRASH
+#include "mozilla/MaybeOneOf.h" // mozilla::MaybeOneOf
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+#include <stdlib.h> // getenv
+
+#include "jit/BaselineFrame.h" // js::jit::BaselineFrame
+#include "jit/JitFrames.h" // js::jit::EnsureUnwoundJitExitFrame
+#include "jit/JSJitFrameIter.h" // js::jit::{FrameType,InlineFrameIterator,JSJitFrameIter,MaybeReadFallback,SnapshotIterator}
+#include "js/GCAPI.h" // JS::AutoSuppressGCAnalysis
+#include "js/Principals.h" // JSSubsumesOp
+#include "js/RootingAPI.h" // JS::Rooted
+#include "vm/Activation.h" // js::Activation{,Iterator}
+#include "vm/EnvironmentObject.h" // js::CallObject
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h" // JSContext
+#include "vm/JSFunction.h" // JSFunction
+#include "vm/JSScript.h" // js::PCToLineNumber, JSScript, js::ScriptSource
+#include "vm/Runtime.h" // JSRuntime
+#include "vm/Stack.h" // js::{AbstractFramePtr,InterpreterFrame,MaybeCheckAliasing}
+#include "wasm/WasmFrameIter.h" // js::wasm::WasmFrameIter
+#include "wasm/WasmInstance.h" // js::wasm::Instance
+
+#include "jit/JSJitFrameIter-inl.h" // js::jit::JSJitFrameIter::baselineFrame{,NumValueSlots}
+#include "vm/Stack-inl.h" // js::AbstractFramePtr::*
+
+namespace JS {
+class JS_PUBLIC_API Realm;
+} // namespace JS
+
+namespace js {
+class ArgumentsObject;
+} // namespace js
+
+using JS::Realm;
+using JS::Rooted;
+using JS::Value;
+
+using js::AbstractFramePtr;
+using js::ArgumentsObject;
+using js::CallObject;
+using js::FrameIter;
+using js::JitFrameIter;
+using js::NonBuiltinFrameIter;
+using js::NonBuiltinScriptFrameIter;
+using js::OnlyJSJitFrameIter;
+using js::ScriptSource;
+using js::jit::JSJitFrameIter;
+
+JitFrameIter::JitFrameIter(const JitFrameIter& another) { *this = another; }
+
+JitFrameIter& JitFrameIter::operator=(const JitFrameIter& another) {
+ MOZ_ASSERT(this != &another);
+
+ act_ = another.act_;
+ mustUnwindActivation_ = another.mustUnwindActivation_;
+
+ if (isSome()) {
+ iter_.destroy();
+ }
+ if (!another.isSome()) {
+ return *this;
+ }
+
+ if (another.isJSJit()) {
+ iter_.construct<jit::JSJitFrameIter>(another.asJSJit());
+ } else {
+ MOZ_ASSERT(another.isWasm());
+ iter_.construct<wasm::WasmFrameIter>(another.asWasm());
+ }
+
+ return *this;
+}
+
+JitFrameIter::JitFrameIter(jit::JitActivation* act, bool mustUnwindActivation) {
+ act_ = act;
+ mustUnwindActivation_ = mustUnwindActivation;
+ MOZ_ASSERT(act->hasExitFP(),
+ "packedExitFP is used to determine if JSJit or wasm");
+ if (act->hasJSExitFP()) {
+ iter_.construct<jit::JSJitFrameIter>(act);
+ } else {
+ MOZ_ASSERT(act->hasWasmExitFP());
+ iter_.construct<wasm::WasmFrameIter>(act);
+ }
+ settle();
+}
+
+void JitFrameIter::skipNonScriptedJSFrames() {
+ if (isJSJit()) {
+ // Stop at the first scripted frame.
+ jit::JSJitFrameIter& frames = asJSJit();
+ while (!frames.isScripted() && !frames.done()) {
+ ++frames;
+ }
+ settle();
+ }
+}
+
+bool JitFrameIter::isSelfHostedIgnoringInlining() const {
+ MOZ_ASSERT(!done());
+
+ if (isWasm()) {
+ return false;
+ }
+
+ return asJSJit().script()->selfHosted();
+}
+
+JS::Realm* JitFrameIter::realm() const {
+ MOZ_ASSERT(!done());
+
+ if (isWasm()) {
+ return asWasm().instance()->realm();
+ }
+
+ return asJSJit().script()->realm();
+}
+
+uint8_t* JitFrameIter::resumePCinCurrentFrame() const {
+ if (isWasm()) {
+ return asWasm().resumePCinCurrentFrame();
+ }
+ return asJSJit().resumePCinCurrentFrame();
+}
+
+bool JitFrameIter::done() const {
+ if (!isSome()) {
+ return true;
+ }
+ if (isJSJit()) {
+ return asJSJit().done();
+ }
+ if (isWasm()) {
+ return asWasm().done();
+ }
+ MOZ_CRASH("unhandled case");
+}
+
+void JitFrameIter::settle() {
+ if (isJSJit()) {
+ const jit::JSJitFrameIter& jitFrame = asJSJit();
+ if (jitFrame.type() != jit::FrameType::WasmToJSJit) {
+ return;
+ }
+
+ // Transition from js jit frames to wasm frames: we're on the
+ // wasm-to-jit fast path. The current stack layout is as follows:
+ // (stack grows downward)
+ //
+ // [--------------------]
+ // [WASM FUNC ]
+ // [WASM JIT EXIT FRAME ]
+ // [JIT WASM ENTRY FRAME] <-- we're here.
+ //
+    // So prevFP points to the wasm jit exit FP, maintaining the invariant in
+ // WasmFrameIter that the first frame is an exit frame and can be
+ // popped.
+
+ wasm::Frame* prevFP = (wasm::Frame*)jitFrame.prevFp();
+
+ if (mustUnwindActivation_) {
+ act_->setWasmExitFP(prevFP);
+ }
+
+ iter_.destroy();
+ iter_.construct<wasm::WasmFrameIter>(act_, prevFP);
+ MOZ_ASSERT(!asWasm().done());
+ return;
+ }
+
+ if (isWasm()) {
+ const wasm::WasmFrameIter& wasmFrame = asWasm();
+ if (!wasmFrame.hasUnwoundJitFrame()) {
+ return;
+ }
+
+ // Transition from wasm frames to jit frames: we're on the
+ // jit-to-wasm fast path. The current stack layout is as follows:
+ // (stack grows downward)
+ //
+ // [--------------------]
+ // [JIT FRAME ]
+ // [WASM JIT ENTRY FRAME] <-- we're here
+ //
+ // The wasm iterator has saved the previous jit frame pointer for us.
+
+ MOZ_ASSERT(wasmFrame.done());
+ uint8_t* prevFP = wasmFrame.unwoundCallerFP();
+ jit::FrameType prevFrameType = wasmFrame.unwoundJitFrameType();
+
+ if (mustUnwindActivation_) {
+ act_->setJSExitFP(prevFP);
+ }
+
+ iter_.destroy();
+ iter_.construct<jit::JSJitFrameIter>(act_, prevFrameType, prevFP);
+ MOZ_ASSERT(!asJSJit().done());
+ return;
+ }
+}
+
+void JitFrameIter::operator++() {
+ MOZ_ASSERT(isSome());
+ if (isJSJit()) {
+ const jit::JSJitFrameIter& jitFrame = asJSJit();
+
+ jit::JitFrameLayout* prevFrame = nullptr;
+ if (mustUnwindActivation_ && jitFrame.isScripted()) {
+ prevFrame = jitFrame.jsFrame();
+ }
+
+ ++asJSJit();
+
+ if (prevFrame) {
+ // Unwind the frame by updating packedExitFP. This is necessary
+ // so that (1) debugger exception unwind and leave frame hooks
+ // don't see this frame when they use ScriptFrameIter, and (2)
+ // ScriptFrameIter does not crash when accessing an IonScript
+ // that's destroyed by the ionScript->decref call.
+ EnsureUnwoundJitExitFrame(act_, prevFrame);
+ }
+ } else if (isWasm()) {
+ ++asWasm();
+ } else {
+ MOZ_CRASH("unhandled case");
+ }
+ settle();
+}
+
+OnlyJSJitFrameIter::OnlyJSJitFrameIter(jit::JitActivation* act)
+ : JitFrameIter(act) {
+ settle();
+}
+
+OnlyJSJitFrameIter::OnlyJSJitFrameIter(const ActivationIterator& iter)
+ : OnlyJSJitFrameIter(iter->asJit()) {}
+
+/*****************************************************************************/
+
+void FrameIter::popActivation() {
+ ++data_.activations_;
+ settleOnActivation();
+}
+
+bool FrameIter::principalsSubsumeFrame() const {
+  // If the caller supplied principals, only show frames which are subsumed
+  // by these principals, i.e. frames of the same origin or of an origin
+  // accessible to these principals.
+
+ MOZ_ASSERT(!done());
+
+ if (!data_.principals_) {
+ return true;
+ }
+
+ JSSubsumesOp subsumes = data_.cx_->runtime()->securityCallbacks->subsumes;
+ if (!subsumes) {
+ return true;
+ }
+
+ JS::AutoSuppressGCAnalysis nogc;
+ return subsumes(data_.principals_, realm()->principals());
+}
+
+void FrameIter::popInterpreterFrame() {
+ MOZ_ASSERT(data_.state_ == INTERP);
+
+ ++data_.interpFrames_;
+
+ if (data_.interpFrames_.done()) {
+ popActivation();
+ } else {
+ data_.pc_ = data_.interpFrames_.pc();
+ }
+}
+
+void FrameIter::settleOnActivation() {
+ MOZ_ASSERT(!data_.cx_->inUnsafeCallWithABI);
+
+ while (true) {
+ if (data_.activations_.done()) {
+ data_.state_ = DONE;
+ return;
+ }
+
+ Activation* activation = data_.activations_.activation();
+
+ if (activation->isJit()) {
+ data_.jitFrames_ = JitFrameIter(activation->asJit());
+ data_.jitFrames_.skipNonScriptedJSFrames();
+ if (data_.jitFrames_.done()) {
+        // It's possible to have a JitActivation with no scripted
+ // frames, for instance if we hit an over-recursion during
+ // bailout.
+ ++data_.activations_;
+ continue;
+ }
+ data_.state_ = JIT;
+ nextJitFrame();
+ return;
+ }
+
+ MOZ_ASSERT(activation->isInterpreter());
+
+ InterpreterActivation* interpAct = activation->asInterpreter();
+ data_.interpFrames_ = InterpreterFrameIterator(interpAct);
+
+ // If we OSR'ed into JIT code, skip the interpreter frame so that
+ // the same frame is not reported twice.
+ if (data_.interpFrames_.frame()->runningInJit()) {
+ ++data_.interpFrames_;
+ if (data_.interpFrames_.done()) {
+ ++data_.activations_;
+ continue;
+ }
+ }
+
+ MOZ_ASSERT(!data_.interpFrames_.frame()->runningInJit());
+ data_.pc_ = data_.interpFrames_.pc();
+ data_.state_ = INTERP;
+ return;
+ }
+}
+
+FrameIter::Data::Data(JSContext* cx, DebuggerEvalOption debuggerEvalOption,
+ JSPrincipals* principals)
+ : cx_(cx),
+ debuggerEvalOption_(debuggerEvalOption),
+ principals_(principals),
+ state_(DONE),
+ pc_(nullptr),
+ interpFrames_(nullptr),
+ activations_(cx),
+ ionInlineFrameNo_(0) {}
+
+FrameIter::Data::Data(const FrameIter::Data& other) = default;
+
+FrameIter::FrameIter(JSContext* cx, DebuggerEvalOption debuggerEvalOption)
+ : data_(cx, debuggerEvalOption, nullptr),
+ ionInlineFrames_(cx, (js::jit::JSJitFrameIter*)nullptr) {
+ settleOnActivation();
+
+ // No principals so we can see all frames.
+ MOZ_ASSERT_IF(!done(), principalsSubsumeFrame());
+}
+
+FrameIter::FrameIter(JSContext* cx, DebuggerEvalOption debuggerEvalOption,
+ JSPrincipals* principals)
+ : data_(cx, debuggerEvalOption, principals),
+ ionInlineFrames_(cx, (js::jit::JSJitFrameIter*)nullptr) {
+ settleOnActivation();
+
+ // If we're not allowed to see this frame, call operator++ to skip this (and
+ // other) cross-origin frames.
+ if (!done() && !principalsSubsumeFrame()) {
+ ++*this;
+ }
+}
+
+FrameIter::FrameIter(const FrameIter& other)
+ : data_(other.data_),
+ ionInlineFrames_(other.data_.cx_,
+ isIonScripted() ? &other.ionInlineFrames_ : nullptr) {}
+
+FrameIter::FrameIter(const Data& data)
+ : data_(data),
+ ionInlineFrames_(data.cx_, isIonScripted() ? &jsJitFrame() : nullptr) {
+ MOZ_ASSERT(data.cx_);
+ if (isIonScripted()) {
+ while (ionInlineFrames_.frameNo() != data.ionInlineFrameNo_) {
+ ++ionInlineFrames_;
+ }
+ }
+}
+
+void FrameIter::nextJitFrame() {
+ MOZ_ASSERT(data_.jitFrames_.isSome());
+
+ if (isJSJit()) {
+ if (jsJitFrame().isIonScripted()) {
+ ionInlineFrames_.resetOn(&jsJitFrame());
+ data_.pc_ = ionInlineFrames_.pc();
+ } else {
+ MOZ_ASSERT(jsJitFrame().isBaselineJS());
+ jsJitFrame().baselineScriptAndPc(nullptr, &data_.pc_);
+ }
+ return;
+ }
+
+ MOZ_ASSERT(isWasm());
+ data_.pc_ = nullptr;
+}
+
+void FrameIter::popJitFrame() {
+ MOZ_ASSERT(data_.state_ == JIT);
+ MOZ_ASSERT(data_.jitFrames_.isSome());
+
+ if (isJSJit() && jsJitFrame().isIonScripted() && ionInlineFrames_.more()) {
+ ++ionInlineFrames_;
+ data_.pc_ = ionInlineFrames_.pc();
+ return;
+ }
+
+ ++data_.jitFrames_;
+ data_.jitFrames_.skipNonScriptedJSFrames();
+
+ if (!data_.jitFrames_.done()) {
+ nextJitFrame();
+ } else {
+ data_.jitFrames_.reset();
+ popActivation();
+ }
+}
+
+FrameIter& FrameIter::operator++() {
+ while (true) {
+ switch (data_.state_) {
+ case DONE:
+ MOZ_CRASH("Unexpected state");
+ case INTERP:
+ if (interpFrame()->isDebuggerEvalFrame() &&
+ data_.debuggerEvalOption_ == FOLLOW_DEBUGGER_EVAL_PREV_LINK) {
+ AbstractFramePtr eifPrev = interpFrame()->evalInFramePrev();
+
+ popInterpreterFrame();
+
+ while (!hasUsableAbstractFramePtr() ||
+ abstractFramePtr() != eifPrev) {
+ if (data_.state_ == JIT) {
+ popJitFrame();
+ } else {
+ popInterpreterFrame();
+ }
+ }
+
+ break;
+ }
+ popInterpreterFrame();
+ break;
+ case JIT:
+ popJitFrame();
+ break;
+ }
+
+ if (done() || principalsSubsumeFrame()) {
+ break;
+ }
+ }
+
+ return *this;
+}
+
+FrameIter::Data* FrameIter::copyData() const {
+ Data* data = data_.cx_->new_<Data>(data_);
+ if (!data) {
+ return nullptr;
+ }
+
+  if (isIonScripted()) {
+ data->ionInlineFrameNo_ = ionInlineFrames_.frameNo();
+ }
+ return data;
+}
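+
+// Illustrative sketch (hypothetical use, not part of this file): Data lets a
+// caller heap-save an iteration point and resume it later through the
+// FrameIter(const Data&) constructor -- valid only while the saved frame is
+// still on the stack.
+static bool SaveAndResume(js::FrameIter& iter) {
+  js::FrameIter::Data* data = iter.copyData();
+  if (!data) {
+    return false;
+  }
+  js::FrameIter resumed(*data);  // Picks up at the saved frame.
+  bool same = resumed.rawFramePtr() == iter.rawFramePtr();
+  js_delete(data);
+  return same;
+}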
+
+void* FrameIter::rawFramePtr() const {
+ switch (data_.state_) {
+ case DONE:
+ return nullptr;
+ case INTERP:
+ return interpFrame();
+ case JIT:
+ if (isJSJit()) {
+ return jsJitFrame().fp();
+ }
+ MOZ_ASSERT(isWasm());
+ return nullptr;
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+JS::Compartment* FrameIter::compartment() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ case JIT:
+ return data_.activations_->compartment();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+Realm* FrameIter::realm() const {
+ MOZ_ASSERT(!done());
+
+ if (hasScript()) {
+ return script()->realm();
+ }
+
+ return wasmInstance()->realm();
+}
+
+bool FrameIter::isEvalFrame() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ return interpFrame()->isEvalFrame();
+ case JIT:
+ if (isJSJit()) {
+ if (jsJitFrame().isBaselineJS()) {
+ return jsJitFrame().baselineFrame()->isEvalFrame();
+ }
+ MOZ_ASSERT(!script()->isForEval());
+ return false;
+ }
+ MOZ_ASSERT(isWasm());
+ return false;
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+bool FrameIter::isModuleFrame() const {
+ MOZ_ASSERT(!done());
+
+ if (hasScript()) {
+ return script()->isModule();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+bool FrameIter::isFunctionFrame() const {
+ MOZ_ASSERT(!done());
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ return interpFrame()->isFunctionFrame();
+ case JIT:
+ if (isJSJit()) {
+ if (jsJitFrame().isBaselineJS()) {
+ return jsJitFrame().baselineFrame()->isFunctionFrame();
+ }
+ return script()->isFunction();
+ }
+ MOZ_ASSERT(isWasm());
+ return false;
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+JSAtom* FrameIter::maybeFunctionDisplayAtom() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ case JIT:
+ if (isWasm()) {
+ return wasmFrame().functionDisplayAtom();
+ }
+ if (isFunctionFrame()) {
+ return calleeTemplate()->displayAtom();
+ }
+ return nullptr;
+ }
+
+ MOZ_CRASH("Unexpected state");
+}
+
+ScriptSource* FrameIter::scriptSource() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ case JIT:
+ return script()->scriptSource();
+ }
+
+ MOZ_CRASH("Unexpected state");
+}
+
+const char* FrameIter::filename() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ case JIT:
+ if (isWasm()) {
+ return wasmFrame().filename();
+ }
+ return script()->filename();
+ }
+
+ MOZ_CRASH("Unexpected state");
+}
+
+const char16_t* FrameIter::displayURL() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ case JIT:
+ if (isWasm()) {
+ return wasmFrame().displayURL();
+ }
+ ScriptSource* ss = script()->scriptSource();
+ return ss->hasDisplayURL() ? ss->displayURL() : nullptr;
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+unsigned FrameIter::computeLine(uint32_t* column) const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ case JIT:
+ if (isWasm()) {
+ return wasmFrame().computeLine(column);
+ }
+ return PCToLineNumber(script(), pc(), column);
+ }
+
+ MOZ_CRASH("Unexpected state");
+}
+
+bool FrameIter::mutedErrors() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ case JIT:
+ if (isWasm()) {
+ return wasmFrame().mutedErrors();
+ }
+ return script()->mutedErrors();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+bool FrameIter::isConstructing() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case JIT:
+ MOZ_ASSERT(isJSJit());
+ if (jsJitFrame().isIonScripted()) {
+ return ionInlineFrames_.isConstructing();
+ }
+ MOZ_ASSERT(jsJitFrame().isBaselineJS());
+ return jsJitFrame().isConstructing();
+ case INTERP:
+ return interpFrame()->isConstructing();
+ }
+
+ MOZ_CRASH("Unexpected state");
+}
+
+bool FrameIter::ensureHasRematerializedFrame(JSContext* cx) {
+ MOZ_ASSERT(isIon());
+ return !!activation()->asJit()->getRematerializedFrame(cx, jsJitFrame());
+}
+
+bool FrameIter::hasUsableAbstractFramePtr() const {
+ switch (data_.state_) {
+ case DONE:
+ return false;
+ case JIT:
+ if (isJSJit()) {
+ if (jsJitFrame().isBaselineJS()) {
+ return true;
+ }
+
+ MOZ_ASSERT(jsJitFrame().isIonScripted());
+ return !!activation()->asJit()->lookupRematerializedFrame(
+ jsJitFrame().fp(), ionInlineFrames_.frameNo());
+ }
+ MOZ_ASSERT(isWasm());
+ return wasmFrame().debugEnabled();
+ case INTERP:
+ return true;
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+AbstractFramePtr FrameIter::abstractFramePtr() const {
+ MOZ_ASSERT(hasUsableAbstractFramePtr());
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case JIT: {
+ if (isJSJit()) {
+ if (jsJitFrame().isBaselineJS()) {
+ return jsJitFrame().baselineFrame();
+ }
+ MOZ_ASSERT(isIonScripted());
+ return activation()->asJit()->lookupRematerializedFrame(
+ jsJitFrame().fp(), ionInlineFrames_.frameNo());
+ }
+ MOZ_ASSERT(isWasm());
+ MOZ_ASSERT(wasmFrame().debugEnabled());
+ return wasmFrame().debugFrame();
+ }
+ case INTERP:
+ MOZ_ASSERT(interpFrame());
+ return AbstractFramePtr(interpFrame());
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+void FrameIter::updatePcQuadratic() {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP: {
+ InterpreterFrame* frame = interpFrame();
+ InterpreterActivation* activation = data_.activations_->asInterpreter();
+
+ // Look for the current frame.
+ data_.interpFrames_ = InterpreterFrameIterator(activation);
+ while (data_.interpFrames_.frame() != frame) {
+ ++data_.interpFrames_;
+ }
+
+ // Update the pc.
+ MOZ_ASSERT(data_.interpFrames_.frame() == frame);
+ data_.pc_ = data_.interpFrames_.pc();
+ return;
+ }
+ case JIT:
+ if (jsJitFrame().isBaselineJS()) {
+ jit::BaselineFrame* frame = jsJitFrame().baselineFrame();
+ jit::JitActivation* activation = data_.activations_->asJit();
+
+ // activation's exitFP may be invalid, so create a new
+ // activation iterator.
+ data_.activations_ = ActivationIterator(data_.cx_);
+ while (data_.activations_.activation() != activation) {
+ ++data_.activations_;
+ }
+
+ // Look for the current frame.
+ data_.jitFrames_ = JitFrameIter(data_.activations_->asJit());
+ while (!isJSJit() || !jsJitFrame().isBaselineJS() ||
+ jsJitFrame().baselineFrame() != frame) {
+ ++data_.jitFrames_;
+ }
+
+ // Update the pc.
+ MOZ_ASSERT(jsJitFrame().baselineFrame() == frame);
+ jsJitFrame().baselineScriptAndPc(nullptr, &data_.pc_);
+ return;
+ }
+ break;
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+void FrameIter::wasmUpdateBytecodeOffset() {
+ MOZ_RELEASE_ASSERT(isWasm(), "Unexpected state");
+
+ wasm::DebugFrame* frame = wasmFrame().debugFrame();
+
+ // Relookup the current frame, updating the bytecode offset in the process.
+ data_.jitFrames_ = JitFrameIter(data_.activations_->asJit());
+ while (wasmFrame().debugFrame() != frame) {
+ ++data_.jitFrames_;
+ }
+
+ MOZ_ASSERT(wasmFrame().debugFrame() == frame);
+}
+
+JSFunction* FrameIter::calleeTemplate() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ MOZ_ASSERT(isFunctionFrame());
+ return &interpFrame()->callee();
+ case JIT:
+ if (jsJitFrame().isBaselineJS()) {
+ return jsJitFrame().callee();
+ }
+ MOZ_ASSERT(jsJitFrame().isIonScripted());
+ return ionInlineFrames_.calleeTemplate();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+JSFunction* FrameIter::callee(JSContext* cx) const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ return calleeTemplate();
+ case JIT:
+ if (isIonScripted()) {
+ jit::MaybeReadFallback recover(cx, activation()->asJit(),
+ &jsJitFrame());
+ return ionInlineFrames_.callee(recover);
+ }
+ MOZ_ASSERT(jsJitFrame().isBaselineJS());
+ return calleeTemplate();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+bool FrameIter::matchCallee(JSContext* cx, JS::Handle<JSFunction*> fun) const {
+  // Use the calleeTemplate to rule out a match without needing to invalidate
+  // to find the actual callee. The real callee may be a clone of the template,
+  // which should *not* be considered a match.
+ Rooted<JSFunction*> currentCallee(cx, calleeTemplate());
+
+ if (currentCallee->nargs() != fun->nargs()) {
+ return false;
+ }
+
+ if (currentCallee->flags().stableAcrossClones() !=
+ fun->flags().stableAcrossClones()) {
+ return false;
+ }
+
+ // The calleeTemplate for a callee will always have the same BaseScript. If
+ // the script clones do not use the same script, they also have a different
+ // group and Ion will not inline them interchangeably.
+ //
+ // See: js::jit::InlineFrameIterator::findNextFrame()
+ if (currentCallee->hasBaseScript()) {
+ if (currentCallee->baseScript() != fun->baseScript()) {
+ return false;
+ }
+ }
+
+ return callee(cx) == fun;
+}
+
+unsigned FrameIter::numActualArgs() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case INTERP:
+ MOZ_ASSERT(isFunctionFrame());
+ return interpFrame()->numActualArgs();
+ case JIT:
+ if (isIonScripted()) {
+ return ionInlineFrames_.numActualArgs();
+ }
+ MOZ_ASSERT(jsJitFrame().isBaselineJS());
+ return jsJitFrame().numActualArgs();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+unsigned FrameIter::numFormalArgs() const {
+ return script()->function()->nargs();
+}
+
+Value FrameIter::unaliasedActual(unsigned i,
+ MaybeCheckAliasing checkAliasing) const {
+ return abstractFramePtr().unaliasedActual(i, checkAliasing);
+}
+
+JSObject* FrameIter::environmentChain(JSContext* cx) const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case JIT:
+ if (isJSJit()) {
+ if (isIonScripted()) {
+ jit::MaybeReadFallback recover(cx, activation()->asJit(),
+ &jsJitFrame());
+ return ionInlineFrames_.environmentChain(recover);
+ }
+ return jsJitFrame().baselineFrame()->environmentChain();
+ }
+ MOZ_ASSERT(isWasm());
+ return wasmFrame().debugFrame()->environmentChain();
+ case INTERP:
+ return interpFrame()->environmentChain();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+bool FrameIter::hasInitialEnvironment(JSContext* cx) const {
+ if (hasUsableAbstractFramePtr()) {
+ return abstractFramePtr().hasInitialEnvironment();
+ }
+
+ if (isWasm()) {
+ // See JSFunction::needsFunctionEnvironmentObjects().
+ return false;
+ }
+
+ MOZ_ASSERT(isJSJit());
+ MOZ_ASSERT(isIonScripted());
+
+ bool hasInitialEnv = false;
+ jit::MaybeReadFallback recover(cx, activation()->asJit(), &jsJitFrame());
+ ionInlineFrames_.environmentChain(recover, &hasInitialEnv);
+
+ return hasInitialEnv;
+}
+
+CallObject& FrameIter::callObj(JSContext* cx) const {
+ MOZ_ASSERT(calleeTemplate()->needsCallObject());
+ MOZ_ASSERT(hasInitialEnvironment(cx));
+
+ JSObject* pobj = environmentChain(cx);
+ while (!pobj->is<CallObject>()) {
+ pobj = pobj->enclosingEnvironment();
+ }
+ return pobj->as<CallObject>();
+}
+
+bool FrameIter::hasArgsObj() const { return abstractFramePtr().hasArgsObj(); }
+
+ArgumentsObject& FrameIter::argsObj() const {
+ MOZ_ASSERT(hasArgsObj());
+ return abstractFramePtr().argsObj();
+}
+
+Value FrameIter::thisArgument(JSContext* cx) const {
+ MOZ_ASSERT(isFunctionFrame());
+
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case JIT:
+ if (isIonScripted()) {
+ jit::MaybeReadFallback recover(cx, activation()->asJit(),
+ &jsJitFrame());
+ return ionInlineFrames_.thisArgument(recover);
+ }
+ return jsJitFrame().baselineFrame()->thisArgument();
+ case INTERP:
+ return interpFrame()->thisArgument();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+Value FrameIter::returnValue() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case JIT:
+ if (jsJitFrame().isBaselineJS()) {
+ return jsJitFrame().baselineFrame()->returnValue();
+ }
+ break;
+ case INTERP:
+ return interpFrame()->returnValue();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+void FrameIter::setReturnValue(const Value& v) {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case JIT:
+ if (jsJitFrame().isBaselineJS()) {
+ jsJitFrame().baselineFrame()->setReturnValue(v);
+ return;
+ }
+ break;
+ case INTERP:
+ interpFrame()->setReturnValue(v);
+ return;
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+size_t FrameIter::numFrameSlots() const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case JIT: {
+ if (isIonScripted()) {
+ return ionInlineFrames_.snapshotIterator().numAllocations() -
+ ionInlineFrames_.script()->nfixed();
+ }
+ uint32_t numValueSlots = jsJitFrame().baselineFrameNumValueSlots();
+ return numValueSlots - jsJitFrame().script()->nfixed();
+ }
+ case INTERP:
+ MOZ_ASSERT(data_.interpFrames_.sp() >= interpFrame()->base());
+ return data_.interpFrames_.sp() - interpFrame()->base();
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+Value FrameIter::frameSlotValue(size_t index) const {
+ switch (data_.state_) {
+ case DONE:
+ break;
+ case JIT:
+ if (isIonScripted()) {
+ jit::SnapshotIterator si(ionInlineFrames_.snapshotIterator());
+ index += ionInlineFrames_.script()->nfixed();
+ return si.maybeReadAllocByIndex(index);
+ }
+ index += jsJitFrame().script()->nfixed();
+ return *jsJitFrame().baselineFrame()->valueSlot(index);
+ case INTERP:
+ return interpFrame()->base()[index];
+ }
+ MOZ_CRASH("Unexpected state");
+}
+
+#ifdef DEBUG
+bool js::SelfHostedFramesVisible() {
+ static bool checked = false;
+ static bool visible = false;
+ if (!checked) {
+ checked = true;
+ char* env = getenv("MOZ_SHOW_ALL_JS_FRAMES");
+ visible = !!env;
+ }
+ return visible;
+}
+#endif
+
+void NonBuiltinFrameIter::settle() {
+ if (!SelfHostedFramesVisible()) {
+ while (!done() && hasScript() && script()->selfHosted()) {
+ FrameIter::operator++();
+ }
+ }
+}
+
+void NonBuiltinScriptFrameIter::settle() {
+ if (!SelfHostedFramesVisible()) {
+ while (!done() && script()->selfHosted()) {
+ ScriptFrameIter::operator++();
+ }
+ }
+}
+
+bool FrameIter::inPrologue() const {
+ if (pc() < script()->main()) {
+ return true;
+ }
+ // If we do a VM call before pushing locals in baseline, the stack frame will
+ // not include space for those locals.
+ if (pc() == script()->code() && isBaseline() &&
+ jsJitFrame().baselineFrameNumValueSlots() < script()->nfixed()) {
+ return true;
+ }
+
+ return false;
+}
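+
+// Illustrative sketch (hypothetical debugging helper, not part of this
+// file; assumes <cstdio>): walking the stack with FrameIter and printing one
+// line per scripted frame. Wasm frames have no JSScript and are skipped.
+static void DumpScriptedFrames(JSContext* cx) {
+  for (FrameIter iter(cx); !iter.done(); ++iter) {
+    if (!iter.hasScript()) {
+      continue;
+    }
+    uint32_t column = 0;
+    unsigned line = iter.computeLine(&column);
+    fprintf(stderr, "%s:%u:%u\n", iter.filename(), line, column);
+  }
+}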
diff --git a/js/src/vm/FrameIter.h b/js/src/vm/FrameIter.h
new file mode 100644
index 0000000000..c3561e0247
--- /dev/null
+++ b/js/src/vm/FrameIter.h
@@ -0,0 +1,586 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_FrameIter_h
+#define vm_FrameIter_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Attributes.h" // MOZ_IMPLICIT, MOZ_RAII
+#include "mozilla/MaybeOneOf.h" // mozilla::MaybeOneOf
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t, uintptr_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "jit/JSJitFrameIter.h" // js::jit::{InlineFrameIterator,JSJitFrameIter}
+#include "js/RootingAPI.h" // JS::Handle, JS::Rooted
+#include "js/TypeDecls.h" // jsbytecode, JSContext, JSAtom, JSFunction, JSObject, JSScript
+#include "js/Value.h" // JS::Value
+#include "vm/Activation.h" // js::InterpreterActivation
+#include "vm/Stack.h" // js::{AbstractFramePtr,MaybeCheckAliasing}
+#include "wasm/WasmFrameIter.h" // js::wasm::{ExitReason,RegisterState,WasmFrameIter}
+
+struct JSPrincipals;
+
+namespace JS {
+
+class JS_PUBLIC_API Compartment;
+class JS_PUBLIC_API Realm;
+
+} // namespace JS
+
+namespace js {
+
+class ArgumentsObject;
+class CallObject;
+
+namespace jit {
+class CommonFrameLayout;
+class JitActivation;
+} // namespace jit
+
+namespace wasm {
+class Instance;
+} // namespace wasm
+
+// Iterates over the frames of a single InterpreterActivation.
+class InterpreterFrameIterator {
+ InterpreterActivation* activation_;
+ InterpreterFrame* fp_;
+ jsbytecode* pc_;
+ JS::Value* sp_;
+
+ public:
+ explicit InterpreterFrameIterator(InterpreterActivation* activation)
+ : activation_(activation), fp_(nullptr), pc_(nullptr), sp_(nullptr) {
+ if (activation) {
+ fp_ = activation->current();
+ pc_ = activation->regs().pc;
+ sp_ = activation->regs().sp;
+ }
+ }
+
+ InterpreterFrame* frame() const {
+ MOZ_ASSERT(!done());
+ return fp_;
+ }
+ jsbytecode* pc() const {
+ MOZ_ASSERT(!done());
+ return pc_;
+ }
+ JS::Value* sp() const {
+ MOZ_ASSERT(!done());
+ return sp_;
+ }
+
+ InterpreterFrameIterator& operator++();
+
+ bool done() const { return fp_ == nullptr; }
+};
+
+// A JitFrameIter can iterate over all kind of frames emitted by our code
+// generators, be they composed of JS jit frames or wasm frames, interleaved or
+// not, in any order.
+//
+// In the following class:
+// - code generated for JS is referred to as JSJit.
+// - code generated for wasm is referred to as Wasm.
+// Also, Jit refers to any one of them.
+//
+// JitFrameIter uses JSJitFrameIter to iterate over JSJit code or a
+// WasmFrameIter to iterate over wasm code; only one of them is active at the
+// time. When a sub-iterator is done, the JitFrameIter knows how to stop, move
+// onto the next activation or move onto another kind of Jit code.
+//
+// For ease of use, there is also OnlyJSJitFrameIter, which skips all the
+// non-JSJit frames.
+//
+// Note it is allowed to get a handle to the internal frame iterator via
+// asJSJit() and asWasm(), but the user has to be careful not to use those
+// handles after the JitFrameIter goes out of scope or operator++ is called.
+//
+// In particular, this can handle the transition from wasm to jit and from jit
+// to wasm, since these can be interleaved in the same JitActivation.
+class JitFrameIter {
+ protected:
+ jit::JitActivation* act_ = nullptr;
+ mozilla::MaybeOneOf<jit::JSJitFrameIter, wasm::WasmFrameIter> iter_ = {};
+ bool mustUnwindActivation_ = false;
+
+ void settle();
+
+ public:
+ JitFrameIter() = default;
+
+ explicit JitFrameIter(jit::JitActivation* activation,
+ bool mustUnwindActivation = false);
+
+ explicit JitFrameIter(const JitFrameIter& another);
+ JitFrameIter& operator=(const JitFrameIter& another);
+
+ bool isSome() const { return !iter_.empty(); }
+ void reset() {
+ MOZ_ASSERT(isSome());
+ iter_.destroy();
+ }
+
+ bool isJSJit() const {
+ return isSome() && iter_.constructed<jit::JSJitFrameIter>();
+ }
+ jit::JSJitFrameIter& asJSJit() { return iter_.ref<jit::JSJitFrameIter>(); }
+ const jit::JSJitFrameIter& asJSJit() const {
+ return iter_.ref<jit::JSJitFrameIter>();
+ }
+
+ bool isWasm() const {
+ return isSome() && iter_.constructed<wasm::WasmFrameIter>();
+ }
+ wasm::WasmFrameIter& asWasm() { return iter_.ref<wasm::WasmFrameIter>(); }
+ const wasm::WasmFrameIter& asWasm() const {
+ return iter_.ref<wasm::WasmFrameIter>();
+ }
+
+ // Operations common to all frame iterators.
+ const jit::JitActivation* activation() const { return act_; }
+ bool done() const;
+ void operator++();
+
+ JS::Realm* realm() const;
+
+ // Returns the address of the next instruction that will execute in this
+ // frame, once control returns to this frame.
+ uint8_t* resumePCinCurrentFrame() const;
+
+ // Operations which have an effect only on JIT frames.
+ void skipNonScriptedJSFrames();
+
+ // Returns true iff this is a JIT frame with a self-hosted script. Note: be
+ // careful, JitFrameIter does not consider functions inlined by Ion.
+ bool isSelfHostedIgnoringInlining() const;
+};
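+
+// Illustrative sketch (not part of this header): the mozilla::MaybeOneOf
+// pattern JitFrameIter is built on -- at most one alternative is live, and
+// callers check constructed<T>() before taking a ref<T>().
+static inline int DescribeSlot(const mozilla::MaybeOneOf<int, double>& slot) {
+  if (slot.empty()) {
+    return 0;  // Neither alternative constructed.
+  }
+  return slot.constructed<int>() ? 1 : 2;
+}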
+
+// A JitFrameIter that skips all the non-JSJit frames, skipping interleaved
+// frames of any other kind.
+
+class OnlyJSJitFrameIter : public JitFrameIter {
+ void settle() {
+ while (!done() && !isJSJit()) {
+ JitFrameIter::operator++();
+ }
+ }
+
+ public:
+ explicit OnlyJSJitFrameIter(jit::JitActivation* act);
+  explicit OnlyJSJitFrameIter(const ActivationIterator& iter);
+
+ void operator++() {
+ JitFrameIter::operator++();
+ settle();
+ }
+
+ const jit::JSJitFrameIter& frame() const { return asJSJit(); }
+};
+
+class ScriptSource;
+
+// A FrameIter walks over a context's stack of JS script activations,
+// abstracting over whether the JS scripts were running in the interpreter or
+// different modes of compiled code.
+//
+// FrameIter is parameterized by what it includes in the stack iteration:
+// - When provided, the optional JSPrincipal argument will cause FrameIter to
+// only show frames in globals whose JSPrincipals are subsumed (via
+// JSSecurityCallbacks::subsume) by the given JSPrincipal.
+//
+// Additionally, there are derived FrameIter types that automatically skip
+// certain frames:
+// - ScriptFrameIter only shows frames that have an associated JSScript
+//   (currently everything other than wasm stack frames). When !hasScript(),
+//   clients must stick to the portion of the interface marked below.
+// - NonBuiltinScriptFrameIter additionally filters out builtin (self-hosted)
+// scripts.
+class FrameIter {
+ public:
+ enum DebuggerEvalOption {
+ FOLLOW_DEBUGGER_EVAL_PREV_LINK,
+ IGNORE_DEBUGGER_EVAL_PREV_LINK
+ };
+
+ enum State {
+ DONE, // when there are no more frames nor activations to unwind.
+ INTERP, // interpreter activation on the stack
+ JIT // jit or wasm activations on the stack
+ };
+
+ // Unlike ScriptFrameIter itself, ScriptFrameIter::Data can be allocated on
+ // the heap, so this structure should not contain any GC things.
+ struct Data {
+ JSContext* cx_;
+ DebuggerEvalOption debuggerEvalOption_;
+ JSPrincipals* principals_;
+
+ State state_;
+
+ jsbytecode* pc_;
+
+ InterpreterFrameIterator interpFrames_;
+ ActivationIterator activations_;
+
+ JitFrameIter jitFrames_;
+ unsigned ionInlineFrameNo_;
+
+ Data(JSContext* cx, DebuggerEvalOption debuggerEvalOption,
+ JSPrincipals* principals);
+ Data(const Data& other);
+ };
+
+ explicit FrameIter(JSContext* cx,
+ DebuggerEvalOption = FOLLOW_DEBUGGER_EVAL_PREV_LINK);
+ FrameIter(JSContext* cx, DebuggerEvalOption, JSPrincipals*);
+ FrameIter(const FrameIter& iter);
+ MOZ_IMPLICIT FrameIter(const Data& data);
+ MOZ_IMPLICIT FrameIter(AbstractFramePtr frame);
+
+ bool done() const { return data_.state_ == DONE; }
+
+ // -------------------------------------------------------
+ // The following functions can only be called when !done()
+ // -------------------------------------------------------
+
+ FrameIter& operator++();
+
+ JS::Realm* realm() const;
+ JS::Compartment* compartment() const;
+ Activation* activation() const { return data_.activations_.activation(); }
+
+ bool isInterp() const {
+ MOZ_ASSERT(!done());
+ return data_.state_ == INTERP;
+ }
+ bool isJSJit() const {
+ MOZ_ASSERT(!done());
+ return data_.state_ == JIT && data_.jitFrames_.isJSJit();
+ }
+ bool isWasm() const {
+ MOZ_ASSERT(!done());
+ return data_.state_ == JIT && data_.jitFrames_.isWasm();
+ }
+
+ inline bool isIon() const;
+ inline bool isBaseline() const;
+ inline bool isPhysicalJitFrame() const;
+
+ bool isEvalFrame() const;
+ bool isModuleFrame() const;
+ bool isFunctionFrame() const;
+ bool hasArgs() const { return isFunctionFrame(); }
+
+ ScriptSource* scriptSource() const;
+ const char* filename() const;
+ const char16_t* displayURL() const;
+ unsigned computeLine(uint32_t* column = nullptr) const;
+ JSAtom* maybeFunctionDisplayAtom() const;
+ bool mutedErrors() const;
+
+ bool hasScript() const { return !isWasm(); }
+
+ // -----------------------------------------------------------
+ // The following functions can only be called when isWasm()
+ // -----------------------------------------------------------
+
+ inline bool wasmDebugEnabled() const;
+ inline wasm::Instance* wasmInstance() const;
+ inline uint32_t wasmFuncIndex() const;
+ inline unsigned wasmBytecodeOffset() const;
+ void wasmUpdateBytecodeOffset();
+
+ // -----------------------------------------------------------
+ // The following functions can only be called when hasScript()
+ // -----------------------------------------------------------
+
+ inline JSScript* script() const;
+
+ bool isConstructing() const;
+ jsbytecode* pc() const {
+ MOZ_ASSERT(!done());
+ return data_.pc_;
+ }
+ void updatePcQuadratic();
+
+ // The function |calleeTemplate()| returns either the function from which
+ // the current |callee| was cloned or the |callee| if it can be read. As
+ // long as we do not have to investigate the environment chain or build a
+ // new frame, we should prefer to use |calleeTemplate| instead of
+ // |callee|, as requesting the |callee| might cause the invalidation of
+ // the frame. (see js::Lambda)
+ JSFunction* calleeTemplate() const;
+ JSFunction* callee(JSContext* cx) const;
+
+ JSFunction* maybeCallee(JSContext* cx) const {
+ return isFunctionFrame() ? callee(cx) : nullptr;
+ }
+
+ bool matchCallee(JSContext* cx, JS::Handle<JSFunction*> fun) const;
+
+ unsigned numActualArgs() const;
+ unsigned numFormalArgs() const;
+ JS::Value unaliasedActual(unsigned i,
+ MaybeCheckAliasing = CHECK_ALIASING) const;
+ template <class Op>
+ inline void unaliasedForEachActual(JSContext* cx, Op op);
+
+ JSObject* environmentChain(JSContext* cx) const;
+ bool hasInitialEnvironment(JSContext* cx) const;
+ CallObject& callObj(JSContext* cx) const;
+
+ bool hasArgsObj() const;
+ ArgumentsObject& argsObj() const;
+
+ // Get the original |this| value passed to this function. May not be the
+ // actual this-binding (for instance, derived class constructors will
+ // change their this-value later and non-strict functions will box
+ // primitives).
+ JS::Value thisArgument(JSContext* cx) const;
+
+ JS::Value returnValue() const;
+ void setReturnValue(const JS::Value& v);
+
+ // These are only valid for the top frame.
+ size_t numFrameSlots() const;
+ JS::Value frameSlotValue(size_t index) const;
+
+ // Ensures that we have rematerialized the top frame and its associated
+ // inline frames. Can only be called when isIon().
+ bool ensureHasRematerializedFrame(JSContext* cx);
+
+ // True when isInterp() or isBaseline(). True when isIon() if it
+ // has a rematerialized frame. False otherwise.
+ bool hasUsableAbstractFramePtr() const;
+
+ // -----------------------------------------------------------
+ // The following functions can only be called when isInterp(),
+ // isBaseline(), isWasm() or isIon(). Further, abstractFramePtr() can
+ // only be called when hasUsableAbstractFramePtr().
+ // -----------------------------------------------------------
+
+ AbstractFramePtr abstractFramePtr() const;
+ Data* copyData() const;
+
+ // This can only be called when isInterp():
+ inline InterpreterFrame* interpFrame() const;
+
+ // This can only be called when isPhysicalJitFrame():
+ inline jit::CommonFrameLayout* physicalJitFrame() const;
+
+ // This is used to provide a raw interface for debugging.
+ void* rawFramePtr() const;
+
+ bool inPrologue() const;
+
+ private:
+ Data data_;
+ jit::InlineFrameIterator ionInlineFrames_;
+
+ const jit::JSJitFrameIter& jsJitFrame() const {
+ return data_.jitFrames_.asJSJit();
+ }
+ const wasm::WasmFrameIter& wasmFrame() const {
+ return data_.jitFrames_.asWasm();
+ }
+
+ jit::JSJitFrameIter& jsJitFrame() { return data_.jitFrames_.asJSJit(); }
+ wasm::WasmFrameIter& wasmFrame() { return data_.jitFrames_.asWasm(); }
+
+ bool isIonScripted() const {
+ return isJSJit() && jsJitFrame().isIonScripted();
+ }
+
+ bool principalsSubsumeFrame() const;
+
+ void popActivation();
+ void popInterpreterFrame();
+ void nextJitFrame();
+ void popJitFrame();
+ void settleOnActivation();
+};
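+
+// A minimal usage sketch (illustrative only): walk the JS stack of a context
+// and record each script frame's source location. ScriptFrameIter below
+// automates the hasScript() filtering shown here.
+//
+//   for (FrameIter iter(cx); !iter.done(); ++iter) {
+//     if (!iter.hasScript()) {
+//       continue;  // wasm frame: no JSScript
+//     }
+//     uint32_t column;
+//     unsigned line = iter.computeLine(&column);
+//     // iter.filename(), line, and column locate the frame's current pc.
+//   }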
+
+class ScriptFrameIter : public FrameIter {
+ void settle() {
+ while (!done() && !hasScript()) {
+ FrameIter::operator++();
+ }
+ }
+
+ public:
+ explicit ScriptFrameIter(
+ JSContext* cx,
+ DebuggerEvalOption debuggerEvalOption = FOLLOW_DEBUGGER_EVAL_PREV_LINK)
+ : FrameIter(cx, debuggerEvalOption) {
+ settle();
+ }
+
+ ScriptFrameIter& operator++() {
+ FrameIter::operator++();
+ settle();
+ return *this;
+ }
+};
+
+#ifdef DEBUG
+bool SelfHostedFramesVisible();
+#else
+static inline bool SelfHostedFramesVisible() { return false; }
+#endif
+
+/* A filtering of the FrameIter to only stop at non-self-hosted scripts. */
+class NonBuiltinFrameIter : public FrameIter {
+ void settle();
+
+ public:
+ explicit NonBuiltinFrameIter(
+ JSContext* cx, FrameIter::DebuggerEvalOption debuggerEvalOption =
+ FrameIter::FOLLOW_DEBUGGER_EVAL_PREV_LINK)
+ : FrameIter(cx, debuggerEvalOption) {
+ settle();
+ }
+
+ NonBuiltinFrameIter(JSContext* cx,
+ FrameIter::DebuggerEvalOption debuggerEvalOption,
+ JSPrincipals* principals)
+ : FrameIter(cx, debuggerEvalOption, principals) {
+ settle();
+ }
+
+ NonBuiltinFrameIter(JSContext* cx, JSPrincipals* principals)
+ : FrameIter(cx, FrameIter::FOLLOW_DEBUGGER_EVAL_PREV_LINK, principals) {
+ settle();
+ }
+
+ NonBuiltinFrameIter& operator++() {
+ FrameIter::operator++();
+ settle();
+ return *this;
+ }
+};
+
+// A filtering of the ScriptFrameIter to only stop at non-self-hosted scripts.
+class NonBuiltinScriptFrameIter : public ScriptFrameIter {
+ void settle();
+
+ public:
+ explicit NonBuiltinScriptFrameIter(
+ JSContext* cx, ScriptFrameIter::DebuggerEvalOption debuggerEvalOption =
+ ScriptFrameIter::FOLLOW_DEBUGGER_EVAL_PREV_LINK)
+ : ScriptFrameIter(cx, debuggerEvalOption) {
+ settle();
+ }
+
+ NonBuiltinScriptFrameIter& operator++() {
+ ScriptFrameIter::operator++();
+ settle();
+ return *this;
+ }
+};
+
+/*
+ * Blindly iterate over all frames in the current thread's stack. These frames
+ * can be from different contexts and compartments, so beware.
+ */
+class AllFramesIter : public FrameIter {
+ public:
+ explicit AllFramesIter(JSContext* cx)
+ : FrameIter(cx, ScriptFrameIter::IGNORE_DEBUGGER_EVAL_PREV_LINK) {}
+};
+
+/* Iterates over all script frames in the current thread's stack.
+ * See also AllFramesIter and ScriptFrameIter.
+ */
+class AllScriptFramesIter : public ScriptFrameIter {
+ public:
+ explicit AllScriptFramesIter(JSContext* cx)
+ : ScriptFrameIter(cx, ScriptFrameIter::IGNORE_DEBUGGER_EVAL_PREV_LINK) {}
+};
+
+/* Popular inline definitions. */
+
+inline JSScript* FrameIter::script() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(hasScript());
+ if (data_.state_ == INTERP) {
+ return interpFrame()->script();
+ }
+ if (jsJitFrame().isIonJS()) {
+ return ionInlineFrames_.script();
+ }
+ return jsJitFrame().script();
+}
+
+inline bool FrameIter::wasmDebugEnabled() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(isWasm());
+ return wasmFrame().debugEnabled();
+}
+
+inline wasm::Instance* FrameIter::wasmInstance() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(isWasm());
+ return wasmFrame().instance();
+}
+
+inline unsigned FrameIter::wasmBytecodeOffset() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(isWasm());
+ return wasmFrame().lineOrBytecode();
+}
+
+inline uint32_t FrameIter::wasmFuncIndex() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(isWasm());
+ return wasmFrame().funcIndex();
+}
+
+inline bool FrameIter::isIon() const {
+ return isJSJit() && jsJitFrame().isIonJS();
+}
+
+inline bool FrameIter::isBaseline() const {
+ return isJSJit() && jsJitFrame().isBaselineJS();
+}
+
+inline InterpreterFrame* FrameIter::interpFrame() const {
+ MOZ_ASSERT(data_.state_ == INTERP);
+ return data_.interpFrames_.frame();
+}
+
+inline bool FrameIter::isPhysicalJitFrame() const {
+ if (!isJSJit()) {
+ return false;
+ }
+
+ auto& jitFrame = jsJitFrame();
+
+ if (jitFrame.isBaselineJS()) {
+ return true;
+ }
+
+ if (jitFrame.isIonScripted()) {
+ // Only the bottom of a group of inlined Ion frames is a physical frame.
+ return ionInlineFrames_.frameNo() == 0;
+ }
+
+ return false;
+}
+
+inline jit::CommonFrameLayout* FrameIter::physicalJitFrame() const {
+ MOZ_ASSERT(isPhysicalJitFrame());
+ return jsJitFrame().current();
+}
+
+} // namespace js
+
+#endif // vm_FrameIter_h
diff --git a/js/src/vm/FunctionFlags.cpp b/js/src/vm/FunctionFlags.cpp
new file mode 100644
index 0000000000..e4b0e1e0c9
--- /dev/null
+++ b/js/src/vm/FunctionFlags.cpp
@@ -0,0 +1,13 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/FunctionFlags.h" // js::FunctionFlags::Flags
+#include "jsfriendapi.h" // js::JS_FUNCTION_INTERPRETED_BITS
+
+static_assert((js::FunctionFlags::Flags::BASESCRIPT |
+ js::FunctionFlags::Flags::SELFHOSTLAZY) ==
+ js::JS_FUNCTION_INTERPRETED_BITS,
+ "jsfriendapi.h's FunctionFlags::INTERPRETED-alike is wrong");
diff --git a/js/src/vm/FunctionFlags.h b/js/src/vm/FunctionFlags.h
new file mode 100644
index 0000000000..a757bf4ac5
--- /dev/null
+++ b/js/src/vm/FunctionFlags.h
@@ -0,0 +1,320 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_FunctionFlags_h
+#define vm_FunctionFlags_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF
+#include "mozilla/Attributes.h" // MOZ_IMPLICIT
+
+#include <stdint.h> // uint8_t, uint16_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+class JS_PUBLIC_API JSAtom;
+
+namespace js {
+
+class FunctionFlags {
+ public:
+ enum FunctionKind : uint8_t {
+ NormalFunction = 0,
+ Arrow, // ES6 '(args) => body' syntax
+ Method, // ES6 MethodDefinition
+ ClassConstructor,
+ Getter,
+ Setter,
+ AsmJS, // An asm.js module or exported function
+ Wasm, // An exported WebAssembly function
+ FunctionKindLimit
+ };
+
+ enum Flags : uint16_t {
+ // The general kind of a function. This is used to describe characteristics
+ // of functions that do not merit a dedicated flag bit below.
+ FUNCTION_KIND_SHIFT = 0,
+ FUNCTION_KIND_MASK = 0x0007,
+
+ // The AllocKind used was FunctionExtended and extra slots were allocated.
+ // These slots may be used by the engine or the embedding so care must be
+ // taken to avoid conflicts.
+ EXTENDED = 1 << 3,
+
+ // Set if function is a self-hosted builtin or intrinsic. An 'intrinsic'
+ // here means a native function used inside self-hosted code. In general, a
+ // self-hosted function should appear to script as though it were a native
+ // builtin.
+ SELF_HOSTED = 1 << 4,
+
+ // An interpreted function has, or may lazily acquire, bytecode and an
+ // environment. Only one of these flags may be set at a time. As a memory
+ // optimization, the SELFHOSTLAZY flag indicates there is no js::BaseScript
+ // at all and we must clone from the self-hosted realm in order to get
+ // bytecode.
+ BASESCRIPT = 1 << 5,
+ SELFHOSTLAZY = 1 << 6,
+
+ // Function may be called as a constructor. This corresponds to having a
+ // [[Construct]] internal method in the spec.
+ CONSTRUCTOR = 1 << 7,
+
+ // (1 << 8) is unused.
+
+ // Function comes from a FunctionExpression, ArrowFunction, or Function()
+ // call (not a FunctionDeclaration or nonstandard function-statement).
+ LAMBDA = 1 << 9,
+
+ // The WASM function has a JIT entry which emulates the
+ // js::BaseScript::jitCodeRaw mechanism.
+ WASM_JIT_ENTRY = 1 << 10,
+
+ // Function had no explicit name, but a name was set by SetFunctionName at
+ // compile time or SetFunctionName at runtime.
+ HAS_INFERRED_NAME = 1 << 11,
+
+ // Function had no explicit name, but a name was guessed for it anyway.
+ HAS_GUESSED_ATOM = 1 << 12,
+
+ // The 'length' or 'name' property has been resolved. See fun_resolve.
+ RESOLVED_NAME = 1 << 13,
+ RESOLVED_LENGTH = 1 << 14,
+
+ // This function is kept only for skipping it over during delazification.
+ //
+ // This function is inside an arrow function's parameter expression, and is
+ // parsed twice: once before finding the "=>" token, and once after finding
+ // "=>" and rewinding to the beginning of the parameters.
+ // A ScriptStencil is created for both passes, and the first one is kept
+ // only for delazification, to make sure delazification sees the same
+ // sequence of inner functions to skip over.
+ //
+ // We call the first one a "ghost".
+ // It should be kept lazy, and shouldn't be exposed to the debugger.
+ GHOST_FUNCTION = 1 << 15,
+
+ // Shifted form of FunctionKinds.
+ NORMAL_KIND = NormalFunction << FUNCTION_KIND_SHIFT,
+ ASMJS_KIND = AsmJS << FUNCTION_KIND_SHIFT,
+ WASM_KIND = Wasm << FUNCTION_KIND_SHIFT,
+ ARROW_KIND = Arrow << FUNCTION_KIND_SHIFT,
+ METHOD_KIND = Method << FUNCTION_KIND_SHIFT,
+ CLASSCONSTRUCTOR_KIND = ClassConstructor << FUNCTION_KIND_SHIFT,
+ GETTER_KIND = Getter << FUNCTION_KIND_SHIFT,
+ SETTER_KIND = Setter << FUNCTION_KIND_SHIFT,
+
+ // Derived Flags combinations to use when creating functions.
+ NATIVE_FUN = NORMAL_KIND,
+ NATIVE_CTOR = CONSTRUCTOR | NORMAL_KIND,
+ ASMJS_CTOR = CONSTRUCTOR | ASMJS_KIND,
+ ASMJS_LAMBDA_CTOR = CONSTRUCTOR | LAMBDA | ASMJS_KIND,
+ WASM = WASM_KIND,
+ INTERPRETED_NORMAL = BASESCRIPT | CONSTRUCTOR | NORMAL_KIND,
+ INTERPRETED_CLASS_CTOR = BASESCRIPT | CONSTRUCTOR | CLASSCONSTRUCTOR_KIND,
+ INTERPRETED_GENERATOR_OR_ASYNC = BASESCRIPT | NORMAL_KIND,
+ INTERPRETED_LAMBDA = BASESCRIPT | LAMBDA | CONSTRUCTOR | NORMAL_KIND,
+ INTERPRETED_LAMBDA_ARROW = BASESCRIPT | LAMBDA | ARROW_KIND,
+ INTERPRETED_LAMBDA_GENERATOR_OR_ASYNC = BASESCRIPT | LAMBDA | NORMAL_KIND,
+ INTERPRETED_GETTER = BASESCRIPT | GETTER_KIND,
+ INTERPRETED_SETTER = BASESCRIPT | SETTER_KIND,
+ INTERPRETED_METHOD = BASESCRIPT | METHOD_KIND,
+
+ // Flags that XDR ignores. See also: js::BaseScript::MutableFlags.
+ MUTABLE_FLAGS = RESOLVED_NAME | RESOLVED_LENGTH,
+
+ // Flags preserved when cloning a function.
+ STABLE_ACROSS_CLONES =
+ CONSTRUCTOR | LAMBDA | SELF_HOSTED | FUNCTION_KIND_MASK | GHOST_FUNCTION
+ };
+
+ uint16_t flags_;
+
+ public:
+ FunctionFlags() : flags_() {
+ static_assert(sizeof(FunctionFlags) == sizeof(flags_),
+ "No extra members allowed is it'll grow JSFunction");
+ static_assert(offsetof(FunctionFlags, flags_) == 0,
+ "Required for JIT flag access");
+ }
+
+ explicit FunctionFlags(uint16_t flags) : flags_(flags) {}
+ MOZ_IMPLICIT FunctionFlags(Flags f) : flags_(f) {}
+
+ static_assert(((FunctionKindLimit - 1) << FUNCTION_KIND_SHIFT) <=
+ FUNCTION_KIND_MASK,
+ "FunctionKind doesn't fit into flags_");
+
+ uint16_t toRaw() const { return flags_; }
+
+ uint16_t stableAcrossClones() const { return flags_ & STABLE_ACROSS_CLONES; }
+
+ // For flag combinations the type is int.
+ bool hasFlags(uint16_t flags) const { return flags_ & flags; }
+ FunctionFlags& setFlags(uint16_t flags) {
+ flags_ |= flags;
+ return *this;
+ }
+ FunctionFlags& clearFlags(uint16_t flags) {
+ flags_ &= ~flags;
+ return *this;
+ }
+ FunctionFlags& setFlags(uint16_t flags, bool set) {
+ if (set) {
+ setFlags(flags);
+ } else {
+ clearFlags(flags);
+ }
+ return *this;
+ }
+
+ FunctionKind kind() const {
+ return static_cast<FunctionKind>((flags_ & FUNCTION_KIND_MASK) >>
+ FUNCTION_KIND_SHIFT);
+ }
+
+ /* A function can be classified as either native (C++) or interpreted (JS): */
+ bool isInterpreted() const {
+ return hasFlags(BASESCRIPT) || hasFlags(SELFHOSTLAZY);
+ }
+ bool isNativeFun() const { return !isInterpreted(); }
+
+ bool isConstructor() const { return hasFlags(CONSTRUCTOR); }
+
+ bool isNonBuiltinConstructor() const {
+ // Note: keep this in sync with branchIfNotFunctionIsNonBuiltinCtor in
+ // MacroAssembler.cpp.
+ return hasFlags(BASESCRIPT) && hasFlags(CONSTRUCTOR) &&
+ !hasFlags(SELF_HOSTED);
+ }
+
+ /* Possible attributes of a native function: */
+ bool isAsmJSNative() const {
+ MOZ_ASSERT_IF(kind() == AsmJS, isNativeFun());
+ return kind() == AsmJS;
+ }
+ bool isWasm() const {
+ MOZ_ASSERT_IF(kind() == Wasm, isNativeFun());
+ return kind() == Wasm;
+ }
+ bool isWasmWithJitEntry() const {
+ MOZ_ASSERT_IF(hasFlags(WASM_JIT_ENTRY), isWasm());
+ return hasFlags(WASM_JIT_ENTRY);
+ }
+ bool isNativeWithoutJitEntry() const {
+ MOZ_ASSERT_IF(!hasJitEntry(), isNativeFun());
+ return !hasJitEntry();
+ }
+ bool isBuiltinNative() const {
+ return isNativeFun() && !isAsmJSNative() && !isWasm();
+ }
+ bool hasJitEntry() const {
+ return hasBaseScript() || hasSelfHostedLazyScript() || isWasmWithJitEntry();
+ }
+
+ /* Possible attributes of an interpreted function: */
+ bool hasInferredName() const { return hasFlags(HAS_INFERRED_NAME); }
+ bool hasGuessedAtom() const { return hasFlags(HAS_GUESSED_ATOM); }
+ bool isLambda() const { return hasFlags(LAMBDA); }
+
+ bool isNamedLambda(bool hasName) const {
+ return hasName && isLambda() && !hasInferredName() && !hasGuessedAtom();
+ }
+
+ // These methods determine which of the u.scripted.s union arms are active.
+ // For live JSFunctions the pointer values will always be non-null, but due
+ // to partial initialization the GC (and other features that scan the heap
+ // directly) may still observe a null pointer.
+ bool hasBaseScript() const { return hasFlags(BASESCRIPT); }
+ bool hasSelfHostedLazyScript() const { return hasFlags(SELFHOSTLAZY); }
+
+ // Arrow functions store their lexical new.target in the first extended slot.
+ bool isArrow() const { return kind() == Arrow; }
+ // Every class-constructor is also a method.
+ bool isMethod() const {
+ return kind() == Method || kind() == ClassConstructor;
+ }
+ bool isClassConstructor() const { return kind() == ClassConstructor; }
+
+ bool isGetter() const { return kind() == Getter; }
+ bool isSetter() const { return kind() == Setter; }
+
+ bool allowSuperProperty() const {
+ return isMethod() || isGetter() || isSetter();
+ }
+
+ bool hasResolvedLength() const { return hasFlags(RESOLVED_LENGTH); }
+ bool hasResolvedName() const { return hasFlags(RESOLVED_NAME); }
+
+ bool isSelfHostedOrIntrinsic() const { return hasFlags(SELF_HOSTED); }
+ bool isSelfHostedBuiltin() const {
+ return isSelfHostedOrIntrinsic() && !isNativeFun();
+ }
+ bool isIntrinsic() const {
+ return isSelfHostedOrIntrinsic() && isNativeFun();
+ }
+
+ FunctionFlags& setKind(FunctionKind kind) {
+ this->flags_ &= ~FUNCTION_KIND_MASK;
+ this->flags_ |= static_cast<uint16_t>(kind) << FUNCTION_KIND_SHIFT;
+ return *this;
+ }
+
+ // Make the function constructible.
+ FunctionFlags& setIsConstructor() {
+ MOZ_ASSERT(!isConstructor());
+ MOZ_ASSERT(isSelfHostedBuiltin());
+ return setFlags(CONSTRUCTOR);
+ }
+
+ FunctionFlags& setIsSelfHostedBuiltin() {
+ MOZ_ASSERT(isInterpreted());
+ MOZ_ASSERT(!isSelfHostedBuiltin());
+ setFlags(SELF_HOSTED);
+ // Self-hosted functions should not be constructible.
+ return clearFlags(CONSTRUCTOR);
+ }
+ FunctionFlags& setIsIntrinsic() {
+ MOZ_ASSERT(isNativeFun());
+ MOZ_ASSERT(!isIntrinsic());
+ return setFlags(SELF_HOSTED);
+ }
+
+ FunctionFlags& setResolvedLength() { return setFlags(RESOLVED_LENGTH); }
+ FunctionFlags& setResolvedName() { return setFlags(RESOLVED_NAME); }
+
+ FunctionFlags& setInferredName() { return setFlags(HAS_INFERRED_NAME); }
+
+ FunctionFlags& setGuessedAtom() { return setFlags(HAS_GUESSED_ATOM); }
+
+ FunctionFlags& setSelfHostedLazy() { return setFlags(SELFHOSTLAZY); }
+ FunctionFlags& clearSelfHostedLazy() { return clearFlags(SELFHOSTLAZY); }
+ FunctionFlags& setBaseScript() { return setFlags(BASESCRIPT); }
+ FunctionFlags& clearBaseScript() { return clearFlags(BASESCRIPT); }
+
+ FunctionFlags& setWasmJitEntry() { return setFlags(WASM_JIT_ENTRY); }
+
+ bool isExtended() const { return hasFlags(EXTENDED); }
+ FunctionFlags& setIsExtended() { return setFlags(EXTENDED); }
+
+ bool isNativeConstructor() const { return hasFlags(NATIVE_CTOR); }
+
+ FunctionFlags& setIsGhost() { return setFlags(GHOST_FUNCTION); }
+ bool isGhost() const { return hasFlags(GHOST_FUNCTION); }
+
+ static uint16_t HasJitEntryFlags(bool isConstructing) {
+ uint16_t flags = BASESCRIPT | SELFHOSTLAZY;
+ if (!isConstructing) {
+ flags |= WASM_JIT_ENTRY;
+ }
+ return flags;
+ }
+
+ static FunctionFlags clearMutableflags(FunctionFlags flags) {
+ return FunctionFlags(flags.toRaw() & ~FunctionFlags::MUTABLE_FLAGS);
+ }
+};
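+
+// Usage sketch (illustrative): build the flags for a plain interpreted
+// function, then query and update them through the accessors above.
+//
+//   FunctionFlags flags(FunctionFlags::INTERPRETED_NORMAL);
+//   MOZ_ASSERT(flags.isInterpreted());
+//   MOZ_ASSERT(flags.isConstructor());
+//   MOZ_ASSERT(flags.kind() == FunctionFlags::NormalFunction);
+//   flags.setResolvedLength();
+//   MOZ_ASSERT(flags.hasResolvedLength());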
+
+} /* namespace js */
+
+#endif /* vm_FunctionFlags_h */
diff --git a/js/src/vm/FunctionPrefixKind.h b/js/src/vm/FunctionPrefixKind.h
new file mode 100644
index 0000000000..d015091647
--- /dev/null
+++ b/js/src/vm/FunctionPrefixKind.h
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_FunctionPrefixKind_h
+#define vm_FunctionPrefixKind_h
+
+#include <stdint.h> // uint8_t
+
+namespace js {
+
+enum class FunctionPrefixKind : uint8_t { None, Get, Set };
+
+} // namespace js
+
+#endif /* vm_FunctionPrefixKind_h */
diff --git a/js/src/vm/GeckoProfiler-inl.h b/js/src/vm/GeckoProfiler-inl.h
new file mode 100644
index 0000000000..087a80022e
--- /dev/null
+++ b/js/src/vm/GeckoProfiler-inl.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_GeckoProfiler_inl_h
+#define vm_GeckoProfiler_inl_h
+
+#include "vm/GeckoProfiler.h"
+
+#include "js/ProfilingStack.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Runtime.h"
+
+namespace js {
+
+inline void GeckoProfilerThread::updatePC(JSContext* cx, JSScript* script,
+ jsbytecode* pc) {
+ if (!cx->runtime()->geckoProfiler().enabled()) {
+ return;
+ }
+
+ uint32_t sp = profilingStack_->stackPointer;
+ if (sp - 1 < profilingStack_->stackCapacity()) {
+ MOZ_ASSERT(sp > 0);
+ MOZ_ASSERT(profilingStack_->frames[sp - 1].rawScript() == script);
+ profilingStack_->frames[sp - 1].setPC(pc);
+ }
+}
+
+/*
+ * This class is used to suppress profiler sampling during
+ * critical sections where stack state is not valid.
+ */
+class MOZ_RAII AutoSuppressProfilerSampling {
+ public:
+ explicit AutoSuppressProfilerSampling(JSContext* cx);
+
+ ~AutoSuppressProfilerSampling();
+
+ private:
+ JSContext* cx_;
+ bool previouslyEnabled_;
+};
+
+MOZ_ALWAYS_INLINE
+GeckoProfilerEntryMarker::GeckoProfilerEntryMarker(JSContext* cx,
+ JSScript* script)
+ : profiler_(&cx->geckoProfiler()) {
+ if (MOZ_LIKELY(!profiler_->infraInstalled())) {
+ profiler_ = nullptr;
+#ifdef DEBUG
+ spBefore_ = 0;
+#endif
+ return;
+ }
+#ifdef DEBUG
+ spBefore_ = profiler_->stackPointer();
+#endif
+
+ // Push an sp marker frame so the profiler can correctly order JS and native
+ // stacks.
+ profiler_->profilingStack_->pushSpMarkerFrame(this);
+
+ profiler_->profilingStack_->pushJsFrame(
+ "js::RunScript",
+ /* dynamicString = */ nullptr, script, script->code(),
+ script->realm()->creationOptions().profilerRealmID());
+}
+
+MOZ_ALWAYS_INLINE
+GeckoProfilerEntryMarker::~GeckoProfilerEntryMarker() {
+ if (MOZ_LIKELY(profiler_ == nullptr)) {
+ return;
+ }
+
+ profiler_->profilingStack_->pop(); // the JS frame
+ profiler_->profilingStack_->pop(); // the SP_MARKER frame
+ MOZ_ASSERT(spBefore_ == profiler_->stackPointer());
+}
+
+MOZ_ALWAYS_INLINE
+AutoGeckoProfilerEntry::AutoGeckoProfilerEntry(
+ JSContext* cx, const char* label, const char* dynamicString,
+ JS::ProfilingCategoryPair categoryPair, uint32_t flags) {
+ profilingStack_ = GetContextProfilingStackIfEnabled(cx);
+ if (MOZ_LIKELY(!profilingStack_)) {
+#ifdef DEBUG
+ profiler_ = nullptr;
+ spBefore_ = 0;
+#endif
+ return;
+ }
+
+#ifdef DEBUG
+ profiler_ = &cx->geckoProfiler();
+ spBefore_ = profiler_->stackPointer();
+#endif
+
+ profilingStack_->pushLabelFrame(label, dynamicString,
+ /* sp = */ this, categoryPair, flags);
+}
+
+MOZ_ALWAYS_INLINE
+AutoGeckoProfilerEntry::~AutoGeckoProfilerEntry() {
+ if (MOZ_LIKELY(!profilingStack_)) {
+ return;
+ }
+
+ profilingStack_->pop();
+ MOZ_ASSERT(spBefore_ == profiler_->stackPointer());
+}
+
+MOZ_ALWAYS_INLINE
+AutoGeckoProfilerEntry::AutoGeckoProfilerEntry(
+ JSContext* cx, const char* label, JS::ProfilingCategoryPair categoryPair,
+ uint32_t flags)
+ : AutoGeckoProfilerEntry(cx, label, /* dynamicString */ nullptr,
+ categoryPair, flags) {}
+
+MOZ_ALWAYS_INLINE
+AutoJSMethodProfilerEntry::AutoJSMethodProfilerEntry(JSContext* cx,
+ const char* label,
+ const char* dynamicString)
+ : AutoGeckoProfilerEntry(
+ cx, label, dynamicString, JS::ProfilingCategoryPair::JS_Builtin,
+ uint32_t(ProfilingStackFrame::Flags::RELEVANT_FOR_JS) |
+ uint32_t(ProfilingStackFrame::Flags::STRING_TEMPLATE_METHOD)) {}
+
+MOZ_ALWAYS_INLINE
+AutoJSConstructorProfilerEntry::AutoJSConstructorProfilerEntry(
+ JSContext* cx, const char* label)
+ : AutoGeckoProfilerEntry(
+ cx, label, "constructor", JS::ProfilingCategoryPair::JS_Builtin,
+ uint32_t(ProfilingStackFrame::Flags::RELEVANT_FOR_JS)) {}
+
+} // namespace js
+
+#endif // vm_GeckoProfiler_inl_h
diff --git a/js/src/vm/GeckoProfiler.cpp b/js/src/vm/GeckoProfiler.cpp
new file mode 100644
index 0000000000..41c13232df
--- /dev/null
+++ b/js/src/vm/GeckoProfiler.cpp
@@ -0,0 +1,561 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/GeckoProfiler-inl.h"
+
+#include "mozilla/Sprintf.h"
+
+#include "gc/GC.h"
+#include "gc/PublicIterators.h"
+#include "jit/BaselineJIT.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitRuntime.h"
+#include "jit/JSJitFrameIter.h"
+#include "jit/PerfSpewer.h"
+#include "js/ProfilingStack.h"
+#include "vm/FrameIter.h" // js::OnlyJSJitFrameIter
+#include "vm/JitActivation.h"
+#include "vm/JSScript.h"
+
+#include "gc/Marking-inl.h"
+#include "jit/JSJitFrameIter-inl.h"
+
+using namespace js;
+
+GeckoProfilerThread::GeckoProfilerThread()
+ : profilingStack_(nullptr), profilingStackIfEnabled_(nullptr) {}
+
+GeckoProfilerRuntime::GeckoProfilerRuntime(JSRuntime* rt)
+ : rt(rt),
+ strings_(),
+ slowAssertions(false),
+ enabled_(false),
+ eventMarker_(nullptr) {
+ MOZ_ASSERT(rt != nullptr);
+}
+
+void GeckoProfilerThread::setProfilingStack(ProfilingStack* profilingStack,
+ bool enabled) {
+ profilingStack_ = profilingStack;
+ profilingStackIfEnabled_ = enabled ? profilingStack : nullptr;
+}
+
+void GeckoProfilerRuntime::setEventMarker(void (*fn)(const char*,
+ const char*)) {
+ eventMarker_ = fn;
+}
+
+// Get a pointer to the top-most profiling frame, given the exit frame pointer.
+static jit::JitFrameLayout* GetTopProfilingJitFrame(jit::JitActivation* act) {
+ // If there is no exit frame set, just return.
+ if (!act->hasExitFP()) {
+ return nullptr;
+ }
+
+ // Skip wasm frames that might be in the way.
+ OnlyJSJitFrameIter iter(act);
+ if (iter.done()) {
+ return nullptr;
+ }
+
+ jit::JSJitProfilingFrameIterator jitIter(
+ (jit::CommonFrameLayout*)iter.frame().fp());
+ MOZ_ASSERT(!jitIter.done());
+ return jitIter.framePtr();
+}
+
+void GeckoProfilerRuntime::enable(bool enabled) {
+ JSContext* cx = rt->mainContextFromAnyThread();
+ MOZ_ASSERT(cx->geckoProfiler().infraInstalled());
+
+ if (enabled_ == enabled) {
+ return;
+ }
+
+ /*
+ * Ensure all future generated code will be instrumented, or that all
+ * currently instrumented code is discarded
+ */
+ ReleaseAllJITCode(rt->gcContext());
+
+ // This function is called when the Gecko profiler makes a new Sampler
+ // (and thus, a new circular buffer). Set all current entries in the
+ // JitcodeGlobalTable as expired and reset the buffer range start.
+ if (rt->hasJitRuntime() && rt->jitRuntime()->hasJitcodeGlobalTable()) {
+ rt->jitRuntime()->getJitcodeGlobalTable()->setAllEntriesAsExpired();
+ }
+ rt->setProfilerSampleBufferRangeStart(0);
+
+ // Ensure that lastProfilingFrame is null for the main thread.
+ if (cx->jitActivation) {
+ cx->jitActivation->setLastProfilingFrame(nullptr);
+ cx->jitActivation->setLastProfilingCallSite(nullptr);
+ }
+
+ enabled_ = enabled;
+
+ /* Toggle Gecko Profiler-related jumps on baseline jitcode.
+ * The call to |ReleaseAllJITCode| above will release most baseline jitcode,
+ * but not jitcode for scripts with active frames on the stack. These scripts
+ * need to have their profiler state toggled so they behave properly.
+ */
+ jit::ToggleBaselineProfiling(cx, enabled);
+
+ // Update lastProfilingFrame to point to the top-most JS jit-frame currently
+ // on stack.
+ if (cx->jitActivation) {
+ // Walk through all activations, and set their lastProfilingFrame
+ // appropriately.
+ if (enabled) {
+ jit::JitActivation* jitActivation = cx->jitActivation;
+ while (jitActivation) {
+ auto* lastProfilingFrame = GetTopProfilingJitFrame(jitActivation);
+ jitActivation->setLastProfilingFrame(lastProfilingFrame);
+ jitActivation->setLastProfilingCallSite(nullptr);
+ jitActivation = jitActivation->prevJitActivation();
+ }
+ } else {
+ jit::JitActivation* jitActivation = cx->jitActivation;
+ while (jitActivation) {
+ jitActivation->setLastProfilingFrame(nullptr);
+ jitActivation->setLastProfilingCallSite(nullptr);
+ jitActivation = jitActivation->prevJitActivation();
+ }
+ }
+ }
+
+ // WebAssembly code does not need to be released, but profiling string
+ // labels have to be generated so that they are available during async
+ // profiling stack iteration.
+ for (RealmsIter r(rt); !r.done(); r.next()) {
+ r->wasm.ensureProfilingLabels(enabled);
+ }
+
+#ifdef JS_STRUCTURED_SPEW
+ // Enable the structured spewer if the environment variable is set.
+ if (enabled) {
+ cx->spewer().enableSpewing();
+ } else {
+ cx->spewer().disableSpewing();
+ }
+#endif
+}
+
+/* Lookup the string for the function/script, creating one if necessary */
+const char* GeckoProfilerRuntime::profileString(JSContext* cx,
+ BaseScript* script) {
+ ProfileStringMap::AddPtr s = strings().lookupForAdd(script);
+
+ if (!s) {
+ UniqueChars str = allocProfileString(cx, script);
+ if (!str) {
+ return nullptr;
+ }
+ MOZ_ASSERT(script->hasBytecode());
+ if (!strings().add(s, script, std::move(str))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ }
+
+ return s->value().get();
+}
+
+void GeckoProfilerRuntime::onScriptFinalized(BaseScript* script) {
+ /*
+ * This function is called whenever a script is destroyed, regardless of
+ * whether profiling has been turned on, so don't invoke a function on an
+ * invalid hash set. Also, even if profiling was enabled but then turned
+ * off, we still want to remove the string, so no check of enabled() is
+ * done.
+ */
+ if (ProfileStringMap::Ptr entry = strings().lookup(script)) {
+ strings().remove(entry);
+ }
+}
+
+void GeckoProfilerRuntime::markEvent(const char* event, const char* details) {
+ MOZ_ASSERT(enabled());
+ if (eventMarker_) {
+ JS::AutoSuppressGCAnalysis nogc;
+ eventMarker_(event, details);
+ }
+}
+
+bool GeckoProfilerThread::enter(JSContext* cx, JSScript* script) {
+ const char* dynamicString =
+ cx->runtime()->geckoProfiler().profileString(cx, script);
+ if (dynamicString == nullptr) {
+ return false;
+ }
+
+#ifdef DEBUG
+ // In debug builds, assert the JS profiling stack frames already on the
+ // stack have a non-null pc. Only look at the top frames to avoid quadratic
+ // behavior.
+ uint32_t sp = profilingStack_->stackPointer;
+ if (sp > 0 && sp - 1 < profilingStack_->stackCapacity()) {
+ size_t start = (sp > 4) ? sp - 4 : 0;
+ for (size_t i = start; i < sp - 1; i++) {
+ MOZ_ASSERT_IF(profilingStack_->frames[i].isJsFrame(),
+ profilingStack_->frames[i].pc());
+ }
+ }
+#endif
+
+ profilingStack_->pushJsFrame(
+ "", dynamicString, script, script->code(),
+ script->realm()->creationOptions().profilerRealmID());
+ return true;
+}
+
+void GeckoProfilerThread::exit(JSContext* cx, JSScript* script) {
+ profilingStack_->pop();
+
+#ifdef DEBUG
+ /* Sanity check to make sure push/pop balanced */
+ uint32_t sp = profilingStack_->stackPointer;
+ if (sp < profilingStack_->stackCapacity()) {
+ JSRuntime* rt = script->runtimeFromMainThread();
+ const char* dynamicString = rt->geckoProfiler().profileString(cx, script);
+ /* Can't fail lookup because we should already be in the set */
+ MOZ_ASSERT(dynamicString);
+
+ // Bug 822041
+ if (!profilingStack_->frames[sp].isJsFrame()) {
+ fprintf(stderr, "--- ABOUT TO FAIL ASSERTION ---\n");
+ fprintf(stderr, " frames=%p size=%u/%u\n", (void*)profilingStack_->frames,
+ uint32_t(profilingStack_->stackPointer),
+ profilingStack_->stackCapacity());
+ for (int32_t i = sp; i >= 0; i--) {
+ ProfilingStackFrame& frame = profilingStack_->frames[i];
+ if (frame.isJsFrame()) {
+ fprintf(stderr, " [%d] JS %s\n", i, frame.dynamicString());
+ } else {
+ fprintf(stderr, " [%d] Label %s\n", i, frame.dynamicString());
+ }
+ }
+ }
+
+ ProfilingStackFrame& frame = profilingStack_->frames[sp];
+ MOZ_ASSERT(frame.isJsFrame());
+ MOZ_ASSERT(frame.script() == script);
+ MOZ_ASSERT(strcmp((const char*)frame.dynamicString(), dynamicString) == 0);
+ }
+#endif
+}
+
+/*
+ * Serializes the script/function pair into a "descriptive string"; the
+ * operation is allowed to fail. This function cannot trigger a GC, because a
+ * GC could finalize some scripts, resize the hash table of profile strings,
+ * and invalidate the AddPtr held while invoking allocProfileString.
+ */
+/* static */
+UniqueChars GeckoProfilerRuntime::allocProfileString(JSContext* cx,
+ BaseScript* script) {
+ // Note: this profiler string is regexp-matched by
+ // devtools/client/profiler/cleopatra/js/parserWorker.js.
+
+ // If the script has a function, try calculating its name.
+ bool hasName = false;
+ size_t nameLength = 0;
+ UniqueChars nameStr;
+ JSFunction* func = script->function();
+ if (func && func->displayAtom()) {
+ nameStr = StringToNewUTF8CharsZ(cx, *func->displayAtom());
+ if (!nameStr) {
+ return nullptr;
+ }
+
+ nameLength = strlen(nameStr.get());
+ hasName = true;
+ }
+
+ // Calculate filename length. We cap this to a reasonable limit to avoid
+ // performance impact of strlen/alloc/memcpy.
+ constexpr size_t MaxFilenameLength = 200;
+ const char* filenameStr = script->filename() ? script->filename() : "(null)";
+ size_t filenameLength = js_strnlen(filenameStr, MaxFilenameLength);
+
+ // Calculate line + column length.
+ bool hasLineAndColumn = false;
+ size_t lineAndColumnLength = 0;
+ char lineAndColumnStr[30];
+ if (hasName || script->isFunction() || script->isForEval()) {
+ lineAndColumnLength = SprintfLiteral(lineAndColumnStr, "%u:%u",
+ script->lineno(), script->column());
+ hasLineAndColumn = true;
+ }
+
+ // Full profile string for scripts with functions is:
+ // FuncName (FileName:Lineno:Column)
+ // Full profile string for scripts without functions is:
+ // FileName:Lineno:Column
+ // Full profile string for scripts without functions and without lines is:
+ // FileName
+
+ // Calculate full string length.
+ size_t fullLength = 0;
+ if (hasName) {
+ MOZ_ASSERT(hasLineAndColumn);
+ fullLength = nameLength + 2 + filenameLength + 1 + lineAndColumnLength + 1;
+ } else if (hasLineAndColumn) {
+ fullLength = filenameLength + 1 + lineAndColumnLength;
+ } else {
+ fullLength = filenameLength;
+ }
+
+ // Allocate string.
+ UniqueChars str(cx->pod_malloc<char>(fullLength + 1));
+ if (!str) {
+ return nullptr;
+ }
+
+ size_t cur = 0;
+
+ // Fill string with function name if needed.
+ if (hasName) {
+ memcpy(str.get() + cur, nameStr.get(), nameLength);
+ cur += nameLength;
+ str[cur++] = ' ';
+ str[cur++] = '(';
+ }
+
+ // Fill string with filename chars.
+ memcpy(str.get() + cur, filenameStr, filenameLength);
+ cur += filenameLength;
+
+ // Fill line + column chars.
+ if (hasLineAndColumn) {
+ str[cur++] = ':';
+ memcpy(str.get() + cur, lineAndColumnStr, lineAndColumnLength);
+ cur += lineAndColumnLength;
+ }
+
+ // Terminal ')' if necessary.
+ if (hasName) {
+ str[cur++] = ')';
+ }
+
+ MOZ_ASSERT(cur == fullLength);
+ str[cur] = 0;
+
+ return str;
+}
+
+void GeckoProfilerThread::trace(JSTracer* trc) {
+ if (profilingStack_) {
+ size_t size = profilingStack_->stackSize();
+ for (size_t i = 0; i < size; i++) {
+ profilingStack_->frames[i].trace(trc);
+ }
+ }
+}
+
+void GeckoProfilerRuntime::fixupStringsMapAfterMovingGC() {
+ for (ProfileStringMap::Enum e(strings()); !e.empty(); e.popFront()) {
+ BaseScript* script = e.front().key();
+ if (IsForwarded(script)) {
+ script = Forwarded(script);
+ e.rekeyFront(script);
+ }
+ }
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void GeckoProfilerRuntime::checkStringsMapAfterMovingGC() {
+ for (auto r = strings().all(); !r.empty(); r.popFront()) {
+ BaseScript* script = r.front().key();
+ CheckGCThingAfterMovingGC(script);
+ auto ptr = strings().lookup(script);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+}
+#endif
+
+void ProfilingStackFrame::trace(JSTracer* trc) {
+ if (isJsFrame()) {
+ JSScript* s = rawScript();
+ TraceNullableRoot(trc, &s, "ProfilingStackFrame script");
+ spOrScript = s;
+ }
+}
+
+GeckoProfilerBaselineOSRMarker::GeckoProfilerBaselineOSRMarker(
+ JSContext* cx, bool hasProfilerFrame)
+ : profiler(&cx->geckoProfiler()) {
+ if (!hasProfilerFrame || !cx->runtime()->geckoProfiler().enabled()) {
+ profiler = nullptr;
+ return;
+ }
+
+ uint32_t sp = profiler->profilingStack_->stackPointer;
+ if (sp >= profiler->profilingStack_->stackCapacity()) {
+ profiler = nullptr;
+ return;
+ }
+
+ spBefore_ = sp;
+ if (sp == 0) {
+ return;
+ }
+
+ ProfilingStackFrame& frame = profiler->profilingStack_->frames[sp - 1];
+ MOZ_ASSERT(!frame.isOSRFrame());
+ frame.setIsOSRFrame(true);
+}
+
+GeckoProfilerBaselineOSRMarker::~GeckoProfilerBaselineOSRMarker() {
+ if (profiler == nullptr) {
+ return;
+ }
+
+ uint32_t sp = profiler->stackPointer();
+ MOZ_ASSERT(spBefore_ == sp);
+ if (sp == 0) {
+ return;
+ }
+
+ ProfilingStackFrame& frame = profiler->stack()[sp - 1];
+ MOZ_ASSERT(frame.isOSRFrame());
+ frame.setIsOSRFrame(false);
+}
+
+JS_PUBLIC_API JSScript* ProfilingStackFrame::script() const {
+ MOZ_ASSERT(isJsFrame());
+ auto* script = reinterpret_cast<JSScript*>(spOrScript.operator void*());
+ if (!script) {
+ return nullptr;
+ }
+
+ // If profiling is suppressed then we can't trust the script pointers to be
+ // valid as they could be in the process of being moved by a compacting GC
+ // (although it's still OK to get the runtime from them).
+ JSContext* cx = script->runtimeFromAnyThread()->mainContextFromAnyThread();
+ if (!cx->isProfilerSamplingEnabled()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!IsForwarded(script));
+ return script;
+}
+
+JS_PUBLIC_API JSFunction* ProfilingStackFrame::function() const {
+ JSScript* script = this->script();
+ return script ? script->function() : nullptr;
+}
+
+JS_PUBLIC_API jsbytecode* ProfilingStackFrame::pc() const {
+ MOZ_ASSERT(isJsFrame());
+ if (pcOffsetIfJS_ == NullPCOffset) {
+ return nullptr;
+ }
+
+ JSScript* script = this->script();
+ return script ? script->offsetToPC(pcOffsetIfJS_) : nullptr;
+}
+
+/* static */
+int32_t ProfilingStackFrame::pcToOffset(JSScript* aScript, jsbytecode* aPc) {
+ return aPc ? aScript->pcToOffset(aPc) : NullPCOffset;
+}
+
+void ProfilingStackFrame::setPC(jsbytecode* pc) {
+ MOZ_ASSERT(isJsFrame());
+ JSScript* script = this->script();
+ MOZ_ASSERT(
+ script); // This should not be called while profiling is suppressed.
+ pcOffsetIfJS_ = pcToOffset(script, pc);
+}
+
+JS_PUBLIC_API void js::SetContextProfilingStack(
+ JSContext* cx, ProfilingStack* profilingStack) {
+ cx->geckoProfiler().setProfilingStack(
+ profilingStack, cx->runtime()->geckoProfiler().enabled());
+}
+
+JS_PUBLIC_API void js::EnableContextProfilingStack(JSContext* cx,
+ bool enabled) {
+ cx->geckoProfiler().enable(enabled);
+ cx->runtime()->geckoProfiler().enable(enabled);
+}
+
+JS_PUBLIC_API void js::RegisterContextProfilingEventMarker(
+ JSContext* cx, void (*fn)(const char*, const char*)) {
+ MOZ_ASSERT(cx->runtime()->geckoProfiler().enabled());
+ cx->runtime()->geckoProfiler().setEventMarker(fn);
+}
+
+AutoSuppressProfilerSampling::AutoSuppressProfilerSampling(JSContext* cx)
+ : cx_(cx), previouslyEnabled_(cx->isProfilerSamplingEnabled()) {
+ if (previouslyEnabled_) {
+ cx_->disableProfilerSampling();
+ }
+}
+
+AutoSuppressProfilerSampling::~AutoSuppressProfilerSampling() {
+ if (previouslyEnabled_) {
+ cx_->enableProfilerSampling();
+ }
+}
+
+namespace JS {
+
+// clang-format off
+
+// ProfilingSubcategory_X:
+// One enum for each category X, listing that category's subcategories. This
+// allows the sProfilingCategoryInfo macro construction below to look up a
+// per-category index for a subcategory.
+#define SUBCATEGORY_ENUMS_BEGIN_CATEGORY(name, labelAsString, color) \
+ enum class ProfilingSubcategory_##name : uint32_t {
+#define SUBCATEGORY_ENUMS_SUBCATEGORY(category, name, labelAsString) \
+ name,
+#define SUBCATEGORY_ENUMS_END_CATEGORY \
+ };
+MOZ_PROFILING_CATEGORY_LIST(SUBCATEGORY_ENUMS_BEGIN_CATEGORY,
+ SUBCATEGORY_ENUMS_SUBCATEGORY,
+ SUBCATEGORY_ENUMS_END_CATEGORY)
+#undef SUBCATEGORY_ENUMS_BEGIN_CATEGORY
+#undef SUBCATEGORY_ENUMS_SUBCATEGORY
+#undef SUBCATEGORY_ENUMS_END_CATEGORY
+
+// sProfilingCategoryPairInfo:
+// A list of ProfilingCategoryPairInfos with the same order as
+// ProfilingCategoryPair, which can be used to map a ProfilingCategoryPair to
+// its information.
+#define CATEGORY_INFO_BEGIN_CATEGORY(name, labelAsString, color)
+#define CATEGORY_INFO_SUBCATEGORY(category, name, labelAsString) \
+ {ProfilingCategory::category, \
+ uint32_t(ProfilingSubcategory_##category::name), labelAsString},
+#define CATEGORY_INFO_END_CATEGORY
+const ProfilingCategoryPairInfo sProfilingCategoryPairInfo[] = {
+ MOZ_PROFILING_CATEGORY_LIST(CATEGORY_INFO_BEGIN_CATEGORY,
+ CATEGORY_INFO_SUBCATEGORY,
+ CATEGORY_INFO_END_CATEGORY)
+};
+#undef CATEGORY_INFO_BEGIN_CATEGORY
+#undef CATEGORY_INFO_SUBCATEGORY
+#undef CATEGORY_INFO_END_CATEGORY
+
+// clang-format on
+
+JS_PUBLIC_API const ProfilingCategoryPairInfo& GetProfilingCategoryPairInfo(
+ ProfilingCategoryPair aCategoryPair) {
+ static_assert(
+ MOZ_ARRAY_LENGTH(sProfilingCategoryPairInfo) ==
+ uint32_t(ProfilingCategoryPair::COUNT),
+ "sProfilingCategoryPairInfo and ProfilingCategory need to have the "
+ "same order and the same length");
+
+ uint32_t categoryPairIndex = uint32_t(aCategoryPair);
+ MOZ_RELEASE_ASSERT(categoryPairIndex <=
+ uint32_t(ProfilingCategoryPair::LAST));
+ return sProfilingCategoryPairInfo[categoryPairIndex];
+}
+
+} // namespace JS
diff --git a/js/src/vm/GeckoProfiler.h b/js/src/vm/GeckoProfiler.h
new file mode 100644
index 0000000000..9fcbfed9a5
--- /dev/null
+++ b/js/src/vm/GeckoProfiler.h
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_GeckoProfiler_h
+#define vm_GeckoProfiler_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jspubtd.h"
+
+#include "js/AllocPolicy.h"
+#include "js/HashTable.h"
+#include "js/ProfilingCategory.h"
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+#include "threading/ProtectedData.h"
+
+/*
+ * Gecko Profiler integration with the JS Engine
+ * https://developer.mozilla.org/en/Performance/Profiling_with_the_Built-in_Profiler
+ *
+ * The Gecko Profiler (found in tools/profiler) is an implementation of a
+ * profiler which has the ability to walk the C++ stack as well as use
+ * instrumentation to gather information. When dealing with JS, however, the
+ * profiler needs integration with the engine because otherwise it is very
+ * difficult to figure out what javascript is executing.
+ *
+ * The current method of integration with the profiler is a form of
+ * instrumentation: every time a JS function is entered, a bit of information
+ * is pushed onto a stack that the profiler owns and maintains. This
+ * information is then popped at the end of the JS function. The profiler
+ * informs the JS engine of this stack at runtime, and it can be turned on/off
+ * dynamically. Each stack frame has type ProfilingStackFrame.
+ *
+ * Throughout execution, the size of the stack recorded in memory may exceed
+ * the stack's capacity. The JS engine will not write any information past
+ * that limit, but it will still maintain the size of the stack. Profiler
+ * code is aware of this and iterates the stack accordingly.
+ *
+ * There is some information pushed on the profiler stack for every JS function
+ * that is entered. First is a char* label with a description of what function
+ * was entered. Currently this string is of the form "function (file:line)" if
+ * there's a function name, or just "file:line" if there's no function name
+ * available. The other bit of information is the relevant C++ (native) stack
+ * pointer. This stack pointer is what enables the interleaving of the C++ and
+ * the JS stack. Finally, throughout execution of the function, some extra
+ * information may be updated on the ProfilingStackFrame structure.
+ *
+ * = Profile Strings
+ *
+ * The profile strings' allocations and deallocation must be carefully
+ * maintained, and ideally at a very low overhead cost. For this reason, the JS
+ * engine maintains a mapping of all known profile strings. These strings are
+ * keyed in lookup by a JSScript*, but are serialized with a JSFunction*,
+ * JSScript* pair. A JSScript will destroy its corresponding profile string when
+ * the script is finalized.
+ *
+ * For this reason, a char* pointer pushed on the profiler stack is valid only
+ * while it is on the profiler stack. The profiler uses sampling to read off
+ * information from this instrumented stack, and it therefore copies the string
+ * byte for byte when a JS function is encountered during sampling.
+ *
+ * = Native Stack Pointer
+ *
+ * The actual value pushed as the native pointer is nullptr for most JS
+ * functions. The reason for this is that there's actually very little
+ * correlation between the JS stack and the C++ stack because many JS functions
+ * all run in the same C++ frame, or can even go backwards in C++ when going
+ * from the JIT back to the interpreter.
+ *
+ * To alleviate this problem, all JS functions push nullptr as their "native
+ * stack pointer" to indicate that it's a JS function call. The function
+ * RunScript(), however, pushes an actual C++ stack pointer onto the profiler
+ * stack. This way when interleaving C++ and JS, if the Gecko Profiler sees a
+ * nullptr native stack pointer on the profiler stack, it looks backwards for
+ * the first non-nullptr pointer and uses that for all subsequent nullptr
+ * native stack pointers.
+ *
+ * = Line Numbers
+ *
+ * One goal of sampling is to get not only a backtrace of the JS stack, but
+ * also to know where execution currently is within each function on the
+ * stack. For this, each ProfilingStackFrame has a 'pc' field to tell where
+ * its execution currently is. This field is updated whenever a call is made
+ * to another JS function, and for the JIT it is also updated whenever the
+ * JIT is left.
+ *
+ * This field is in a union with a uint32_t 'line' so that C++ can make use of
+ * the field as well. Tracking 'line' via PCToLineNumber in JS was observed
+ * to be far too expensive, which is why the pc is stored instead of the
+ * translated line number.
+ *
+ * As an invariant, if the pc is nullptr, then the JIT is currently executing
+ * generated code. Otherwise execution is in another JS function or in C++. With
+ * this in place, only the top frame of the stack can ever have nullptr as its
+ * pc. Additionally with this invariant, it is possible to maintain mappings of
+ * JIT code to pc which can be accessed safely because they will only be
+ * accessed from a signal handler when the JIT code is executing.
+ */
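+
+// A sketch of the lookup described in "= Native Stack Pointer" above. It is
+// illustrative only: the real logic lives in the Gecko profiler's sampler,
+// and the stackAddress() accessor name is an assumption borrowed from
+// js/ProfilingStack.h.
+//
+//   const void* ResolveNativeSp(ProfilingStackFrame* frames, uint32_t i) {
+//     // Walk towards the base of the profiler stack until some frame
+//     // recorded a real C++ stack pointer; JS frames record nullptr.
+//     while (i > 0 && !frames[i].stackAddress()) {
+//       i--;
+//     }
+//     return frames[i].stackAddress();
+//   }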
+
+class JS_PUBLIC_API ProfilingStack;
+
+namespace js {
+
+class BaseScript;
+class GeckoProfilerThread;
+
+// The `ProfileStringMap` weakly holds its `BaseScript*` keys and owns its
+// string values. Entries are removed when the `BaseScript` is finalized; see
+// `GeckoProfilerRuntime::onScriptFinalized`.
+using ProfileStringMap = HashMap<BaseScript*, JS::UniqueChars,
+ DefaultHasher<BaseScript*>, SystemAllocPolicy>;
+
+class GeckoProfilerRuntime {
+ JSRuntime* rt;
+ MainThreadData<ProfileStringMap> strings_;
+ bool slowAssertions;
+ uint32_t enabled_;
+ void (*eventMarker_)(const char*, const char*);
+
+ public:
+ explicit GeckoProfilerRuntime(JSRuntime* rt);
+
+ /* management of whether instrumentation is on or off */
+ bool enabled() { return enabled_; }
+ void enable(bool enabled);
+ void enableSlowAssertions(bool enabled) { slowAssertions = enabled; }
+ bool slowAssertionsEnabled() { return slowAssertions; }
+
+ void setEventMarker(void (*fn)(const char*, const char*));
+
+ static JS::UniqueChars allocProfileString(JSContext* cx, BaseScript* script);
+ const char* profileString(JSContext* cx, BaseScript* script);
+
+ void onScriptFinalized(BaseScript* script);
+
+ void markEvent(const char* event, const char* details);
+
+ ProfileStringMap& strings() { return strings_.ref(); }
+
+ /* meant to be used for testing, not recommended to call in normal code */
+ size_t stringsCount();
+ void stringsReset();
+
+ uint32_t* addressOfEnabled() { return &enabled_; }
+
+ void fixupStringsMapAfterMovingGC();
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkStringsMapAfterMovingGC();
+#endif
+};
+
+inline size_t GeckoProfilerRuntime::stringsCount() { return strings().count(); }
+
+inline void GeckoProfilerRuntime::stringsReset() { strings().clear(); }
+
+/*
+ * This class is used in RunScript() to push a marker onto the sampling stack
+ * noting that we're about to enter JS function calls. This is the only time
+ * at which a valid stack pointer is pushed to the sampling stack.
+ */
+class MOZ_RAII GeckoProfilerEntryMarker {
+ public:
+ explicit MOZ_ALWAYS_INLINE GeckoProfilerEntryMarker(JSContext* cx,
+ JSScript* script);
+ MOZ_ALWAYS_INLINE ~GeckoProfilerEntryMarker();
+
+ private:
+ GeckoProfilerThread* profiler_;
+#ifdef DEBUG
+ uint32_t spBefore_;
+#endif
+};
+
+/*
+ * RAII class to automatically add Gecko Profiler profiling stack frames.
+ * It retrieves the ProfilingStack from the JSContext and does nothing if the
+ * profiler is inactive.
+ *
+ * NB: The `label` string must be statically allocated.
+ */
+class MOZ_RAII AutoGeckoProfilerEntry {
+ public:
+ explicit MOZ_ALWAYS_INLINE AutoGeckoProfilerEntry(
+ JSContext* cx, const char* label, const char* dynamicString,
+ JS::ProfilingCategoryPair categoryPair = JS::ProfilingCategoryPair::JS,
+ uint32_t flags = 0);
+ explicit MOZ_ALWAYS_INLINE AutoGeckoProfilerEntry(
+ JSContext* cx, const char* label,
+ JS::ProfilingCategoryPair categoryPair = JS::ProfilingCategoryPair::JS,
+ uint32_t flags = 0);
+ MOZ_ALWAYS_INLINE ~AutoGeckoProfilerEntry();
+
+ private:
+ ProfilingStack* profilingStack_;
+#ifdef DEBUG
+ GeckoProfilerThread* profiler_;
+ uint32_t spBefore_;
+#endif
+};
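+
+// Usage sketch (the label string is illustrative; any statically allocated
+// string works):
+//
+//   {
+//     AutoGeckoProfilerEntry entry(cx, "MySubsystem",
+//                                  JS::ProfilingCategoryPair::JS);
+//     // ... work attributed to "MySubsystem" while |entry| is in scope ...
+//   }  // the label frame is popped automatically here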
+
+/*
+ * Use this RAII class to add Gecko Profiler label frames for methods of the
+ * JavaScript builtin API.
+ * These frames will be exposed to JavaScript developers (i.e. they won't be
+ * filtered out when using the "JavaScript" filtering option in the Firefox
+ * Profiler UI).
+ * Technical note: the label and dynamicString values will be joined with a dot
+ * separator if dynamicString is present.
+ */
+class MOZ_RAII AutoJSMethodProfilerEntry : public AutoGeckoProfilerEntry {
+ public:
+ explicit MOZ_ALWAYS_INLINE AutoJSMethodProfilerEntry(
+ JSContext* cx, const char* label, const char* dynamicString = nullptr);
+};
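+
+// For example (the receiver/method names are illustrative), the following
+// shows up as "Map.prototype.get" in the profiler:
+//
+//   AutoJSMethodProfilerEntry entry(cx, "Map.prototype", "get");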
+
+/*
+ * Use this RAII class to add Gecko Profiler label frames for constructors of
+ * the JavaScript builtin API.
+ * These frames will be exposed to JavaScript developers (i.e. they won't be
+ * filtered out when using the "JavaScript" filtering option in the Firefox
+ * Profiler UI).
+ * Technical note: the word "constructor" will be appended to the label (with a
+ * space separator).
+ */
+class MOZ_RAII AutoJSConstructorProfilerEntry : public AutoGeckoProfilerEntry {
+ public:
+ explicit MOZ_ALWAYS_INLINE AutoJSConstructorProfilerEntry(JSContext* cx,
+ const char* label);
+};
+
+/*
+ * This class is used in the interpreter to bound regions where the baseline
+ * JIT is being entered via OSR. It marks the current top profiling stack
+ * frame as OSR-ed.
+ */
+class MOZ_RAII GeckoProfilerBaselineOSRMarker {
+ public:
+ explicit GeckoProfilerBaselineOSRMarker(JSContext* cx, bool hasProfilerFrame);
+ ~GeckoProfilerBaselineOSRMarker();
+
+ private:
+ GeckoProfilerThread* profiler;
+ mozilla::DebugOnly<uint32_t> spBefore_;
+};
+
+} /* namespace js */
+
+#endif /* vm_GeckoProfiler_h */
diff --git a/js/src/vm/GeneratorAndAsyncKind.h b/js/src/vm/GeneratorAndAsyncKind.h
new file mode 100644
index 0000000000..c3761a9dec
--- /dev/null
+++ b/js/src/vm/GeneratorAndAsyncKind.h
@@ -0,0 +1,17 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_GeneratorAndAsyncKind_h
+#define vm_GeneratorAndAsyncKind_h
+
+namespace js {
+
+enum class GeneratorKind : bool { NotGenerator, Generator };
+enum class FunctionAsyncKind : bool { SyncFunction, AsyncFunction };
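+
+// For example, the four source-level function forms map onto these kinds as:
+//   function f() {} -> NotGenerator, SyncFunction
+//   function* f() {} -> Generator, SyncFunction
+//   async function f() {} -> NotGenerator, AsyncFunction
+//   async function* f() {} -> Generator, AsyncFunction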
+
+} /* namespace js */
+
+#endif /* vm_GeneratorAndAsyncKind_h */
diff --git a/js/src/vm/GeneratorObject.cpp b/js/src/vm/GeneratorObject.cpp
new file mode 100644
index 0000000000..08f1ebd948
--- /dev/null
+++ b/js/src/vm/GeneratorObject.cpp
@@ -0,0 +1,508 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/GeneratorObject.h"
+
+#include "frontend/ParserAtom.h"
+#ifdef DEBUG
+# include "js/friend/DumpFunctions.h" // js::DumpObject, js::DumpValue
+#endif
+#include "js/PropertySpec.h"
+#include "vm/AsyncFunction.h"
+#include "vm/AsyncIteration.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GlobalObject.h"
+#include "vm/JSObject.h"
+#include "vm/PlainObject.h" // js::PlainObject
+
+#include "debugger/DebugAPI-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+
+AbstractGeneratorObject* AbstractGeneratorObject::create(
+ JSContext* cx, HandleFunction callee, HandleScript script,
+ HandleObject environmentChain, Handle<ArgumentsObject*> argsObject) {
+ Rooted<AbstractGeneratorObject*> genObj(cx);
+ if (!callee->isAsync()) {
+ genObj = GeneratorObject::create(cx, callee);
+ } else if (callee->isGenerator()) {
+ genObj = AsyncGeneratorObject::create(cx, callee);
+ } else {
+ genObj = AsyncFunctionGeneratorObject::create(cx, callee);
+ }
+ if (!genObj) {
+ return nullptr;
+ }
+
+ genObj->setCallee(*callee);
+ genObj->setEnvironmentChain(*environmentChain);
+ if (argsObject) {
+ genObj->setArgsObj(*argsObject.get());
+ }
+
+ ArrayObject* stack = NewDenseFullyAllocatedArray(cx, script->nslots());
+ if (!stack) {
+ return nullptr;
+ }
+
+ genObj->setStackStorage(*stack);
+
+ // Note: This assumes that a Warp frame cannot be the target of
+ // the debugger, as we do not call OnNewGenerator.
+ return genObj;
+}
+
+JSObject* AbstractGeneratorObject::createFromFrame(JSContext* cx,
+ AbstractFramePtr frame) {
+ MOZ_ASSERT(frame.isGeneratorFrame());
+ MOZ_ASSERT(!frame.isConstructing());
+
+ if (frame.isModuleFrame()) {
+ return createModuleGenerator(cx, frame);
+ }
+
+ RootedFunction fun(cx, frame.callee());
+ Rooted<ArgumentsObject*> maybeArgs(
+ cx, frame.script()->needsArgsObj() ? &frame.argsObj() : nullptr);
+ RootedObject environmentChain(cx, frame.environmentChain());
+
+ RootedScript script(cx, frame.script());
+ Rooted<AbstractGeneratorObject*> genObj(
+ cx, AbstractGeneratorObject::create(cx, fun, script, environmentChain,
+ maybeArgs));
+ if (!genObj) {
+ return nullptr;
+ }
+
+ if (!DebugAPI::onNewGenerator(cx, frame, genObj)) {
+ return nullptr;
+ }
+
+ return genObj;
+}
+
+JSObject* AbstractGeneratorObject::createModuleGenerator(
+ JSContext* cx, AbstractFramePtr frame) {
+ Rooted<ModuleObject*> module(cx, frame.script()->module());
+ Rooted<AbstractGeneratorObject*> genObj(cx);
+ genObj = AsyncFunctionGeneratorObject::create(cx, module);
+ if (!genObj) {
+ return nullptr;
+ }
+
+ // Create a handler function to wrap the module's script. This way
+ // we can access it later and restore the state.
+ Handle<PropertyName*> funName = cx->names().empty;
+ RootedFunction handlerFun(
+ cx, NewFunctionWithProto(cx, nullptr, 0,
+ FunctionFlags::INTERPRETED_GENERATOR_OR_ASYNC,
+ nullptr, funName, nullptr,
+ gc::AllocKind::FUNCTION, GenericObject));
+ if (!handlerFun) {
+ return nullptr;
+ }
+ handlerFun->initScript(module->script());
+
+ genObj->setCallee(*handlerFun);
+ genObj->setEnvironmentChain(*frame.environmentChain());
+
+ ArrayObject* stack =
+ NewDenseFullyAllocatedArray(cx, module->script()->nslots());
+ if (!stack) {
+ return nullptr;
+ }
+
+ genObj->setStackStorage(*stack);
+
+ if (!DebugAPI::onNewGenerator(cx, frame, genObj)) {
+ return nullptr;
+ }
+
+ return genObj;
+}
+
+void AbstractGeneratorObject::trace(JSTracer* trc) {
+ DebugAPI::traceGeneratorFrame(trc, this);
+}
+
+bool AbstractGeneratorObject::suspend(JSContext* cx, HandleObject obj,
+ AbstractFramePtr frame,
+ const jsbytecode* pc, unsigned nvalues) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::InitialYield || JSOp(*pc) == JSOp::Yield ||
+ JSOp(*pc) == JSOp::Await);
+
+ auto genObj = obj.as<AbstractGeneratorObject>();
+ MOZ_ASSERT(!genObj->hasStackStorage() || genObj->isStackStorageEmpty());
+ MOZ_ASSERT_IF(JSOp(*pc) == JSOp::Await, genObj->callee().isAsync());
+ MOZ_ASSERT_IF(JSOp(*pc) == JSOp::Yield, genObj->callee().isGenerator());
+
+ if (nvalues > 0) {
+ ArrayObject* stack = nullptr;
+ MOZ_ASSERT(genObj->hasStackStorage());
+ stack = &genObj->stackStorage();
+ MOZ_ASSERT(stack->getDenseCapacity() >= nvalues);
+ if (!frame.saveGeneratorSlots(cx, nvalues, stack)) {
+ return false;
+ }
+ }
+
+ genObj->setResumeIndex(pc);
+ genObj->setEnvironmentChain(*frame.environmentChain());
+ return true;
+}
+
+#ifdef DEBUG
+void AbstractGeneratorObject::dump() const {
+ fprintf(stderr, "(AbstractGeneratorObject*) %p {\n", (void*)this);
+ fprintf(stderr, " callee: (JSFunction*) %p,\n", (void*)&callee());
+ fprintf(stderr, " environmentChain: (JSObject*) %p,\n",
+ (void*)&environmentChain());
+ if (hasArgsObj()) {
+ fprintf(stderr, " argsObj: Some((ArgumentsObject*) %p),\n",
+ (void*)&argsObj());
+ } else {
+ fprintf(stderr, " argsObj: None,\n");
+ }
+ if (hasStackStorage()) {
+ fprintf(stderr, " stackStorage: Some(ArrayObject {\n");
+ ArrayObject& stack = stackStorage();
+ uint32_t denseLen = uint32_t(stack.getDenseInitializedLength());
+ fprintf(stderr, " denseInitializedLength: %u\n,", denseLen);
+ uint32_t len = stack.length();
+ fprintf(stderr, " length: %u\n,", len);
+ fprintf(stderr, " data: [\n");
+ const Value* elements = stack.getDenseElements();
+ for (uint32_t i = 0; i < std::max(len, denseLen); i++) {
+ fprintf(stderr, " [%u]: ", i);
+ js::DumpValue(elements[i]);
+ }
+ fprintf(stderr, " ],\n");
+ fprintf(stderr, " }),\n");
+ } else {
+ fprintf(stderr, " stackStorage: None\n");
+ }
+ if (isSuspended()) {
+ fprintf(stderr, " resumeIndex: Some(%u),\n", resumeIndex());
+ } else {
+ fprintf(stderr, " resumeIndex: None, /* (not suspended) */\n");
+ }
+ fprintf(stderr, "}\n");
+}
+#endif
+
+void AbstractGeneratorObject::finalSuspend(HandleObject obj) {
+ auto* genObj = &obj->as<AbstractGeneratorObject>();
+ MOZ_ASSERT(genObj->isRunning());
+ genObj->setClosed();
+}
+
+static AbstractGeneratorObject* GetGeneratorObjectForCall(JSContext* cx,
+ CallObject& callObj) {
+ // The ".generator" binding is always present and always "aliased".
+ mozilla::Maybe<PropertyInfo> prop =
+ callObj.lookup(cx, cx->names().dotGenerator);
+ if (prop.isNothing()) {
+ return nullptr;
+ }
+ Value genValue = callObj.getSlot(prop->slot());
+
+ // If the `Generator; SetAliasedVar ".generator"; InitialYield` bytecode
+ // sequence has not run yet, genValue is undefined.
+ return genValue.isObject()
+ ? &genValue.toObject().as<AbstractGeneratorObject>()
+ : nullptr;
+}
+
+AbstractGeneratorObject* js::GetGeneratorObjectForFrame(
+ JSContext* cx, AbstractFramePtr frame) {
+ cx->check(frame);
+ MOZ_ASSERT(frame.isGeneratorFrame());
+
+ if (frame.isModuleFrame()) {
+ ModuleEnvironmentObject* moduleEnv =
+ frame.script()->module()->environment();
+ mozilla::Maybe<PropertyInfo> prop =
+ moduleEnv->lookup(cx, cx->names().dotGenerator);
+ Value genValue = moduleEnv->getSlot(prop->slot());
+ return genValue.isObject()
+ ? &genValue.toObject().as<AbstractGeneratorObject>()
+ : nullptr;
+ }
+ if (!frame.hasInitialEnvironment()) {
+ return nullptr;
+ }
+
+ return GetGeneratorObjectForCall(cx, frame.callObj());
+}
+
+AbstractGeneratorObject* js::GetGeneratorObjectForEnvironment(
+ JSContext* cx, HandleObject env) {
+ auto* call = CallObject::find(env);
+ return call ? GetGeneratorObjectForCall(cx, *call) : nullptr;
+}
+
+bool js::GeneratorThrowOrReturn(JSContext* cx, AbstractFramePtr frame,
+ Handle<AbstractGeneratorObject*> genObj,
+ HandleValue arg,
+ GeneratorResumeKind resumeKind) {
+ MOZ_ASSERT(genObj->isRunning());
+ if (resumeKind == GeneratorResumeKind::Throw) {
+ cx->setPendingException(arg, ShouldCaptureStack::Maybe);
+ } else {
+ MOZ_ASSERT(resumeKind == GeneratorResumeKind::Return);
+
+ MOZ_ASSERT_IF(genObj->is<GeneratorObject>(), arg.isObject());
+ frame.setReturnValue(arg);
+
+ RootedValue closing(cx, MagicValue(JS_GENERATOR_CLOSING));
+ cx->setPendingException(closing, nullptr);
+ }
+ return false;
+}
+
+bool AbstractGeneratorObject::resume(JSContext* cx,
+ InterpreterActivation& activation,
+ Handle<AbstractGeneratorObject*> genObj,
+ HandleValue arg, HandleValue resumeKind) {
+ MOZ_ASSERT(genObj->isSuspended());
+
+ RootedFunction callee(cx, &genObj->callee());
+ RootedObject envChain(cx, &genObj->environmentChain());
+ if (!activation.resumeGeneratorFrame(callee, envChain)) {
+ return false;
+ }
+ activation.regs().fp()->setResumedGenerator();
+
+ if (genObj->hasArgsObj()) {
+ activation.regs().fp()->initArgsObj(genObj->argsObj());
+ }
+
+ if (genObj->hasStackStorage() && !genObj->isStackStorageEmpty()) {
+ JSScript* script = activation.regs().fp()->script();
+ ArrayObject* storage = &genObj->stackStorage();
+ uint32_t len = storage->getDenseInitializedLength();
+ activation.regs().fp()->restoreGeneratorSlots(storage);
+ activation.regs().sp += len - script->nfixed();
+ storage->setDenseInitializedLength(0);
+ }
+
+ JSScript* script = callee->nonLazyScript();
+ uint32_t offset = script->resumeOffsets()[genObj->resumeIndex()];
+ activation.regs().pc = script->offsetToPC(offset);
+
+ // Push arg, generator, resumeKind Values on the generator's stack.
+ activation.regs().sp += 3;
+ MOZ_ASSERT(activation.regs().spForStackDepth(activation.regs().stackDepth()));
+ activation.regs().sp[-3] = arg;
+ activation.regs().sp[-2] = ObjectValue(*genObj);
+ activation.regs().sp[-1] = resumeKind;
+
+ genObj->setRunning();
+ return true;
+}
+
+GeneratorObject* GeneratorObject::create(JSContext* cx, HandleFunction fun) {
+ MOZ_ASSERT(fun->isGenerator() && !fun->isAsync());
+
+ // FIXME: This would be faster if we could avoid doing a lookup to get
+ // the prototype for the instance. Bug 906600.
+ RootedValue pval(cx);
+ if (!GetProperty(cx, fun, fun, cx->names().prototype, &pval)) {
+ return nullptr;
+ }
+ RootedObject proto(cx, pval.isObject() ? &pval.toObject() : nullptr);
+ if (!proto) {
+ proto = GlobalObject::getOrCreateGeneratorObjectPrototype(cx, cx->global());
+ if (!proto) {
+ return nullptr;
+ }
+ }
+ return NewObjectWithGivenProto<GeneratorObject>(cx, proto);
+}
+
+const JSClass GeneratorObject::class_ = {
+ "Generator",
+ JSCLASS_HAS_RESERVED_SLOTS(GeneratorObject::RESERVED_SLOTS),
+ &classOps_,
+};
+
+const JSClassOps GeneratorObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ nullptr, // finalize
+ nullptr, // call
+ nullptr, // construct
+ CallTraceMethod<AbstractGeneratorObject>, // trace
+};
+
+static const JSFunctionSpec generator_methods[] = {
+ JS_SELF_HOSTED_FN("next", "GeneratorNext", 1, 0),
+ JS_SELF_HOSTED_FN("throw", "GeneratorThrow", 1, 0),
+ JS_SELF_HOSTED_FN("return", "GeneratorReturn", 1, 0), JS_FS_END};
+
+JSObject* js::NewTenuredObjectWithFunctionPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ RootedObject proto(cx, &cx->global()->getFunctionPrototype());
+ return NewPlainObjectWithProto(cx, proto, TenuredObject);
+}
+
+static JSObject* CreateGeneratorFunction(JSContext* cx, JSProtoKey key) {
+ RootedObject proto(cx, &cx->global()->getFunctionConstructor());
+ Handle<PropertyName*> name = cx->names().GeneratorFunction;
+ return NewFunctionWithProto(cx, Generator, 1, FunctionFlags::NATIVE_CTOR,
+ nullptr, name, proto, gc::AllocKind::FUNCTION,
+ TenuredObject);
+}
+
+static JSObject* CreateGeneratorFunctionPrototype(JSContext* cx,
+ JSProtoKey key) {
+ return NewTenuredObjectWithFunctionPrototype(cx, cx->global());
+}
+
+static bool GeneratorFunctionClassFinish(JSContext* cx,
+ HandleObject genFunction,
+ HandleObject genFunctionProto) {
+ Handle<GlobalObject*> global = cx->global();
+
+ // Change the "constructor" property to non-writable before adding any other
+ // properties, so it's still the last property and can be modified without a
+ // dictionary-mode transition.
+ MOZ_ASSERT(genFunctionProto->as<NativeObject>().getLastProperty().key() ==
+ NameToId(cx->names().constructor));
+ MOZ_ASSERT(!genFunctionProto->as<NativeObject>().inDictionaryMode());
+
+ RootedValue genFunctionVal(cx, ObjectValue(*genFunction));
+ if (!DefineDataProperty(cx, genFunctionProto, cx->names().constructor,
+ genFunctionVal, JSPROP_READONLY)) {
+ return false;
+ }
+ MOZ_ASSERT(!genFunctionProto->as<NativeObject>().inDictionaryMode());
+
+ RootedObject iteratorProto(
+ cx, GlobalObject::getOrCreateIteratorPrototype(cx, global));
+ if (!iteratorProto) {
+ return false;
+ }
+
+ RootedObject genObjectProto(cx, GlobalObject::createBlankPrototypeInheriting(
+ cx, &PlainObject::class_, iteratorProto));
+ if (!genObjectProto) {
+ return false;
+ }
+ if (!DefinePropertiesAndFunctions(cx, genObjectProto, nullptr,
+ generator_methods) ||
+ !DefineToStringTag(cx, genObjectProto, cx->names().Generator)) {
+ return false;
+ }
+
+ if (!LinkConstructorAndPrototype(cx, genFunctionProto, genObjectProto,
+ JSPROP_READONLY, JSPROP_READONLY) ||
+ !DefineToStringTag(cx, genFunctionProto, cx->names().GeneratorFunction)) {
+ return false;
+ }
+
+ global->setGeneratorObjectPrototype(genObjectProto);
+
+ return true;
+}
+
+static const ClassSpec GeneratorFunctionClassSpec = {
+ CreateGeneratorFunction,
+ CreateGeneratorFunctionPrototype,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ GeneratorFunctionClassFinish,
+ ClassSpec::DontDefineConstructor};
+
+const JSClass js::GeneratorFunctionClass = {
+ "GeneratorFunction", 0, JS_NULL_CLASS_OPS, &GeneratorFunctionClassSpec};
+
+const Value& AbstractGeneratorObject::getUnaliasedLocal(uint32_t slot) const {
+ MOZ_ASSERT(isSuspended());
+ MOZ_ASSERT(hasStackStorage());
+ MOZ_ASSERT(slot < callee().nonLazyScript()->nfixed());
+ return stackStorage().getDenseElement(slot);
+}
+
+void AbstractGeneratorObject::setUnaliasedLocal(uint32_t slot,
+ const Value& value) {
+ MOZ_ASSERT(isSuspended());
+ MOZ_ASSERT(hasStackStorage());
+ MOZ_ASSERT(slot < callee().nonLazyScript()->nfixed());
+ return stackStorage().setDenseElement(slot, value);
+}
+
+bool AbstractGeneratorObject::isAfterYield() {
+ return isAfterYieldOrAwait(JSOp::Yield);
+}
+
+bool AbstractGeneratorObject::isAfterAwait() {
+ return isAfterYieldOrAwait(JSOp::Await);
+}
+
+bool AbstractGeneratorObject::isAfterYieldOrAwait(JSOp op) {
+ if (isClosed() || isRunning()) {
+ return false;
+ }
+
+ JSScript* script = callee().nonLazyScript();
+ jsbytecode* code = script->code();
+ uint32_t nextOffset = script->resumeOffsets()[resumeIndex()];
+ if (JSOp(code[nextOffset]) != JSOp::AfterYield) {
+ return false;
+ }
+
+ static_assert(JSOpLength_Yield == JSOpLength_InitialYield,
+ "JSOp::Yield and JSOp::InitialYield must have the same length");
+ static_assert(JSOpLength_Yield == JSOpLength_Await,
+ "JSOp::Yield and JSOp::Await must have the same length");
+
+ uint32_t offset = nextOffset - JSOpLength_Yield;
+ JSOp prevOp = JSOp(code[offset]);
+ MOZ_ASSERT(prevOp == JSOp::InitialYield || prevOp == JSOp::Yield ||
+ prevOp == JSOp::Await);
+
+ return prevOp == op;
+}
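+
+// For example, a generator suspended at a yield has bytecode of the form
+//   ...; Yield; AfterYield; ...
+// with the resume offset pointing at AfterYield, so stepping back one
+// (fixed) Yield-length from that offset lands on the op that suspended it.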
+
+template <>
+bool JSObject::is<js::AbstractGeneratorObject>() const {
+ return is<GeneratorObject>() || is<AsyncFunctionGeneratorObject>() ||
+ is<AsyncGeneratorObject>();
+}
+
+GeneratorResumeKind js::ParserAtomToResumeKind(
+ frontend::TaggedParserAtomIndex atom) {
+ if (atom == frontend::TaggedParserAtomIndex::WellKnown::next()) {
+ return GeneratorResumeKind::Next;
+ }
+ if (atom == frontend::TaggedParserAtomIndex::WellKnown::throw_()) {
+ return GeneratorResumeKind::Throw;
+ }
+ MOZ_ASSERT(atom == frontend::TaggedParserAtomIndex::WellKnown::return_());
+ return GeneratorResumeKind::Return;
+}
+
+JSAtom* js::ResumeKindToAtom(JSContext* cx, GeneratorResumeKind kind) {
+ switch (kind) {
+ case GeneratorResumeKind::Next:
+ return cx->names().next;
+
+ case GeneratorResumeKind::Throw:
+ return cx->names().throw_;
+
+ case GeneratorResumeKind::Return:
+ return cx->names().return_;
+ }
+ MOZ_CRASH("Invalid resume kind");
+}
diff --git a/js/src/vm/GeneratorObject.h b/js/src/vm/GeneratorObject.h
new file mode 100644
index 0000000000..1be5ff8ba2
--- /dev/null
+++ b/js/src/vm/GeneratorObject.h
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_GeneratorObject_h
+#define vm_GeneratorObject_h
+
+#include "js/Class.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/ArrayObject.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/GeneratorResumeKind.h" // GeneratorResumeKind
+#include "vm/JSObject.h"
+#include "vm/Stack.h"
+
+namespace js {
+
+class InterpreterActivation;
+
+namespace frontend {
+class TaggedParserAtomIndex;
+}
+
+extern const JSClass GeneratorFunctionClass;
+
+class AbstractGeneratorObject : public NativeObject {
+ public:
+ // Magic value stored in the resumeIndex slot when the generator is
+ // running or closing. See the resumeIndex comment below.
+ static const int32_t RESUME_INDEX_RUNNING = INT32_MAX;
+
+ enum {
+ CALLEE_SLOT = 0,
+ ENV_CHAIN_SLOT,
+ ARGS_OBJ_SLOT,
+ STACK_STORAGE_SLOT,
+ RESUME_INDEX_SLOT,
+ RESERVED_SLOTS
+ };
+
+ // Maximum number of fixed stack slots in a generator or async function
+ // script. If a script would have more, we instead store some variables in
+ // heap EnvironmentObjects.
+ //
+ // This limit is a performance heuristic. Stack slots reduce allocations,
+ // and `Local` opcodes are a bit faster than `AliasedVar` ones; but at each
+ // `yield` or `await` the stack slots must be memcpy'd into a
+ // GeneratorObject. At some point the memcpy is too much. The limit is
+ // plenty for typical human-authored code.
+ static constexpr uint32_t FixedSlotLimit = 256;
+
+ private:
+ static JSObject* createModuleGenerator(JSContext* cx, AbstractFramePtr frame);
+
+ public:
+ static JSObject* createFromFrame(JSContext* cx, AbstractFramePtr frame);
+ static AbstractGeneratorObject* create(JSContext* cx, HandleFunction callee,
+ HandleScript script,
+ HandleObject environmentChain,
+ Handle<ArgumentsObject*> argsObject);
+
+ static bool resume(JSContext* cx, InterpreterActivation& activation,
+ Handle<AbstractGeneratorObject*> genObj, HandleValue arg,
+ HandleValue resumeKind);
+
+ static bool suspend(JSContext* cx, HandleObject obj, AbstractFramePtr frame,
+ const jsbytecode* pc, unsigned nvalues);
+
+ static void finalSuspend(HandleObject obj);
+
+ JSFunction& callee() const {
+ return getFixedSlot(CALLEE_SLOT).toObject().as<JSFunction>();
+ }
+ void setCallee(JSFunction& callee) {
+ setFixedSlot(CALLEE_SLOT, ObjectValue(callee));
+ }
+
+ JSObject& environmentChain() const {
+ return getFixedSlot(ENV_CHAIN_SLOT).toObject();
+ }
+ void setEnvironmentChain(JSObject& envChain) {
+ setFixedSlot(ENV_CHAIN_SLOT, ObjectValue(envChain));
+ }
+
+ bool hasArgsObj() const { return getFixedSlot(ARGS_OBJ_SLOT).isObject(); }
+ ArgumentsObject& argsObj() const {
+ return getFixedSlot(ARGS_OBJ_SLOT).toObject().as<ArgumentsObject>();
+ }
+ void setArgsObj(ArgumentsObject& argsObj) {
+ setFixedSlot(ARGS_OBJ_SLOT, ObjectValue(argsObj));
+ }
+
+ bool hasStackStorage() const {
+ return getFixedSlot(STACK_STORAGE_SLOT).isObject();
+ }
+ bool isStackStorageEmpty() const {
+ return stackStorage().getDenseInitializedLength() == 0;
+ }
+ ArrayObject& stackStorage() const {
+ return getFixedSlot(STACK_STORAGE_SLOT).toObject().as<ArrayObject>();
+ }
+ void setStackStorage(ArrayObject& stackStorage) {
+ setFixedSlot(STACK_STORAGE_SLOT, ObjectValue(stackStorage));
+ }
+
+ // Access stack storage. Requires `hasStackStorage() && isSuspended()`.
+ // `slot` is the index of the desired local in the stack frame when this
+ // generator is *not* suspended.
+ const Value& getUnaliasedLocal(uint32_t slot) const;
+ void setUnaliasedLocal(uint32_t slot, const Value& value);
+
+ // The resumeIndex slot is abused for a few purposes. It's undefined if
+ // it hasn't been set yet (before the initial yield), and null if the
+ // generator is closed. If the generator is running, the resumeIndex is
+ // RESUME_INDEX_RUNNING.
+ //
+ // If the generator is suspended, it's the resumeIndex (stored as
+ // JSOp::InitialYield/JSOp::Yield/JSOp::Await operand) of the yield
+ // instruction that suspended the generator. The resumeIndex can be mapped to
+ // the bytecode offset (interpreter) or to the native code offset (JIT).
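+ //
+ // For example, over a generator's lifetime this slot typically holds:
+ //   undefined - before the initial yield
+ //   Int32(resumeIndex) - suspended at a yield/await
+ //   Int32(RESUME_INDEX_RUNNING) - running or closing
+ //   null - closed (see setClosed)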
+
+ bool isBeforeInitialYield() const {
+ return getFixedSlot(RESUME_INDEX_SLOT).isUndefined();
+ }
+ bool isRunning() const {
+ return getFixedSlot(RESUME_INDEX_SLOT) == Int32Value(RESUME_INDEX_RUNNING);
+ }
+ bool isSuspended() const {
+ // Note: also update Baseline's IsSuspendedGenerator code if this
+ // changes.
+ Value resumeIndex = getFixedSlot(RESUME_INDEX_SLOT);
+ return resumeIndex.isInt32() &&
+ resumeIndex.toInt32() < RESUME_INDEX_RUNNING;
+ }
+ void setRunning() {
+ MOZ_ASSERT(isSuspended());
+ setFixedSlot(RESUME_INDEX_SLOT, Int32Value(RESUME_INDEX_RUNNING));
+ }
+ void setResumeIndex(const jsbytecode* pc) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::InitialYield || JSOp(*pc) == JSOp::Yield ||
+ JSOp(*pc) == JSOp::Await);
+
+ MOZ_ASSERT_IF(JSOp(*pc) == JSOp::InitialYield,
+ getFixedSlot(RESUME_INDEX_SLOT).isUndefined());
+ MOZ_ASSERT_IF(JSOp(*pc) != JSOp::InitialYield, isRunning());
+
+ uint32_t resumeIndex = GET_UINT24(pc);
+ MOZ_ASSERT(resumeIndex < uint32_t(RESUME_INDEX_RUNNING));
+
+ setFixedSlot(RESUME_INDEX_SLOT, Int32Value(resumeIndex));
+ MOZ_ASSERT(isSuspended());
+ }
+ void setResumeIndex(int32_t resumeIndex) {
+ setFixedSlot(RESUME_INDEX_SLOT, Int32Value(resumeIndex));
+ }
+ uint32_t resumeIndex() const {
+ MOZ_ASSERT(isSuspended());
+ return getFixedSlot(RESUME_INDEX_SLOT).toInt32();
+ }
+ bool isClosed() const { return getFixedSlot(CALLEE_SLOT).isNull(); }
+ void setClosed() {
+ setFixedSlot(CALLEE_SLOT, NullValue());
+ setFixedSlot(ENV_CHAIN_SLOT, NullValue());
+ setFixedSlot(ARGS_OBJ_SLOT, NullValue());
+ setFixedSlot(STACK_STORAGE_SLOT, NullValue());
+ setFixedSlot(RESUME_INDEX_SLOT, NullValue());
+ }
+
+ bool isAfterYield();
+ bool isAfterAwait();
+
+ private:
+ bool isAfterYieldOrAwait(JSOp op);
+
+ public:
+ void trace(JSTracer* trc);
+
+ static size_t offsetOfCalleeSlot() { return getFixedSlotOffset(CALLEE_SLOT); }
+ static size_t offsetOfEnvironmentChainSlot() {
+ return getFixedSlotOffset(ENV_CHAIN_SLOT);
+ }
+ static size_t offsetOfArgsObjSlot() {
+ return getFixedSlotOffset(ARGS_OBJ_SLOT);
+ }
+ static size_t offsetOfResumeIndexSlot() {
+ return getFixedSlotOffset(RESUME_INDEX_SLOT);
+ }
+ static size_t offsetOfStackStorageSlot() {
+ return getFixedSlotOffset(STACK_STORAGE_SLOT);
+ }
+
+ static size_t calleeSlot() { return CALLEE_SLOT; }
+ static size_t envChainSlot() { return ENV_CHAIN_SLOT; }
+ static size_t argsObjectSlot() { return ARGS_OBJ_SLOT; }
+ static size_t stackStorageSlot() { return STACK_STORAGE_SLOT; }
+ static size_t resumeIndexSlot() { return RESUME_INDEX_SLOT; }
+
+#ifdef DEBUG
+ void dump() const;
+#endif
+};
+
+class GeneratorObject : public AbstractGeneratorObject {
+ public:
+ enum { RESERVED_SLOTS = AbstractGeneratorObject::RESERVED_SLOTS };
+
+ static const JSClass class_;
+ static const JSClassOps classOps_;
+
+ static GeneratorObject* create(JSContext* cx, HandleFunction fun);
+};
+
+bool GeneratorThrowOrReturn(JSContext* cx, AbstractFramePtr frame,
+ Handle<AbstractGeneratorObject*> obj,
+ HandleValue val, GeneratorResumeKind resumeKind);
+
+/**
+ * Return the generator object associated with the given frame. The frame must
+ * be a call frame for a generator.
+ *
+ * This may return nullptr at certain points in the generator lifecycle:
+ *
+ * - While a generator call evaluates default argument values and performs
+ * destructuring, which occurs before the generator object is created.
+ *
+ * - Between the `Generator` instruction and the `SetAliasedVar .generator`
+ * instruction, at which point the generator object does exist, but is held
+ * only on the stack, and not the `.generator` pseudo-variable this function
+ * consults.
+ */
+AbstractGeneratorObject* GetGeneratorObjectForFrame(JSContext* cx,
+ AbstractFramePtr frame);
+
+/**
+ * If `env` or any enclosing environment is a `CallObject` associated with a
+ * generator object, return the generator.
+ *
+ * Otherwise `env` is not in a generator or async function, or the generator
+ * object hasn't been created yet; return nullptr with no pending exception.
+ */
+AbstractGeneratorObject* GetGeneratorObjectForEnvironment(JSContext* cx,
+ HandleObject env);
+
+GeneratorResumeKind ParserAtomToResumeKind(
+ frontend::TaggedParserAtomIndex atom);
+JSAtom* ResumeKindToAtom(JSContext* cx, GeneratorResumeKind kind);
+
+} // namespace js
+
+template <>
+bool JSObject::is<js::AbstractGeneratorObject>() const;
+
+#endif /* vm_GeneratorObject_h */
diff --git a/js/src/vm/GeneratorResumeKind.h b/js/src/vm/GeneratorResumeKind.h
new file mode 100644
index 0000000000..7ff6c0a76b
--- /dev/null
+++ b/js/src/vm/GeneratorResumeKind.h
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_GeneratorResumeKind_h
+#define vm_GeneratorResumeKind_h
+
+#include <stdint.h> // uint8_t
+
+namespace js {
+
+enum class GeneratorResumeKind : uint8_t { Next, Throw, Return };
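+
+// For example, gen.next(v) resumes a suspended generator with Next,
+// gen.throw(e) with Throw, and gen.return(v) with Return; see
+// ParserAtomToResumeKind and ResumeKindToAtom in vm/GeneratorObject.cpp.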
+
+} // namespace js
+
+#endif /* vm_GeneratorResumeKind_h */
diff --git a/js/src/vm/GetterSetter.cpp b/js/src/vm/GetterSetter.cpp
new file mode 100644
index 0000000000..791505d38d
--- /dev/null
+++ b/js/src/vm/GetterSetter.cpp
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/GetterSetter.h"
+
+#include "gc/Allocator.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+
+using namespace js;
+
+js::GetterSetter::GetterSetter(HandleObject getter, HandleObject setter)
+ : TenuredCellWithGCPointer(getter), setter_(setter) {}
+
+// static
+GetterSetter* GetterSetter::create(JSContext* cx, HandleObject getter,
+ HandleObject setter) {
+ return cx->newCell<GetterSetter>(getter, setter);
+}
+
+JS::ubi::Node::Size JS::ubi::Concrete<GetterSetter>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return js::gc::Arena::thingSize(get().asTenured().getAllocKind());
+}
diff --git a/js/src/vm/GetterSetter.h b/js/src/vm/GetterSetter.h
new file mode 100644
index 0000000000..d1e2fe4fc7
--- /dev/null
+++ b/js/src/vm/GetterSetter.h
@@ -0,0 +1,116 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_GetterSetter_h
+#define vm_GetterSetter_h
+
+#include "gc/Barrier.h" // js::GCPtr<JSObject*>
+#include "gc/Cell.h" // js::gc::TenuredCellWithGCPointer
+
+#include "js/TypeDecls.h" // JS::HandleObject
+#include "js/UbiNode.h" // JS::ubi::TracerConcrete
+
+namespace js {
+
+// [SMDOC] Getter/Setter Properties
+//
+// Getter/setter properties are implemented similar to plain data properties:
+// the shape contains the property's key, attributes, and slot number, but the
+// getter/setter objects are stored separately as part of the object.
+//
+// To simplify the NativeObject and Shape code, a single slot is allocated for
+// each getter/setter property (again similar to data properties). This slot
+// contains a PrivateGCThingValue pointing to a js::GetterSetter instance.
+//
+// js::GetterSetter
+// ================
+// js::GetterSetter is an immutable type that stores the getter/setter objects.
+// Because accessor properties can be defined with only a getter or only a
+// setter, a GetterSetter's objects can be nullptr.
+//
+// JIT/IC Guards
+// =============
+// An object's shape implies a certain property is an accessor, but it does not
+// imply the identity of the getter/setter objects. This means IC code needs to
+// guard on the slot value (the GetterSetter*) when optimizing a call to a
+// particular getter/setter function.
+//
+// See EmitGuardGetterSetterSlot in jit/CacheIR.cpp.
+//
+// HadGetterSetterChange Optimization
+// ==================================
+// Some getters and setters defined on the prototype chain are very hot, for
+// example the 'length' getter for typed arrays. To avoid the GetterSetter guard
+// in the common case, when attaching a stub for a known 'holder' object, we
+// use the HadGetterSetterChange object flag.
+//
+// When this flag is not set, the object is guaranteed to get a different shape
+// when an accessor property is either deleted or mutated, because when that
+// happens the HadGetterSetterChange will be set which triggers a shape change.
+//
+// This means CacheIR does not have to guard on the GetterSetter slot for
+// accessors on the prototype chain until the first time an accessor property is
+// mutated or deleted.
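+//
+// For example, after evaluating
+//
+//   var obj = { get x() { return 1; }, set x(v) {} };
+//
+// obj's shape describes an accessor property "x" whose slot holds a
+// PrivateGCThingValue pointing to a GetterSetter whose getter()/setter()
+// return the two function objects above.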
+class GetterSetter : public gc::TenuredCellWithGCPointer<JSObject> {
+ friend class gc::CellAllocator;
+
+ public:
+ // Getter object, stored in the cell header.
+ JSObject* getter() const { return headerPtr(); }
+
+ GCPtr<JSObject*> setter_;
+
+#ifndef JS_64BIT
+ // Ensure size >= MinCellSize on 32-bit platforms.
+ uint64_t padding_ = 0;
+#endif
+
+ private:
+ GetterSetter(HandleObject getter, HandleObject setter);
+
+ public:
+ static GetterSetter* create(JSContext* cx, HandleObject getter,
+ HandleObject setter);
+
+ JSObject* setter() const { return setter_; }
+
+ static const JS::TraceKind TraceKind = JS::TraceKind::GetterSetter;
+
+ void traceChildren(JSTracer* trc);
+
+ void finalize(JS::GCContext* gcx) {
+ // Nothing to do.
+ }
+};
+
+} // namespace js
+
+// JS::ubi::Nodes can point to GetterSetters; they're js::gc::Cell instances
+// with no associated compartment.
+namespace JS {
+namespace ubi {
+
+template <>
+class Concrete<js::GetterSetter> : TracerConcrete<js::GetterSetter> {
+ protected:
+ explicit Concrete(js::GetterSetter* ptr)
+ : TracerConcrete<js::GetterSetter>(ptr) {}
+
+ public:
+ static void construct(void* storage, js::GetterSetter* ptr) {
+ new (storage) Concrete(ptr);
+ }
+
+ Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+ const char16_t* typeName() const override { return concreteTypeName; }
+ static const char16_t concreteTypeName[];
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif // vm_GetterSetter_h
diff --git a/js/src/vm/GlobalObject-inl.h b/js/src/vm/GlobalObject-inl.h
new file mode 100644
index 0000000000..4b5e4be39c
--- /dev/null
+++ b/js/src/vm/GlobalObject-inl.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_GlobalObject_inl_h
+#define vm_GlobalObject_inl_h
+
+#include "vm/GlobalObject.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include "vm/JSContext.h" // JSContext
+#include "vm/ObjectOperations-inl.h" // js::SetProperty
+
+/* static */ inline bool js::GlobalObject::setIntrinsicValue(
+ JSContext* cx, Handle<GlobalObject*> global, Handle<PropertyName*> name,
+ HandleValue value) {
+ Rooted<NativeObject*> holder(cx, global->getComputedIntrinsicsHolder());
+ MOZ_ASSERT(holder->lookupPure(name).isNothing(),
+ "SetIntrinsic tried to redefine existing intrinsic");
+ return SetProperty(cx, holder, name, value);
+}
+
+#endif /* vm_GlobalObject_inl_h */
diff --git a/js/src/vm/GlobalObject.cpp b/js/src/vm/GlobalObject.cpp
new file mode 100644
index 0000000000..6924cce0ad
--- /dev/null
+++ b/js/src/vm/GlobalObject.cpp
@@ -0,0 +1,1052 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/GlobalObject.h"
+
+#include "jsapi.h"
+#include "jsfriendapi.h"
+
+#include "builtin/AtomicsObject.h"
+#include "builtin/BigInt.h"
+#include "builtin/DataViewObject.h"
+#ifdef JS_HAS_INTL_API
+# include "builtin/intl/Collator.h"
+# include "builtin/intl/DateTimeFormat.h"
+# include "builtin/intl/DisplayNames.h"
+# include "builtin/intl/ListFormat.h"
+# include "builtin/intl/Locale.h"
+# include "builtin/intl/NumberFormat.h"
+# include "builtin/intl/PluralRules.h"
+# include "builtin/intl/RelativeTimeFormat.h"
+#endif
+#include "builtin/FinalizationRegistryObject.h"
+#include "builtin/MapObject.h"
+#include "builtin/ShadowRealm.h"
+#include "builtin/Symbol.h"
+#include "builtin/WeakMapObject.h"
+#include "builtin/WeakRefObject.h"
+#include "builtin/WeakSetObject.h"
+#include "debugger/DebugAPI.h"
+#include "frontend/CompilationStencil.h"
+#include "gc/FinalizationObservers.h"
+#include "gc/GC.h"
+#include "gc/GCContext.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/WindowProxy.h" // js::ToWindowProxyIfWindow
+#include "js/PropertyAndElement.h" // JS_DefineFunctions, JS_DefineProperties
+#include "js/ProtoKey.h"
+#include "vm/AsyncFunction.h"
+#include "vm/AsyncIteration.h"
+#include "vm/BooleanObject.h"
+#include "vm/Compartment.h"
+#include "vm/DateObject.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/ErrorObject.h"
+#include "vm/GeneratorObject.h"
+#include "vm/JSContext.h"
+#include "vm/NumberObject.h"
+#include "vm/PIC.h"
+#include "vm/PlainObject.h"
+#include "vm/RegExpObject.h"
+#include "vm/RegExpStatics.h"
+#include "vm/SelfHosting.h"
+#include "vm/StringObject.h"
+#include "wasm/WasmJS.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordType.h"
+# include "vm/TupleType.h"
+#endif
+
+#include "gc/GCContext-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+
+namespace js {
+
+extern const JSClass IntlClass;
+extern const JSClass JSONClass;
+extern const JSClass MathClass;
+extern const JSClass ReflectClass;
+
+} // namespace js
+
+static const JSClass* const protoTable[JSProto_LIMIT] = {
+#define INIT_FUNC(name, clasp) clasp,
+#define INIT_FUNC_DUMMY(name, clasp) nullptr,
+ JS_FOR_PROTOTYPES(INIT_FUNC, INIT_FUNC_DUMMY)
+#undef INIT_FUNC_DUMMY
+#undef INIT_FUNC
+};
+
+JS_PUBLIC_API const JSClass* js::ProtoKeyToClass(JSProtoKey key) {
+ MOZ_ASSERT(key < JSProto_LIMIT);
+ return protoTable[key];
+}
+
+/* static */
+bool GlobalObject::skipDeselectedConstructor(JSContext* cx, JSProtoKey key) {
+ switch (key) {
+ case JSProto_Null:
+ case JSProto_Object:
+ case JSProto_Function:
+ case JSProto_BoundFunction:
+ case JSProto_Array:
+ case JSProto_Boolean:
+ case JSProto_JSON:
+ case JSProto_Date:
+ case JSProto_Math:
+ case JSProto_Number:
+ case JSProto_String:
+ case JSProto_RegExp:
+ case JSProto_Error:
+ case JSProto_InternalError:
+ case JSProto_AggregateError:
+ case JSProto_EvalError:
+ case JSProto_RangeError:
+ case JSProto_ReferenceError:
+ case JSProto_SyntaxError:
+ case JSProto_TypeError:
+ case JSProto_URIError:
+ case JSProto_DebuggeeWouldRun:
+ case JSProto_CompileError:
+ case JSProto_LinkError:
+ case JSProto_RuntimeError:
+ case JSProto_ArrayBuffer:
+ case JSProto_Int8Array:
+ case JSProto_Uint8Array:
+ case JSProto_Int16Array:
+ case JSProto_Uint16Array:
+ case JSProto_Int32Array:
+ case JSProto_Uint32Array:
+ case JSProto_Float32Array:
+ case JSProto_Float64Array:
+ case JSProto_Uint8ClampedArray:
+ case JSProto_BigInt64Array:
+ case JSProto_BigUint64Array:
+ case JSProto_BigInt:
+ case JSProto_Proxy:
+ case JSProto_WeakMap:
+ case JSProto_Map:
+ case JSProto_Set:
+ case JSProto_DataView:
+ case JSProto_Symbol:
+ case JSProto_Reflect:
+ case JSProto_WeakSet:
+ case JSProto_TypedArray:
+ case JSProto_SavedFrame:
+ case JSProto_Promise:
+ case JSProto_AsyncFunction:
+ case JSProto_GeneratorFunction:
+ case JSProto_AsyncGeneratorFunction:
+#ifdef ENABLE_RECORD_TUPLE
+ case JSProto_Record:
+ case JSProto_Tuple:
+#endif
+ return false;
+
+ case JSProto_WebAssembly:
+ return !wasm::HasSupport(cx);
+
+ case JSProto_WasmModule:
+ case JSProto_WasmInstance:
+ case JSProto_WasmMemory:
+ case JSProto_WasmTable:
+ case JSProto_WasmGlobal:
+ case JSProto_WasmTag:
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ case JSProto_WasmFunction:
+#endif
+ case JSProto_WasmException:
+ return false;
+
+#ifdef JS_HAS_INTL_API
+ case JSProto_Intl:
+ case JSProto_Collator:
+ case JSProto_DateTimeFormat:
+ case JSProto_DisplayNames:
+ case JSProto_Locale:
+ case JSProto_ListFormat:
+ case JSProto_NumberFormat:
+ case JSProto_PluralRules:
+ case JSProto_RelativeTimeFormat:
+ return false;
+#endif
+
+ // Return true if the given constructor has been disabled at run-time.
+ case JSProto_Atomics:
+ case JSProto_SharedArrayBuffer:
+ return !cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled();
+
+ case JSProto_WeakRef:
+ case JSProto_FinalizationRegistry:
+ return cx->realm()->creationOptions().getWeakRefsEnabled() ==
+ JS::WeakRefSpecifier::Disabled;
+
+ case JSProto_Iterator:
+ case JSProto_AsyncIterator:
+ return !cx->realm()->creationOptions().getIteratorHelpersEnabled();
+
+ case JSProto_ShadowRealm:
+ return !cx->realm()->creationOptions().getShadowRealmsEnabled();
+
+ default:
+ MOZ_CRASH("unexpected JSProtoKey");
+ }
+}
+
+static bool ShouldFreezeBuiltin(JSProtoKey key) {
+ switch (key) {
+ case JSProto_Object:
+ case JSProto_Array:
+ case JSProto_Function:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static unsigned GetAttrsForResolvedGlobal(GlobalObject* global,
+ JSProtoKey key) {
+ unsigned attrs = JSPROP_RESOLVING;
+ if (global->realm()->creationOptions().freezeBuiltins() &&
+ ShouldFreezeBuiltin(key)) {
+ attrs |= JSPROP_PERMANENT | JSPROP_READONLY;
+ }
+ return attrs;
+}
+
+/* static */
+bool GlobalObject::resolveConstructor(JSContext* cx,
+ Handle<GlobalObject*> global,
+ JSProtoKey key, IfClassIsDisabled mode) {
+ MOZ_ASSERT(key != JSProto_Null);
+ MOZ_ASSERT(key != JSProto_BoundFunction,
+ "bound functions don't have their own proto object");
+ MOZ_ASSERT(!global->isStandardClassResolved(key));
+ MOZ_ASSERT(cx->compartment() == global->compartment());
+
+ // |global| must be same-compartment but make sure we're in its realm: the
+ // code below relies on this.
+ AutoRealm ar(cx, global);
+
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ // Prohibit collection of allocation metadata. Metadata builders shouldn't
+ // need to observe lazily-constructed prototype objects coming into
+ // existence. And assertions start to fail when the builder itself attempts
+ // an allocation that re-entrantly tries to create the same prototype.
+ AutoSuppressAllocationMetadataBuilder suppressMetadata(cx);
+
+ // Constructor resolution may execute self-hosted scripts. These
+ // self-hosted scripts do not call out to user code by construction. Allow
+ // all scripts to execute, even in debuggee compartments that are paused.
+ AutoSuppressDebuggeeNoExecuteChecks suppressNX(cx);
+
+ // Some classes can be disabled at compile time, others at run time;
+ // if a feature is compile-time disabled, clasp is null.
+ const JSClass* clasp = ProtoKeyToClass(key);
+ if (!clasp || skipDeselectedConstructor(cx, key)) {
+ if (mode == IfClassIsDisabled::Throw) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CONSTRUCTOR_DISABLED,
+ clasp ? clasp->name : "constructor");
+ return false;
+ }
+ return true;
+ }
+
+ // Class spec must have a constructor defined.
+ if (!clasp->specDefined()) {
+ return true;
+ }
+
+ bool isObjectOrFunction = key == JSProto_Function || key == JSProto_Object;
+
+ // We need to create the prototype first, and immediately stash it in the
+ // slot. This is so the following bootstrap ordering is possible:
+ // * Object.prototype
+ // * Function.prototype
+ // * Function
+ // * Object
+ //
+ // We get the above when Object is resolved before Function. If Function
+ // is resolved before Object, we'll end up re-entering resolveConstructor
+ // for Function, which is a problem. So if Function is being resolved
+ // before Object.prototype exists, we just resolve Object instead, since we
+ // know that Function will also be resolved before we return.
+ if (key == JSProto_Function && !global->hasPrototype(JSProto_Object)) {
+ return resolveConstructor(cx, global, JSProto_Object,
+ IfClassIsDisabled::DoNothing);
+ }
+
+ // %IteratorPrototype%.map.[[Prototype]] is %Generator% and
+ // %Generator%.prototype.[[Prototype]] is %IteratorPrototype%.
+ // A workaround in initIteratorProto prevents runaway mutual recursion while
+ // setting these up. Ensure the workaround is triggered already:
+ if (key == JSProto_GeneratorFunction &&
+ !global->hasBuiltinProto(ProtoKind::IteratorProto)) {
+ if (!getOrCreateIteratorPrototype(cx, global)) {
+ return false;
+ }
+
+ // If iterator helpers are enabled, populating %IteratorPrototype% will
+ // have recursively gone through here.
+ if (global->isStandardClassResolved(key)) {
+ return true;
+ }
+ }
+
+ // We don't always have a prototype (e.g. Math and JSON). If we don't,
+ // |createPrototype|, |prototypeFunctions|, and |prototypeProperties|
+ // should all be null.
+ RootedObject proto(cx);
+ if (ClassObjectCreationOp createPrototype =
+ clasp->specCreatePrototypeHook()) {
+ proto = createPrototype(cx, key);
+ if (!proto) {
+ return false;
+ }
+
+ if (isObjectOrFunction) {
+ // Make sure that creating the prototype didn't recursively resolve
+ // our own constructor. We can't just assert that there's no
+ // prototype; OOMs can result in incomplete resolutions in which
+ // the prototype is saved but not the constructor. So use the same
+ // criteria that protects entry into this function.
+ MOZ_ASSERT(!global->isStandardClassResolved(key));
+
+ global->setPrototype(key, proto);
+ }
+ }
+
+ // Create the constructor.
+ RootedObject ctor(cx, clasp->specCreateConstructorHook()(cx, key));
+ if (!ctor) {
+ return false;
+ }
+
+ RootedId id(cx, NameToId(ClassName(key, cx)));
+ if (isObjectOrFunction) {
+ if (clasp->specShouldDefineConstructor()) {
+ RootedValue ctorValue(cx, ObjectValue(*ctor));
+ unsigned attrs = GetAttrsForResolvedGlobal(global, key);
+ if (!DefineDataProperty(cx, global, id, ctorValue, attrs)) {
+ return false;
+ }
+ }
+
+ global->setConstructor(key, ctor);
+ }
+
+ if (const JSFunctionSpec* funs = clasp->specPrototypeFunctions()) {
+ if (!JS_DefineFunctions(cx, proto, funs)) {
+ return false;
+ }
+ }
+ if (const JSPropertySpec* props = clasp->specPrototypeProperties()) {
+ if (!JS_DefineProperties(cx, proto, props)) {
+ return false;
+ }
+ }
+ if (const JSFunctionSpec* funs = clasp->specConstructorFunctions()) {
+ if (!JS_DefineFunctions(cx, ctor, funs)) {
+ return false;
+ }
+ }
+ if (const JSPropertySpec* props = clasp->specConstructorProperties()) {
+ if (!JS_DefineProperties(cx, ctor, props)) {
+ return false;
+ }
+ }
+
+ // If the prototype exists, link it with the constructor.
+ if (proto && !LinkConstructorAndPrototype(cx, ctor, proto)) {
+ return false;
+ }
+
+ // Call the post-initialization hook, if provided.
+ if (FinishClassInitOp finishInit = clasp->specFinishInitHook()) {
+ if (!finishInit(cx, ctor, proto)) {
+ return false;
+ }
+ }
+
+ if (ShouldFreezeBuiltin(key)) {
+ if (!JS::MaybeFreezeCtorAndPrototype(cx, ctor, proto)) {
+ return false;
+ }
+ }
+
+ if (!isObjectOrFunction) {
+ // Any operation that modifies the global object should be placed
+ // after any other fallible operations.
+
+ // Fallible operation that modifies the global object.
+ if (clasp->specShouldDefineConstructor()) {
+ bool shouldReallyDefine = true;
+
+ // On the web, it isn't presently possible to expose the global
+ // "SharedArrayBuffer" property unless the page is cross-site-isolated.
+ // Only define this constructor if an option on the realm indicates that
+ // it should be defined.
+ if (key == JSProto_SharedArrayBuffer) {
+ const JS::RealmCreationOptions& options =
+ global->realm()->creationOptions();
+
+ MOZ_ASSERT(options.getSharedMemoryAndAtomicsEnabled(),
+ "shouldn't be defining SharedArrayBuffer if shared memory "
+ "is disabled");
+
+ shouldReallyDefine = options.defineSharedArrayBufferConstructor();
+ }
+
+ if (shouldReallyDefine) {
+ RootedValue ctorValue(cx, ObjectValue(*ctor));
+ unsigned attrs = GetAttrsForResolvedGlobal(global, key);
+ if (!DefineDataProperty(cx, global, id, ctorValue, attrs)) {
+ return false;
+ }
+ }
+ }
+
+ // Infallible operations that modify the global object.
+ global->setConstructor(key, ctor);
+ if (proto) {
+ global->setPrototype(key, proto);
+ }
+ }
+
+ return true;
+}
+
+// Resolve a "globalThis" self-referential property if necessary,
+// per a stage-3 proposal. https://github.com/tc39/ecma262/pull/702
+//
+// We could also do this in |FinishObjectClassInit| to trim the global
+// resolve hook. Unfortunately, |ToWindowProxyIfWindow| doesn't work then:
+// the browser's |nsGlobalWindow::SetNewDocument| invokes Object init
+// *before* it sets the global's WindowProxy using |js::SetWindowProxy|.
+//
+// Refactoring global object creation code to support this approach is a
+// challenge for another day.
+/* static */
+bool GlobalObject::maybeResolveGlobalThis(JSContext* cx,
+ Handle<GlobalObject*> global,
+ bool* resolved) {
+ if (!global->data().globalThisResolved) {
+ RootedValue v(cx, ObjectValue(*ToWindowProxyIfWindow(global)));
+ if (!DefineDataProperty(cx, global, cx->names().globalThis, v,
+ JSPROP_RESOLVING)) {
+ return false;
+ }
+
+ *resolved = true;
+ global->data().globalThisResolved = true;
+ }
+
+ return true;
+}
+
+/* static */
+JSObject* GlobalObject::createBuiltinProto(JSContext* cx,
+ Handle<GlobalObject*> global,
+ ProtoKind kind, ObjectInitOp init) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ if (!init(cx, global)) {
+ return nullptr;
+ }
+
+ return &global->getBuiltinProto(kind);
+}
+
+JSObject* GlobalObject::createBuiltinProto(JSContext* cx,
+ Handle<GlobalObject*> global,
+ ProtoKind kind, Handle<JSAtom*> tag,
+ ObjectInitWithTagOp init) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ if (!init(cx, global, tag)) {
+ return nullptr;
+ }
+
+ return &global->getBuiltinProto(kind);
+}
+
+static bool ThrowTypeError(JSContext* cx, unsigned argc, Value* vp) {
+ ThrowTypeErrorBehavior(cx);
+ return false;
+}
+
+/* static */
+JSObject* GlobalObject::getOrCreateThrowTypeError(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (JSFunction* fun = global->data().throwTypeError) {
+ return fun;
+ }
+
+ // Construct the unique [[%ThrowTypeError%]] function object, used only for
+ // "callee" and "caller" accessors on strict mode arguments objects. (The
+ // spec also uses this for "arguments" and "caller" on various functions,
+ // but we're experimenting with implementing them using accessors on
+ // |Function.prototype| right now.)
+
+ RootedFunction throwTypeError(
+ cx, NewNativeFunction(cx, ThrowTypeError, 0, nullptr));
+ if (!throwTypeError || !PreventExtensions(cx, throwTypeError)) {
+ return nullptr;
+ }
+
+ // The "length" property of %ThrowTypeError% is non-configurable.
+ Rooted<PropertyDescriptor> nonConfigurableDesc(cx,
+ PropertyDescriptor::Empty());
+ nonConfigurableDesc.setConfigurable(false);
+
+ RootedId lengthId(cx, NameToId(cx->names().length));
+ ObjectOpResult lengthResult;
+ if (!NativeDefineProperty(cx, throwTypeError, lengthId, nonConfigurableDesc,
+ lengthResult)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(lengthResult);
+
+ // The "name" property of %ThrowTypeError% is non-configurable, adjust
+ // the default property attributes accordingly.
+ RootedId nameId(cx, NameToId(cx->names().name));
+ ObjectOpResult nameResult;
+ if (!NativeDefineProperty(cx, throwTypeError, nameId, nonConfigurableDesc,
+ nameResult)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(nameResult);
+
+ global->data().throwTypeError.init(throwTypeError);
+ return throwTypeError;
+}
+
+GlobalObject* GlobalObject::createInternal(JSContext* cx,
+ const JSClass* clasp) {
+ MOZ_ASSERT(clasp->flags & JSCLASS_IS_GLOBAL);
+ MOZ_ASSERT(clasp->isTrace(JS_GlobalObjectTraceHook));
+
+ JSObject* obj = NewTenuredObjectWithGivenProto(cx, clasp, nullptr);
+ if (!obj) {
+ return nullptr;
+ }
+
+ Rooted<GlobalObject*> global(cx, &obj->as<GlobalObject>());
+ MOZ_ASSERT(global->isUnqualifiedVarObj());
+
+ {
+ auto data = cx->make_unique<GlobalObjectData>(cx->zone());
+ if (!data) {
+ return nullptr;
+ }
+ // Note: it's important for the realm's global to be initialized at the
+ // same time as the global's GlobalObjectData, because we free the global's
+ // data when Realm::global_ is cleared.
+ cx->realm()->initGlobal(*global);
+ InitReservedSlot(global, GLOBAL_DATA_SLOT, data.release(),
+ MemoryUse::GlobalObjectData);
+ }
+
+ Rooted<GlobalLexicalEnvironmentObject*> lexical(
+ cx, GlobalLexicalEnvironmentObject::create(cx, global));
+ if (!lexical) {
+ return nullptr;
+ }
+ global->data().lexicalEnvironment.init(lexical);
+
+ Rooted<GlobalScope*> emptyGlobalScope(
+ cx, GlobalScope::createEmpty(cx, ScopeKind::Global));
+ if (!emptyGlobalScope) {
+ return nullptr;
+ }
+ global->data().emptyGlobalScope.init(emptyGlobalScope);
+
+ if (!GlobalObject::createIntrinsicsHolder(cx, global)) {
+ return nullptr;
+ }
+
+ if (!JSObject::setQualifiedVarObj(cx, global)) {
+ return nullptr;
+ }
+ if (!JSObject::setGenerationCountedGlobal(cx, global)) {
+ return nullptr;
+ }
+
+ return global;
+}
+
+/* static */
+GlobalObject* GlobalObject::new_(JSContext* cx, const JSClass* clasp,
+ JSPrincipals* principals,
+ JS::OnNewGlobalHookOption hookOption,
+ const JS::RealmOptions& options) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+ MOZ_ASSERT_IF(cx->zone(), !cx->zone()->isAtomsZone());
+
+ // If we are creating a new global in an existing compartment, make sure the
+ // compartment has a live global at all times (by rooting it here).
+ // See bug 1530364.
+ Rooted<GlobalObject*> existingGlobal(cx);
+ const JS::RealmCreationOptions& creationOptions = options.creationOptions();
+ if (creationOptions.compartmentSpecifier() ==
+ JS::CompartmentSpecifier::ExistingCompartment) {
+ Compartment* comp = creationOptions.compartment();
+ existingGlobal = &comp->firstGlobal();
+ }
+
+ Realm* realm = NewRealm(cx, principals, options);
+ if (!realm) {
+ return nullptr;
+ }
+
+ Rooted<GlobalObject*> global(cx);
+ {
+ AutoRealmUnchecked ar(cx, realm);
+ global = GlobalObject::createInternal(cx, clasp);
+ if (!global) {
+ return nullptr;
+ }
+
+ // Initialize these constructors transactionally: discard the incompletely
+ // initialized global if an error occurs. This also ensures the global's
+ // prototype chain is initialized (in FinishObjectClassInit).
+ if (!ensureConstructor(cx, global, JSProto_Object) ||
+ !ensureConstructor(cx, global, JSProto_Function)) {
+ return nullptr;
+ }
+
+ realm->clearInitializingGlobal();
+ if (hookOption == JS::FireOnNewGlobalHook) {
+ JS_FireOnNewGlobalObject(cx, global);
+ }
+ }
+
+ return global;
+}
+
+GlobalScope& GlobalObject::emptyGlobalScope() const {
+ return *data().emptyGlobalScope;
+}
+
+bool GlobalObject::valueIsEval(const Value& val) {
+ return val.isObject() && data().eval == &val.toObject();
+}
+
+/* static */
+bool GlobalObject::initStandardClasses(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ /* Define a top-level property 'undefined' with the undefined value. */
+ if (!DefineDataProperty(
+ cx, global, cx->names().undefined, UndefinedHandleValue,
+ JSPROP_PERMANENT | JSPROP_READONLY | JSPROP_RESOLVING)) {
+ return false;
+ }
+
+ // Resolve a "globalThis" self-referential property if necessary.
+ bool resolved;
+ if (!GlobalObject::maybeResolveGlobalThis(cx, global, &resolved)) {
+ return false;
+ }
+
+ for (size_t k = 0; k < JSProto_LIMIT; ++k) {
+ JSProtoKey key = static_cast<JSProtoKey>(k);
+ if (key != JSProto_Null && key != JSProto_BoundFunction &&
+ !global->isStandardClassResolved(key)) {
+ if (!resolveConstructor(cx, global, static_cast<JSProtoKey>(k),
+ IfClassIsDisabled::DoNothing)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+/* static */
+JSFunction* GlobalObject::createConstructor(JSContext* cx, Native ctor,
+ JSAtom* nameArg, unsigned length,
+ gc::AllocKind kind,
+ const JSJitInfo* jitInfo) {
+ Rooted<JSAtom*> name(cx, nameArg);
+ JSFunction* fun = NewNativeConstructor(cx, ctor, length, name, kind);
+ if (!fun) {
+ return nullptr;
+ }
+
+ if (jitInfo) {
+ fun->setJitInfo(jitInfo);
+ }
+
+ return fun;
+}
+
+static NativeObject* CreateBlankProto(JSContext* cx, const JSClass* clasp,
+ HandleObject proto) {
+ MOZ_ASSERT(!clasp->isJSFunction());
+
+ if (clasp == &PlainObject::class_) {
+ return NewPlainObjectWithProto(cx, proto, TenuredObject);
+ }
+
+ return NewTenuredObjectWithGivenProto(cx, clasp, proto);
+}
+
+/* static */
+NativeObject* GlobalObject::createBlankPrototype(JSContext* cx,
+ Handle<GlobalObject*> global,
+ const JSClass* clasp) {
+ RootedObject objectProto(cx, &global->getObjectPrototype());
+ return CreateBlankProto(cx, clasp, objectProto);
+}
+
+/* static */
+NativeObject* GlobalObject::createBlankPrototypeInheriting(JSContext* cx,
+ const JSClass* clasp,
+ HandleObject proto) {
+ return CreateBlankProto(cx, clasp, proto);
+}
+
+bool js::LinkConstructorAndPrototype(JSContext* cx, JSObject* ctor_,
+ JSObject* proto_, unsigned prototypeAttrs,
+ unsigned constructorAttrs) {
+ RootedObject ctor(cx, ctor_), proto(cx, proto_);
+
+ RootedValue protoVal(cx, ObjectValue(*proto));
+ RootedValue ctorVal(cx, ObjectValue(*ctor));
+
+ return DefineDataProperty(cx, ctor, cx->names().prototype, protoVal,
+ prototypeAttrs) &&
+ DefineDataProperty(cx, proto, cx->names().constructor, ctorVal,
+ constructorAttrs);
+}
+
+bool js::DefinePropertiesAndFunctions(JSContext* cx, HandleObject obj,
+ const JSPropertySpec* ps,
+ const JSFunctionSpec* fs) {
+ if (ps && !JS_DefineProperties(cx, obj, ps)) {
+ return false;
+ }
+ if (fs && !JS_DefineFunctions(cx, obj, fs)) {
+ return false;
+ }
+ return true;
+}
+
+bool js::DefineToStringTag(JSContext* cx, HandleObject obj, JSAtom* tag) {
+ RootedId toStringTagId(
+ cx, PropertyKey::Symbol(cx->wellKnownSymbols().toStringTag));
+ RootedValue tagString(cx, StringValue(tag));
+ return DefineDataProperty(cx, obj, toStringTagId, tagString, JSPROP_READONLY);
+}
+
+/* static */
+NativeObject* GlobalObject::getOrCreateForOfPICObject(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ cx->check(global);
+ NativeObject* forOfPIC = global->getForOfPICObject();
+ if (forOfPIC) {
+ return forOfPIC;
+ }
+
+ forOfPIC = ForOfPIC::createForOfPICObject(cx, global);
+ if (!forOfPIC) {
+ return nullptr;
+ }
+ global->data().forOfPICChain.init(forOfPIC);
+ return forOfPIC;
+}
+
+/* static */
+JSObject* GlobalObject::getOrCreateRealmKeyObject(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ cx->check(global);
+ if (PlainObject* key = global->data().realmKeyObject) {
+ return key;
+ }
+
+ PlainObject* key = NewPlainObject(cx);
+ if (!key) {
+ return nullptr;
+ }
+
+ global->data().realmKeyObject.init(key);
+ return key;
+}
+
+/* static */
+RegExpStatics* GlobalObject::getRegExpStatics(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ MOZ_ASSERT(cx);
+
+ if (!global->data().regExpStatics) {
+ auto statics = RegExpStatics::create(cx);
+ if (!statics) {
+ return nullptr;
+ }
+ global->data().regExpStatics = std::move(statics);
+ }
+
+ return global->data().regExpStatics.get();
+}
+
+gc::FinalizationRegistryGlobalData*
+GlobalObject::getOrCreateFinalizationRegistryData() {
+ if (!data().finalizationRegistryData) {
+ data().finalizationRegistryData =
+ MakeUnique<gc::FinalizationRegistryGlobalData>(zone());
+ }
+
+ return maybeFinalizationRegistryData();
+}
+
+bool GlobalObject::addToVarNames(JSContext* cx, JS::Handle<JSAtom*> name) {
+ MOZ_ASSERT(name);
+
+ if (!data().varNames.put(name)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+/* static */
+bool GlobalObject::createIntrinsicsHolder(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ Rooted<NativeObject*> intrinsicsHolder(
+ cx, NewPlainObjectWithProto(cx, nullptr, TenuredObject));
+ if (!intrinsicsHolder) {
+ return false;
+ }
+
+ // Install the intrinsics holder on the global.
+ global->data().intrinsicsHolder.init(intrinsicsHolder);
+ return true;
+}
+
+/* static */
+bool GlobalObject::getSelfHostedFunction(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<PropertyName*> selfHostedName,
+ Handle<JSAtom*> name, unsigned nargs,
+ MutableHandleValue funVal) {
+ if (global->maybeGetIntrinsicValue(selfHostedName, funVal.address(), cx)) {
+ RootedFunction fun(cx, &funVal.toObject().as<JSFunction>());
+ if (fun->explicitName() == name) {
+ return true;
+ }
+
+ if (fun->explicitName() == selfHostedName) {
+ // This function was initially cloned because it was called by
+ // other self-hosted code, so the clone kept its self-hosted name,
+ // instead of getting the name it's intended to have in content
+ // compartments. This can happen when a lazy builtin is initialized
+ // after self-hosted code for another builtin used the same
+ // function. In that case, we need to change the function's name,
+ // which is ok because it can't have been exposed to content
+ // before.
+ fun->setAtom(name);
+ return true;
+ }
+
+ // The function might be installed multiple times on the same or
+ // different builtins, under different property names, so its name
+ // might be neither "selfHostedName" nor "name". In that case, its
+ // canonical name must've been set using the `_SetCanonicalName`
+ // intrinsic.
+ cx->runtime()->assertSelfHostedFunctionHasCanonicalName(selfHostedName);
+ return true;
+ }
+
+ JSRuntime* runtime = cx->runtime();
+ frontend::ScriptIndex index =
+ runtime->getSelfHostedScriptIndexRange(selfHostedName)->start;
+ JSFunction* fun =
+ runtime->selfHostStencil().instantiateSelfHostedLazyFunction(
+ cx, runtime->selfHostStencilInput().atomCache, index, name);
+ if (!fun) {
+ return false;
+ }
+ MOZ_ASSERT(fun->nargs() == nargs);
+ funVal.setObject(*fun);
+
+ return GlobalObject::addIntrinsicValue(cx, global, selfHostedName, funVal);
+}
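+
+/*
+ * Illustrative sketch of the canonical-name case above (hypothetical
+ * self-hosted code; the intrinsic name follows the comment in
+ * getSelfHostedFunction): a function installed under several property names
+ * pins its canonical name explicitly, e.g.
+ *
+ *   function ArrayValues() { ... }
+ *   _SetCanonicalName(ArrayValues, "values");
+ */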
+
+/* static */
+bool GlobalObject::getIntrinsicValueSlow(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<PropertyName*> name,
+ MutableHandleValue value) {
+ // If this is a C++ intrinsic, simply define the function on the intrinsics
+ // holder.
+ if (const JSFunctionSpec* spec = js::FindIntrinsicSpec(name)) {
+ RootedId id(cx, NameToId(name));
+ RootedFunction fun(cx, JS::NewFunctionFromSpec(cx, spec, id));
+ if (!fun) {
+ return false;
+ }
+ fun->setIsIntrinsic();
+
+ value.setObject(*fun);
+ return GlobalObject::addIntrinsicValue(cx, global, name, value);
+ }
+
+ if (!cx->runtime()->getSelfHostedValue(cx, name, value)) {
+ return false;
+ }
+
+ // It's possible in certain edge cases that cloning the value ended up
+ // defining the intrinsic. For instance, cloning can call NewArray, which
+ // resolves Array.prototype, which defines some self-hosted functions. If this
+ // happens we use the value already defined on the intrinsics holder.
+ if (global->maybeGetIntrinsicValue(name, value.address(), cx)) {
+ return true;
+ }
+
+ return GlobalObject::addIntrinsicValue(cx, global, name, value);
+}
+
+/* static */
+bool GlobalObject::addIntrinsicValue(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<PropertyName*> name,
+ HandleValue value) {
+ Rooted<NativeObject*> holder(cx, &global->getIntrinsicsHolder());
+
+ RootedId id(cx, NameToId(name));
+ MOZ_ASSERT(!holder->containsPure(id));
+
+ constexpr PropertyFlags propFlags = {PropertyFlag::Configurable,
+ PropertyFlag::Writable};
+ uint32_t slot;
+ if (!NativeObject::addProperty(cx, holder, id, propFlags, &slot)) {
+ return false;
+ }
+ holder->initSlot(slot, value);
+ return true;
+}
+
+/* static */
+JSObject* GlobalObject::createIteratorPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!cx->realm()->creationOptions().getIteratorHelpersEnabled()) {
+ return getOrCreateBuiltinProto(cx, global, ProtoKind::IteratorProto,
+ initIteratorProto);
+ }
+
+ if (!ensureConstructor(cx, global, JSProto_Iterator)) {
+ return nullptr;
+ }
+ JSObject* proto = &global->getPrototype(JSProto_Iterator);
+ global->initBuiltinProto(ProtoKind::IteratorProto, proto);
+ return proto;
+}
+
+/* static */
+JSObject* GlobalObject::createAsyncIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!cx->realm()->creationOptions().getIteratorHelpersEnabled()) {
+ return getOrCreateBuiltinProto(cx, global, ProtoKind::AsyncIteratorProto,
+ initAsyncIteratorProto);
+ }
+
+ if (!ensureConstructor(cx, global, JSProto_AsyncIterator)) {
+ return nullptr;
+ }
+ JSObject* proto = &global->getPrototype(JSProto_AsyncIterator);
+ global->initBuiltinProto(ProtoKind::AsyncIteratorProto, proto);
+ return proto;
+}
+
+void GlobalObject::releaseData(JS::GCContext* gcx) {
+ GlobalObjectData* data = maybeData();
+ setReservedSlot(GLOBAL_DATA_SLOT, PrivateValue(nullptr));
+ gcx->delete_(this, data, MemoryUse::GlobalObjectData);
+}
+
+GlobalObjectData::GlobalObjectData(Zone* zone) : varNames(zone) {}
+
+GlobalObjectData::~GlobalObjectData() = default;
+
+void GlobalObjectData::trace(JSTracer* trc, GlobalObject* global) {
+ // Atoms are always tenured so don't need to be traced during minor GC.
+ if (trc->runtime()->heapState() != JS::HeapState::MinorCollecting) {
+ varNames.trace(trc);
+ }
+
+ for (auto& ctorWithProto : builtinConstructors) {
+ TraceNullableEdge(trc, &ctorWithProto.constructor, "global-builtin-ctor");
+ TraceNullableEdge(trc, &ctorWithProto.prototype,
+ "global-builtin-ctor-proto");
+ }
+
+ for (auto& proto : builtinProtos) {
+ TraceNullableEdge(trc, &proto, "global-builtin-proto");
+ }
+
+ TraceNullableEdge(trc, &emptyGlobalScope, "global-empty-scope");
+
+ TraceNullableEdge(trc, &lexicalEnvironment, "global-lexical-env");
+ TraceNullableEdge(trc, &windowProxy, "global-window-proxy");
+ TraceNullableEdge(trc, &intrinsicsHolder, "global-intrinsics-holder");
+ TraceNullableEdge(trc, &computedIntrinsicsHolder,
+ "global-computed-intrinsics-holder");
+ TraceNullableEdge(trc, &forOfPICChain, "global-for-of-pic");
+ TraceNullableEdge(trc, &sourceURLsHolder, "global-source-urls");
+ TraceNullableEdge(trc, &realmKeyObject, "global-realm-key");
+ TraceNullableEdge(trc, &throwTypeError, "global-throw-type-error");
+ TraceNullableEdge(trc, &eval, "global-eval");
+ TraceNullableEdge(trc, &emptyIterator, "global-empty-iterator");
+
+ TraceNullableEdge(trc, &arrayShapeWithDefaultProto, "global-array-shape");
+
+ for (auto& shape : plainObjectShapesWithDefaultProto) {
+ TraceNullableEdge(trc, &shape, "global-plain-shape");
+ }
+
+ TraceNullableEdge(trc, &functionShapeWithDefaultProto,
+ "global-function-shape");
+ TraceNullableEdge(trc, &extendedFunctionShapeWithDefaultProto,
+ "global-ext-function-shape");
+
+ TraceNullableEdge(trc, &boundFunctionShapeWithDefaultProto,
+ "global-bound-function-shape");
+
+ if (regExpStatics) {
+ regExpStatics->trace(trc);
+ }
+
+ TraceNullableEdge(trc, &mappedArgumentsTemplate, "mapped-arguments-template");
+ TraceNullableEdge(trc, &unmappedArgumentsTemplate,
+ "unmapped-arguments-template");
+
+ TraceNullableEdge(trc, &iterResultTemplate, "iter-result-template_");
+ TraceNullableEdge(trc, &iterResultWithoutPrototypeTemplate,
+ "iter-result-without-prototype-template");
+
+ TraceNullableEdge(trc, &selfHostingScriptSource,
+ "self-hosting-script-source");
+
+ if (finalizationRegistryData) {
+ finalizationRegistryData->trace(trc);
+ }
+}
+
+void GlobalObjectData::addSizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf, JS::ClassInfo* info) const {
+ info->objectsMallocHeapGlobalData += mallocSizeOf(this);
+
+ if (regExpStatics) {
+ info->objectsMallocHeapGlobalData +=
+ regExpStatics->sizeOfIncludingThis(mallocSizeOf);
+ }
+
+ info->objectsMallocHeapGlobalVarNamesSet +=
+ varNames.shallowSizeOfExcludingThis(mallocSizeOf);
+}
diff --git a/js/src/vm/GlobalObject.h b/js/src/vm/GlobalObject.h
new file mode 100644
index 0000000000..9872185eea
--- /dev/null
+++ b/js/src/vm/GlobalObject.h
@@ -0,0 +1,1166 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_GlobalObject_h
+#define vm_GlobalObject_h
+
+#include "js/GlobalObject.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/EnumeratedArray.h"
+
+#include <stdint.h>
+#include <type_traits>
+
+#include "jsexn.h"
+#include "jsfriendapi.h"
+#include "jspubtd.h"
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "gc/AllocKind.h"
+#include "js/CallArgs.h"
+#include "js/Class.h"
+#include "js/ErrorReport.h"
+#include "js/PropertyDescriptor.h"
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+#include "vm/ArrayObject.h"
+#include "vm/JSAtomState.h"
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+
+struct JSFunctionSpec;
+class JSJitInfo;
+struct JSPrincipals;
+struct JSPropertySpec;
+
+namespace JS {
+class JS_PUBLIC_API RealmOptions;
+};
+
+namespace js {
+
+class ArgumentsObject;
+class GlobalScope;
+class GlobalLexicalEnvironmentObject;
+class PlainObject;
+class PropertyIteratorObject;
+class RegExpStatics;
+
+namespace gc {
+class FinalizationRegistryGlobalData;
+} // namespace gc
+
+// Fixed slot capacities for PlainObjects. The global has a cached Shape for
+// PlainObject with default prototype for each of these values.
+enum class PlainObjectSlotsKind {
+ Slots0,
+ Slots2,
+ Slots4,
+ Slots8,
+ Slots12,
+ Slots16,
+ Limit
+};
+
+static PlainObjectSlotsKind PlainObjectSlotsKindFromAllocKind(
+ gc::AllocKind kind) {
+ switch (kind) {
+ case gc::AllocKind::OBJECT0:
+ return PlainObjectSlotsKind::Slots0;
+ case gc::AllocKind::OBJECT2:
+ return PlainObjectSlotsKind::Slots2;
+ case gc::AllocKind::OBJECT4:
+ return PlainObjectSlotsKind::Slots4;
+ case gc::AllocKind::OBJECT8:
+ return PlainObjectSlotsKind::Slots8;
+ case gc::AllocKind::OBJECT12:
+ return PlainObjectSlotsKind::Slots12;
+ case gc::AllocKind::OBJECT16:
+ return PlainObjectSlotsKind::Slots16;
+ default:
+ break;
+ }
+ MOZ_CRASH("Invalid kind");
+}
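+
+/*
+ * Illustrative use (a sketch): allocation paths map an object's AllocKind to
+ * the matching cached shape, e.g.
+ *
+ *   PlainObjectSlotsKind slotsKind = PlainObjectSlotsKindFromAllocKind(kind);
+ *   // ...then consult plainObjectShapesWithDefaultProto[slotsKind], as
+ *   // GlobalObject::getPlainObjectShapeWithDefaultProto() does below.
+ */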
+
+// Data attached to a GlobalObject. Freeing it when clearing the Realm's
+// global_ lets us avoid adding a finalizer to all GlobalObject JSClasses.
+class GlobalObjectData {
+ friend class js::GlobalObject;
+
+ GlobalObjectData(const GlobalObjectData&) = delete;
+ void operator=(const GlobalObjectData&) = delete;
+
+ public:
+ explicit GlobalObjectData(Zone* zone);
+
+ ~GlobalObjectData();
+
+ // The global environment record's [[VarNames]] list that contains all
+ // names declared using FunctionDeclaration, GeneratorDeclaration, and
+ // VariableDeclaration declarations in global code in this global's realm.
+ // Names are only removed from this list by a |delete IdentifierReference|
+ // that successfully removes that global property.
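+  // For example (an illustrative sketch): |eval("var x")| in global code
+  // adds "x" to this set; a subsequent successful |delete x| removes both
+  // the global property and the [[VarNames]] entry.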
+ using VarNamesSet =
+ GCHashSet<HeapPtr<JSAtom*>, DefaultHasher<JSAtom*>, CellAllocPolicy>;
+ VarNamesSet varNames;
+
+ // The original values for built-in constructors (with their prototype
+ // objects) based on JSProtoKey.
+ //
+ // This is necessary to implement spec language speaking in terms of "the
+ // original Array prototype object", or "as if by the expression new Array()"
+ // referring to the original Array constructor. The actual (writable and even
+ // deletable) Object, Array, &c. properties are not stored here.
+ struct ConstructorWithProto {
+ HeapPtr<JSObject*> constructor;
+ HeapPtr<JSObject*> prototype;
+ };
+ using CtorArray =
+ mozilla::EnumeratedArray<JSProtoKey, JSProto_LIMIT, ConstructorWithProto>;
+ CtorArray builtinConstructors;
+
+ // Built-in prototypes for this global. Note that this is different from the
+ // set of built-in constructors/prototypes based on JSProtoKey.
+ enum class ProtoKind {
+ IteratorProto,
+ ArrayIteratorProto,
+ StringIteratorProto,
+ RegExpStringIteratorProto,
+ GeneratorObjectProto,
+ AsyncIteratorProto,
+ AsyncFromSyncIteratorProto,
+ AsyncGeneratorProto,
+ MapIteratorProto,
+ SetIteratorProto,
+ WrapForValidIteratorProto,
+ IteratorHelperProto,
+ AsyncIteratorHelperProto,
+
+ Limit
+ };
+ using ProtoArray =
+ mozilla::EnumeratedArray<ProtoKind, ProtoKind::Limit, HeapPtr<JSObject*>>;
+ ProtoArray builtinProtos;
+
+ HeapPtr<GlobalScope*> emptyGlobalScope;
+
+ // The lexical environment for global let/const/class bindings.
+ HeapPtr<GlobalLexicalEnvironmentObject*> lexicalEnvironment;
+
+ // The WindowProxy associated with this global.
+ HeapPtr<JSObject*> windowProxy;
+
+ // Functions and other top-level values for self-hosted code. The "computed"
+ // holder is used as the target of `SetIntrinsic` calls, but the same property
+ // may also be cached on the normal intrinsics holder for `GetIntrinsic`.
+ HeapPtr<NativeObject*> intrinsicsHolder;
+ HeapPtr<NativeObject*> computedIntrinsicsHolder;
+
+ // Cache used to optimize certain for-of operations.
+ HeapPtr<NativeObject*> forOfPICChain;
+
+ // List of source URLs for this realm. This is used by the debugger.
+ HeapPtr<ArrayObject*> sourceURLsHolder;
+
+ // Realm-specific object that can be used as key in WeakMaps.
+ HeapPtr<PlainObject*> realmKeyObject;
+
+ // The unique %ThrowTypeError% function for this global.
+ HeapPtr<JSFunction*> throwTypeError;
+
+ // The unique %eval% function (for indirect eval) for this global.
+ HeapPtr<JSFunction*> eval;
+
+ // Empty iterator object used for for-in with null/undefined.
+ HeapPtr<PropertyIteratorObject*> emptyIterator;
+
+ // Cached shape for new arrays with Array.prototype as prototype.
+ HeapPtr<SharedShape*> arrayShapeWithDefaultProto;
+
+ // Shape for PlainObject with %Object.prototype% as proto, for each object
+ // AllocKind.
+ using PlainObjectShapeArray = mozilla::EnumeratedArray<
+ PlainObjectSlotsKind, PlainObjectSlotsKind::Limit, HeapPtr<SharedShape*>>;
+ PlainObjectShapeArray plainObjectShapesWithDefaultProto;
+
+ // Shape for JSFunction with %Function.prototype% as proto, for both
+ // non-extended and extended functions.
+ HeapPtr<SharedShape*> functionShapeWithDefaultProto;
+ HeapPtr<SharedShape*> extendedFunctionShapeWithDefaultProto;
+
+ // Shape for BoundFunctionObject with %Function.prototype% as proto.
+ HeapPtr<SharedShape*> boundFunctionShapeWithDefaultProto;
+
+ // Global state for regular expressions.
+ UniquePtr<RegExpStatics> regExpStatics;
+
+ HeapPtr<ArgumentsObject*> mappedArgumentsTemplate;
+ HeapPtr<ArgumentsObject*> unmappedArgumentsTemplate;
+
+ HeapPtr<PlainObject*> iterResultTemplate;
+ HeapPtr<PlainObject*> iterResultWithoutPrototypeTemplate;
+
+ // Lazily initialized script source object to use for scripts cloned from the
+ // self-hosting stencil.
+ HeapPtr<ScriptSourceObject*> selfHostingScriptSource;
+
+ UniquePtr<gc::FinalizationRegistryGlobalData> finalizationRegistryData;
+
+ // The number of times that one of the following has occurred:
+ // 1. A property of this GlobalObject is deleted.
+ // 2. A data property of this GlobalObject is converted to an accessor,
+ // or vice versa.
+ // 3. A property is defined on the global lexical that shadows a property on
+ // this GlobalObject.
+ uint32_t generationCount = 0;
+
+ // Whether the |globalThis| property has been resolved on the global object.
+ bool globalThisResolved = false;
+
+ void trace(JSTracer* trc, GlobalObject* global);
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::ClassInfo* info) const;
+
+ static constexpr size_t offsetOfLexicalEnvironment() {
+ static_assert(sizeof(lexicalEnvironment) == sizeof(uintptr_t),
+ "JIT code assumes field is pointer-sized");
+ return offsetof(GlobalObjectData, lexicalEnvironment);
+ }
+};
+
+class GlobalObject : public NativeObject {
+ enum : unsigned {
+ GLOBAL_DATA_SLOT = JSCLASS_GLOBAL_APPLICATION_SLOTS,
+
+ // Total reserved-slot count for global objects.
+ RESERVED_SLOTS
+ };
+
+ // The slot count must be in the public API for JSCLASS_GLOBAL_FLAGS, and
+ // we won't expose GlobalObject, so just assert that the two values are
+ // synchronized.
+ static_assert(JSCLASS_GLOBAL_SLOT_COUNT == RESERVED_SLOTS,
+ "global object slot counts are inconsistent");
+
+ // Ensure GlobalObjectData is only one dereference away.
+ static_assert(GLOBAL_DATA_SLOT < MAX_FIXED_SLOTS,
+ "GlobalObjectData should be stored in a fixed slot for "
+ "performance reasons");
+
+ using ProtoKind = GlobalObjectData::ProtoKind;
+
+ GlobalObjectData* maybeData() {
+ Value v = getReservedSlot(GLOBAL_DATA_SLOT);
+ return static_cast<GlobalObjectData*>(v.toPrivate());
+ }
+ const GlobalObjectData* maybeData() const {
+ Value v = getReservedSlot(GLOBAL_DATA_SLOT);
+ return static_cast<const GlobalObjectData*>(v.toPrivate());
+ }
+
+ GlobalObjectData& data() { return *maybeData(); }
+ const GlobalObjectData& data() const { return *maybeData(); }
+
+ void initBuiltinProto(ProtoKind kind, JSObject* proto) {
+ MOZ_ASSERT(proto);
+ data().builtinProtos[kind].init(proto);
+ }
+ bool hasBuiltinProto(ProtoKind kind) const {
+ return bool(data().builtinProtos[kind]);
+ }
+ JSObject* maybeBuiltinProto(ProtoKind kind) const {
+ return data().builtinProtos[kind];
+ }
+ JSObject& getBuiltinProto(ProtoKind kind) const {
+ MOZ_ASSERT(hasBuiltinProto(kind));
+ return *data().builtinProtos[kind];
+ }
+
+ public:
+ GlobalLexicalEnvironmentObject& lexicalEnvironment() {
+ return *data().lexicalEnvironment;
+ }
+ GlobalScope& emptyGlobalScope() const;
+
+ void traceData(JSTracer* trc, GlobalObject* global) {
+ data().trace(trc, global);
+ }
+ void releaseData(JS::GCContext* gcx);
+
+ void addSizeOfData(mozilla::MallocSizeOf mallocSizeOf,
+ JS::ClassInfo* info) const {
+ if (maybeData()) {
+ data().addSizeOfIncludingThis(mallocSizeOf, info);
+ }
+ }
+
+ void setOriginalEval(JSFunction* evalFun) {
+ MOZ_ASSERT(!data().eval);
+ data().eval.init(evalFun);
+ }
+
+ bool hasConstructor(JSProtoKey key) const {
+ return bool(data().builtinConstructors[key].constructor);
+ }
+ JSObject& getConstructor(JSProtoKey key) const {
+ MOZ_ASSERT(hasConstructor(key));
+ return *maybeGetConstructor(key);
+ }
+
+ static bool skipDeselectedConstructor(JSContext* cx, JSProtoKey key);
+
+ private:
+ enum class IfClassIsDisabled { DoNothing, Throw };
+
+ static bool resolveConstructor(JSContext* cx, Handle<GlobalObject*> global,
+ JSProtoKey key, IfClassIsDisabled mode);
+
+ public:
+ static bool ensureConstructor(JSContext* cx, Handle<GlobalObject*> global,
+ JSProtoKey key) {
+ if (global->isStandardClassResolved(key)) {
+ return true;
+ }
+ return resolveConstructor(cx, global, key, IfClassIsDisabled::Throw);
+ }
+
+ static JSObject* getOrCreateConstructor(JSContext* cx, JSProtoKey key) {
+ MOZ_ASSERT(key != JSProto_Null);
+ Handle<GlobalObject*> global = cx->global();
+ if (!GlobalObject::ensureConstructor(cx, global, key)) {
+ return nullptr;
+ }
+ return &global->getConstructor(key);
+ }
+
+ static JSObject* getOrCreatePrototype(JSContext* cx, JSProtoKey key) {
+ MOZ_ASSERT(key != JSProto_Null);
+ Handle<GlobalObject*> global = cx->global();
+ if (!GlobalObject::ensureConstructor(cx, global, key)) {
+ return nullptr;
+ }
+ return &global->getPrototype(key);
+ }
+
+ static JS::Handle<JSObject*> getOrCreatePrototypeHandle(JSContext* cx,
+ JSProtoKey key) {
+ MOZ_ASSERT(key != JSProto_Null);
+ Handle<GlobalObject*> global = cx->global();
+ if (!GlobalObject::ensureConstructor(cx, global, key)) {
+ return nullptr;
+ }
+ return global->getPrototypeHandle(key);
+ }
+
+ JSObject* maybeGetConstructor(JSProtoKey protoKey) const {
+ MOZ_ASSERT(JSProto_Null < protoKey);
+ MOZ_ASSERT(protoKey < JSProto_LIMIT);
+ return data().builtinConstructors[protoKey].constructor;
+ }
+
+ JSObject* maybeGetPrototype(JSProtoKey protoKey) const {
+ MOZ_ASSERT(JSProto_Null < protoKey);
+ MOZ_ASSERT(protoKey < JSProto_LIMIT);
+ return data().builtinConstructors[protoKey].prototype;
+ }
+
+ static bool maybeResolveGlobalThis(JSContext* cx,
+ Handle<GlobalObject*> global,
+ bool* resolved);
+
+ void setConstructor(JSProtoKey key, JSObject* obj) {
+ MOZ_ASSERT(obj);
+ data().builtinConstructors[key].constructor = obj;
+ }
+
+ bool hasPrototype(JSProtoKey key) const {
+ return bool(data().builtinConstructors[key].prototype);
+ }
+ JSObject& getPrototype(JSProtoKey key) const {
+ MOZ_ASSERT(hasPrototype(key));
+ return *maybeGetPrototype(key);
+ }
+
+ JS::Handle<JSObject*> getPrototypeHandle(JSProtoKey protoKey) const {
+ MOZ_ASSERT(hasPrototype(protoKey));
+ MOZ_ASSERT(JSProto_Null < protoKey);
+ MOZ_ASSERT(protoKey < JSProto_LIMIT);
+ return Handle<JSObject*>::fromMarkedLocation(
+ &data().builtinConstructors[protoKey].prototype.get());
+ }
+
+ void setPrototype(JSProtoKey key, JSObject* obj) {
+ MOZ_ASSERT(obj);
+ data().builtinConstructors[key].prototype = obj;
+ }
+
+ /*
+ * Lazy standard classes need a way to indicate they have been initialized.
+ * Otherwise, when we delete them, we might accidentally recreate them via
+ * a lazy initialization. We use the presence of an object in the constructor
+ * array to indicate that they've been initialized.
+ *
+ * Note: A few builtin objects, like JSON and Math, are not constructors,
+ * so getConstructor is a bit of a misnomer.
+ */
+ bool isStandardClassResolved(JSProtoKey key) const {
+ return hasConstructor(key);
+ }
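+
+  // For example (an illustrative sketch): after |delete globalThis.Map|,
+  // isStandardClassResolved(JSProto_Map) stays true, so the deleted binding
+  // is not recreated through lazy initialization.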
+
+ private:
+ bool classIsInitialized(JSProtoKey key) const {
+ bool inited = hasConstructor(key);
+ MOZ_ASSERT(inited == hasPrototype(key));
+ return inited;
+ }
+
+ bool functionObjectClassesInitialized() const {
+ bool inited = classIsInitialized(JSProto_Function);
+ MOZ_ASSERT(inited == classIsInitialized(JSProto_Object));
+ return inited;
+ }
+
+ // Disallow use of unqualified JSObject::create in GlobalObject.
+ static GlobalObject* create(...) = delete;
+
+ friend struct ::JSRuntime;
+ static GlobalObject* createInternal(JSContext* cx, const JSClass* clasp);
+
+ public:
+ static GlobalObject* new_(JSContext* cx, const JSClass* clasp,
+ JSPrincipals* principals,
+ JS::OnNewGlobalHookOption hookOption,
+ const JS::RealmOptions& options);
+
+ /*
+ * Create a constructor function with the specified name and length using
+ * ctor, a method which creates objects with the given class.
+ */
+ static JSFunction* createConstructor(
+ JSContext* cx, JSNative ctor, JSAtom* name, unsigned length,
+ gc::AllocKind kind = gc::AllocKind::FUNCTION,
+ const JSJitInfo* jitInfo = nullptr);
+
+ /*
+ * Create an object to serve as [[Prototype]] for instances of the given
+ * class, using |Object.prototype| as its [[Prototype]]. Users creating
+ * prototype objects with particular internal structure (e.g. reserved
+ * slots guaranteed to contain values of particular types) must immediately
+ * complete the minimal initialization to make the returned object safe to
+ * touch.
+ */
+ static NativeObject* createBlankPrototype(JSContext* cx,
+ Handle<GlobalObject*> global,
+ const JSClass* clasp);
+
+ /*
+ * Identical to createBlankPrototype, but uses proto as the [[Prototype]]
+ * of the returned blank prototype.
+ */
+ static NativeObject* createBlankPrototypeInheriting(JSContext* cx,
+ const JSClass* clasp,
+ HandleObject proto);
+
+ template <typename T>
+ static T* createBlankPrototypeInheriting(JSContext* cx, HandleObject proto) {
+ NativeObject* res = createBlankPrototypeInheriting(cx, &T::class_, proto);
+ return res ? &res->template as<T>() : nullptr;
+ }
+
+ template <typename T>
+ static T* createBlankPrototype(JSContext* cx, Handle<GlobalObject*> global) {
+ NativeObject* res = createBlankPrototype(cx, global, &T::class_);
+ return res ? &res->template as<T>() : nullptr;
+ }
+
+ // Object, Function, and eval are eagerly resolved when creating the global.
+ JSObject& getObjectPrototype() {
+ MOZ_ASSERT(functionObjectClassesInitialized());
+ return getPrototype(JSProto_Object);
+ }
+ Handle<JSObject*> getObjectPrototypeHandle() {
+ MOZ_ASSERT(functionObjectClassesInitialized());
+ return getPrototypeHandle(JSProto_Object);
+ }
+ JSObject& getFunctionConstructor() {
+ MOZ_ASSERT(functionObjectClassesInitialized());
+ return getConstructor(JSProto_Function);
+ }
+ JSObject& getFunctionPrototype() {
+ MOZ_ASSERT(functionObjectClassesInitialized());
+ return getPrototype(JSProto_Function);
+ }
+ JSFunction& getEvalFunction() {
+ MOZ_ASSERT(data().eval);
+ return *data().eval;
+ }
+
+ static NativeObject* getOrCreateArrayPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Array)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_Array).as<NativeObject>();
+ }
+
+ NativeObject* maybeGetArrayPrototype() {
+ if (classIsInitialized(JSProto_Array)) {
+ return &getPrototype(JSProto_Array).as<NativeObject>();
+ }
+ return nullptr;
+ }
+
+ static JSObject* getOrCreateBooleanPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Boolean)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_Boolean);
+ }
+
+ static JSObject* getOrCreateNumberPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Number)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_Number);
+ }
+
+ static JSObject* getOrCreateStringPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_String)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_String);
+ }
+
+ static JSObject* getOrCreateSymbolPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Symbol)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_Symbol);
+ }
+
+ static JSObject* getOrCreateBigIntPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_BigInt)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_BigInt);
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ static JSObject* getOrCreateRecordPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Record)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_Record);
+ }
+
+ static JSObject* getOrCreateTuplePrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Tuple)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_Tuple);
+ }
+#endif
+
+ static JSObject* getOrCreatePromisePrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Promise)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_Promise);
+ }
+
+ static JSObject* getOrCreateRegExpPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_RegExp)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_RegExp);
+ }
+
+ JSObject* maybeGetRegExpPrototype() {
+ if (classIsInitialized(JSProto_RegExp)) {
+ return &getPrototype(JSProto_RegExp);
+ }
+ return nullptr;
+ }
+
+ static JSObject* getOrCreateSavedFramePrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_SavedFrame)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_SavedFrame);
+ }
+
+ static JSObject* getOrCreateArrayBufferConstructor(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_ArrayBuffer)) {
+ return nullptr;
+ }
+ return &global->getConstructor(JSProto_ArrayBuffer);
+ }
+
+ static JSObject* getOrCreateArrayBufferPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_ArrayBuffer)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_ArrayBuffer);
+ }
+
+ static JSObject* getOrCreateSharedArrayBufferPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_SharedArrayBuffer)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_SharedArrayBuffer);
+ }
+
+ static JSObject* getOrCreateCustomErrorPrototype(JSContext* cx,
+ Handle<GlobalObject*> global,
+ JSExnType exnType) {
+ JSProtoKey key = GetExceptionProtoKey(exnType);
+ if (!ensureConstructor(cx, global, key)) {
+ return nullptr;
+ }
+ return &global->getPrototype(key);
+ }
+
+ static JSFunction* getOrCreateErrorConstructor(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Error)) {
+ return nullptr;
+ }
+ return &global->getConstructor(JSProto_Error).as<JSFunction>();
+ }
+
+ static JSObject* getOrCreateErrorPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ return getOrCreateCustomErrorPrototype(cx, global, JSEXN_ERR);
+ }
+
+ static NativeObject* getOrCreateSetPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Set)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_Set).as<NativeObject>();
+ }
+
+ static NativeObject* getOrCreateWeakSetPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_WeakSet)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_WeakSet).as<NativeObject>();
+ }
+
+ static JSFunction* getOrCreateTypedArrayConstructor(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_TypedArray)) {
+ return nullptr;
+ }
+ return &global->getConstructor(JSProto_TypedArray).as<JSFunction>();
+ }
+
+ static JSObject* getOrCreateTypedArrayPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_TypedArray)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_TypedArray);
+ }
+
+ private:
+ using ObjectInitOp = bool (*)(JSContext*, Handle<GlobalObject*>);
+ using ObjectInitWithTagOp = bool (*)(JSContext*, Handle<GlobalObject*>,
+ Handle<JSAtom*>);
+
+ static JSObject* getOrCreateBuiltinProto(JSContext* cx,
+ Handle<GlobalObject*> global,
+ ProtoKind kind, ObjectInitOp init) {
+ if (JSObject* proto = global->maybeBuiltinProto(kind)) {
+ return proto;
+ }
+
+ return createBuiltinProto(cx, global, kind, init);
+ }
+
+ static JSObject* getOrCreateBuiltinProto(JSContext* cx,
+ Handle<GlobalObject*> global,
+ ProtoKind kind, Handle<JSAtom*> tag,
+ ObjectInitWithTagOp init) {
+ if (JSObject* proto = global->maybeBuiltinProto(kind)) {
+ return proto;
+ }
+
+ return createBuiltinProto(cx, global, kind, tag, init);
+ }
+
+ static JSObject* createBuiltinProto(JSContext* cx,
+ Handle<GlobalObject*> global,
+ ProtoKind kind, ObjectInitOp init);
+ static JSObject* createBuiltinProto(JSContext* cx,
+ Handle<GlobalObject*> global,
+ ProtoKind kind, Handle<JSAtom*> tag,
+ ObjectInitWithTagOp init);
+
+ static JSObject* createIteratorPrototype(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ public:
+ static JSObject* getOrCreateIteratorPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (JSObject* proto = global->maybeBuiltinProto(ProtoKind::IteratorProto)) {
+ return proto;
+ }
+ return createIteratorPrototype(cx, global);
+ }
+
+ static NativeObject* getOrCreateArrayIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global);
+
+ NativeObject* maybeGetArrayIteratorPrototype() {
+ if (JSObject* obj = maybeBuiltinProto(ProtoKind::ArrayIteratorProto)) {
+ return &obj->as<NativeObject>();
+ }
+ return nullptr;
+ }
+
+ static JSObject* getOrCreateStringIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global);
+
+ static JSObject* getOrCreateRegExpStringIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global);
+
+ void setGeneratorObjectPrototype(JSObject* obj) {
+ initBuiltinProto(ProtoKind::GeneratorObjectProto, obj);
+ }
+
+ static JSObject* getOrCreateGeneratorObjectPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_GeneratorFunction)) {
+ return nullptr;
+ }
+ return &global->getBuiltinProto(ProtoKind::GeneratorObjectProto);
+ }
+
+ static JSObject* getOrCreateGeneratorFunctionPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_GeneratorFunction)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_GeneratorFunction);
+ }
+
+ static JSObject* getOrCreateGeneratorFunction(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_GeneratorFunction)) {
+ return nullptr;
+ }
+ return &global->getConstructor(JSProto_GeneratorFunction);
+ }
+
+ static JSObject* getOrCreateAsyncFunctionPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_AsyncFunction)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_AsyncFunction);
+ }
+
+ static JSObject* getOrCreateAsyncFunction(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_AsyncFunction)) {
+ return nullptr;
+ }
+ return &global->getConstructor(JSProto_AsyncFunction);
+ }
+
+ static JSObject* createAsyncIteratorPrototype(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ static JSObject* getOrCreateAsyncIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (JSObject* proto =
+ global->maybeBuiltinProto(ProtoKind::AsyncIteratorProto)) {
+ return proto;
+ }
+ return createAsyncIteratorPrototype(cx, global);
+ }
+
+ static JSObject* getOrCreateAsyncFromSyncIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return getOrCreateBuiltinProto(cx, global,
+ ProtoKind::AsyncFromSyncIteratorProto,
+ initAsyncFromSyncIteratorProto);
+ }
+
+ static JSObject* getOrCreateAsyncGenerator(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_AsyncGeneratorFunction)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_AsyncGeneratorFunction);
+ }
+
+ static JSObject* getOrCreateAsyncGeneratorFunction(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_AsyncGeneratorFunction)) {
+ return nullptr;
+ }
+ return &global->getConstructor(JSProto_AsyncGeneratorFunction);
+ }
+
+ void setAsyncGeneratorPrototype(JSObject* obj) {
+ initBuiltinProto(ProtoKind::AsyncGeneratorProto, obj);
+ }
+
+ static JSObject* getOrCreateAsyncGeneratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_AsyncGeneratorFunction)) {
+ return nullptr;
+ }
+ return &global->getBuiltinProto(ProtoKind::AsyncGeneratorProto);
+ }
+
+ static JSObject* getOrCreateMapIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return getOrCreateBuiltinProto(cx, global, ProtoKind::MapIteratorProto,
+ initMapIteratorProto);
+ }
+
+ static JSObject* getOrCreateSetIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return getOrCreateBuiltinProto(cx, global, ProtoKind::SetIteratorProto,
+ initSetIteratorProto);
+ }
+
+ static JSObject* getOrCreateDataViewPrototype(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_DataView)) {
+ return nullptr;
+ }
+ return &global->getPrototype(JSProto_DataView);
+ }
+
+ static JSObject* getOrCreatePromiseConstructor(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (!ensureConstructor(cx, global, JSProto_Promise)) {
+ return nullptr;
+ }
+ return &global->getConstructor(JSProto_Promise);
+ }
+
+ static NativeObject* getOrCreateWrapForValidIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global);
+
+ static NativeObject* getOrCreateIteratorHelperPrototype(
+ JSContext* cx, Handle<GlobalObject*> global);
+
+ static NativeObject* getOrCreateAsyncIteratorHelperPrototype(
+ JSContext* cx, Handle<GlobalObject*> global);
+ static bool initAsyncIteratorHelperProto(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ NativeObject& getIntrinsicsHolder() const {
+ MOZ_ASSERT(data().intrinsicsHolder);
+ return *data().intrinsicsHolder;
+ }
+
+ static bool createIntrinsicsHolder(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ NativeObject* getComputedIntrinsicsHolder() {
+ return data().computedIntrinsicsHolder;
+ }
+ void setComputedIntrinsicsHolder(NativeObject* holder) {
+ data().computedIntrinsicsHolder = holder;
+ }
+
+ // If a self-hosting intrinsic with the given |name| exists, it's stored in
+ // |*vp| and this function returns true. Else it returns false.
+ bool maybeGetIntrinsicValue(PropertyName* name, Value* vp, JSContext* cx) {
+ NativeObject& holder = getIntrinsicsHolder();
+
+ if (mozilla::Maybe<PropertyInfo> prop = holder.lookup(cx, name)) {
+ *vp = holder.getSlot(prop->slot());
+ return true;
+ }
+
+ return false;
+ }
+
+ static bool getIntrinsicValue(JSContext* cx, Handle<GlobalObject*> global,
+ Handle<PropertyName*> name,
+ MutableHandleValue value) {
+ // `undefined` in self-hosted JS code should be emitted as JSOp::Undefined.
+ MOZ_ASSERT(name != cx->names().undefined);
+
+ if (global->maybeGetIntrinsicValue(name, value.address(), cx)) {
+ return true;
+ }
+ return getIntrinsicValueSlow(cx, global, name, value);
+ }
+
+ static bool getIntrinsicValueSlow(JSContext* cx, Handle<GlobalObject*> global,
+ Handle<PropertyName*> name,
+ MutableHandleValue value);
+
+ static bool addIntrinsicValue(JSContext* cx, Handle<GlobalObject*> global,
+ Handle<PropertyName*> name, HandleValue value);
+
+ static inline bool setIntrinsicValue(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<PropertyName*> name,
+ HandleValue value);
+
+ static bool getSelfHostedFunction(JSContext* cx, Handle<GlobalObject*> global,
+ Handle<PropertyName*> selfHostedName,
+ Handle<JSAtom*> name, unsigned nargs,
+ MutableHandleValue funVal);
+
+ static RegExpStatics* getRegExpStatics(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ static JSObject* getOrCreateThrowTypeError(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ // Infallibly test whether the given value is the eval function for this
+ // global.
+ bool valueIsEval(const Value& val);
+
+ void removeFromVarNames(JSAtom* name) { data().varNames.remove(name); }
+
+ // Whether the given name is in [[VarNames]].
+ bool isInVarNames(JSAtom* name) { return data().varNames.has(name); }
+
+ // Add a name to [[VarNames]]. Reports OOM on failure.
+ [[nodiscard]] bool addToVarNames(JSContext* cx, JS::Handle<JSAtom*> name);
+
+ static ArgumentsObject* getOrCreateArgumentsTemplateObject(JSContext* cx,
+ bool mapped);
+ ArgumentsObject* maybeArgumentsTemplateObject(bool mapped) const;
+
+ static const size_t IterResultObjectValueSlot = 0;
+ static const size_t IterResultObjectDoneSlot = 1;
+ static js::PlainObject* getOrCreateIterResultTemplateObject(JSContext* cx);
+ static js::PlainObject* getOrCreateIterResultWithoutPrototypeTemplateObject(
+ JSContext* cx);
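+  // Layout sketch (illustrative): objects created from these templates
+  // behave like the literal |{ value: V, done: D }|, with V stored in
+  // IterResultObjectValueSlot and D in IterResultObjectDoneSlot.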
+
+ private:
+ enum class WithObjectPrototype { No, Yes };
+ static js::PlainObject* createIterResultTemplateObject(
+ JSContext* cx, WithObjectPrototype withProto);
+
+ public:
+ static ScriptSourceObject* getOrCreateSelfHostingScriptSourceObject(
+ JSContext* cx, Handle<GlobalObject*> global);
+
+ // Implemented in vm/Iteration.cpp.
+ static bool initIteratorProto(JSContext* cx, Handle<GlobalObject*> global);
+ template <ProtoKind Kind, const JSClass* ProtoClass,
+ const JSFunctionSpec* Methods>
+ static bool initObjectIteratorProto(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<JSAtom*> tag);
+
+ // Implemented in vm/AsyncIteration.cpp.
+ static bool initAsyncIteratorProto(JSContext* cx,
+ Handle<GlobalObject*> global);
+ static bool initAsyncFromSyncIteratorProto(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ // Implemented in builtin/MapObject.cpp.
+ static bool initMapIteratorProto(JSContext* cx, Handle<GlobalObject*> global);
+ static bool initSetIteratorProto(JSContext* cx, Handle<GlobalObject*> global);
+
+ static bool initStandardClasses(JSContext* cx, Handle<GlobalObject*> global);
+
+ // Disallow GC as it may mutate the vector.
+ Realm::DebuggerVector& getDebuggers(const JS::AutoRequireNoGC& nogc) const {
+ return realm()->getDebuggers(nogc);
+ }
+ bool hasDebuggers() const { return realm()->hasDebuggers(); }
+
+ inline NativeObject* getForOfPICObject() { return data().forOfPICChain; }
+ static NativeObject* getOrCreateForOfPICObject(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ JSObject* maybeWindowProxy() const { return data().windowProxy; }
+
+ void setWindowProxy(JSObject* windowProxy) {
+ // Note: the global must always be associated with the same WindowProxy.
+ // CacheIR optimizations rely on this by baking in the WindowProxy for the
+ // global.
+ MOZ_ASSERT(!data().windowProxy);
+ data().windowProxy.init(windowProxy);
+ }
+
+ ArrayObject* getSourceURLsHolder() const { return data().sourceURLsHolder; }
+
+ void setSourceURLsHolder(ArrayObject* holder) {
+ data().sourceURLsHolder = holder;
+ }
+ void clearSourceURLSHolder() {
+ // This is called at the start of shrinking GCs, so avoids barriers.
+ data().sourceURLsHolder.unbarrieredSet(nullptr);
+ }
+
+ SharedShape* maybeArrayShapeWithDefaultProto() const {
+ return data().arrayShapeWithDefaultProto;
+ }
+
+ static SharedShape* getArrayShapeWithDefaultProto(JSContext* cx) {
+ if (SharedShape* shape = cx->global()->data().arrayShapeWithDefaultProto;
+ MOZ_LIKELY(shape)) {
+ return shape;
+ }
+ return createArrayShapeWithDefaultProto(cx);
+ }
+ static SharedShape* createArrayShapeWithDefaultProto(JSContext* cx);
+
+ static SharedShape* getPlainObjectShapeWithDefaultProto(JSContext* cx,
+ gc::AllocKind kind) {
+ PlainObjectSlotsKind slotsKind = PlainObjectSlotsKindFromAllocKind(kind);
+ SharedShape* shape =
+ cx->global()->data().plainObjectShapesWithDefaultProto[slotsKind];
+ if (MOZ_LIKELY(shape)) {
+ return shape;
+ }
+ return createPlainObjectShapeWithDefaultProto(cx, kind);
+ }
+ static SharedShape* createPlainObjectShapeWithDefaultProto(
+ JSContext* cx, gc::AllocKind kind);
+
+ static SharedShape* getFunctionShapeWithDefaultProto(JSContext* cx,
+ bool extended) {
+ GlobalObjectData& data = cx->global()->data();
+ SharedShape* shape = extended ? data.extendedFunctionShapeWithDefaultProto
+ : data.functionShapeWithDefaultProto;
+ if (MOZ_LIKELY(shape)) {
+ return shape;
+ }
+ return createFunctionShapeWithDefaultProto(cx, extended);
+ }
+ static SharedShape* createFunctionShapeWithDefaultProto(JSContext* cx,
+ bool extended);
+
+ SharedShape* maybeBoundFunctionShapeWithDefaultProto() const {
+ return data().boundFunctionShapeWithDefaultProto;
+ }
+ void setBoundFunctionShapeWithDefaultProto(SharedShape* shape) {
+ data().boundFunctionShapeWithDefaultProto = shape;
+ }
+
+ PropertyIteratorObject* maybeEmptyIterator() const {
+ return data().emptyIterator;
+ }
+
+ static PropertyIteratorObject* getOrCreateEmptyIterator(JSContext* cx);
+
+  // Returns an object that represents the realm; used by the embedder.
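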
+ static JSObject* getOrCreateRealmKeyObject(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ gc::FinalizationRegistryGlobalData* getOrCreateFinalizationRegistryData();
+ gc::FinalizationRegistryGlobalData* maybeFinalizationRegistryData() const {
+ return data().finalizationRegistryData.get();
+ }
+
+ static size_t offsetOfGlobalDataSlot() {
+ return getFixedSlotOffset(GLOBAL_DATA_SLOT);
+ }
+
+ uint32_t generationCount() const { return data().generationCount; }
+ const void* addressOfGenerationCount() const {
+ return &data().generationCount;
+ }
+ void bumpGenerationCount() {
+ MOZ_RELEASE_ASSERT(data().generationCount < UINT32_MAX);
+ data().generationCount++;
+ }
+};
+
+/*
+ * Unless otherwise specified, define ctor.prototype = proto as non-enumerable,
+ * non-configurable, and non-writable; and define proto.constructor = ctor as
+ * non-enumerable but configurable and writable.
+ */
+extern bool LinkConstructorAndPrototype(
+ JSContext* cx, JSObject* ctor, JSObject* proto,
+ unsigned prototypeAttrs = JSPROP_PERMANENT | JSPROP_READONLY,
+ unsigned constructorAttrs = 0);
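+
+/*
+ * Illustrative call (a sketch; |ctor| and |proto| stand for freshly created
+ * constructor and prototype objects): linking with the default attributes
+ * yields a permanent, read-only |ctor.prototype| and a writable,
+ * configurable |proto.constructor|:
+ *
+ *   if (!LinkConstructorAndPrototype(cx, ctor, proto)) {
+ *     return false;
+ *   }
+ */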
+
+/*
+ * Define properties and/or functions on any object. Either ps or fs, or both,
+ * may be null.
+ */
+extern bool DefinePropertiesAndFunctions(JSContext* cx, HandleObject obj,
+ const JSPropertySpec* ps,
+ const JSFunctionSpec* fs);
+
+extern bool DefineToStringTag(JSContext* cx, HandleObject obj, JSAtom* tag);
+
+/*
+ * Convenience templates wrapping the generic constructor and prototype
+ * creation functions used by ClassSpecs.
+ */
+
+template <JSNative ctor, unsigned length, gc::AllocKind kind,
+ const JSJitInfo* jitInfo = nullptr>
+JSObject* GenericCreateConstructor(JSContext* cx, JSProtoKey key) {
+ // Note - We duplicate the trick from ClassName() so that we don't need to
+ // include vm/JSAtom-inl.h here.
+ PropertyName* name = (&cx->names().Null)[key];
+ return GlobalObject::createConstructor(cx, ctor, name, length, kind, jitInfo);
+}
+
+template <typename T>
+JSObject* GenericCreatePrototype(JSContext* cx, JSProtoKey key) {
+ static_assert(
+ !std::is_same_v<T, PlainObject>,
+ "creating Object.prototype is very special and isn't handled here");
+ MOZ_ASSERT(&T::class_ == ProtoKeyToClass(key),
+ "type mismatch--probably too much copy/paste in your ClassSpec");
+ MOZ_ASSERT(
+ InheritanceProtoKeyForStandardClass(key) == JSProto_Object,
+ "subclasses (of anything but Object) can't use GenericCreatePrototype");
+ return GlobalObject::createBlankPrototype(cx, cx->global(), &T::protoClass_);
+}
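+
+/*
+ * Illustrative ClassSpec wiring for the two templates above (a sketch;
+ * |FooObject|, |FooConstruct|, and |foo_methods| are hypothetical names):
+ *
+ *   static const ClassSpec FooObjectClassSpec = {
+ *       GenericCreateConstructor<FooConstruct, 1, gc::AllocKind::FUNCTION>,
+ *       GenericCreatePrototype<FooObject>,
+ *       nullptr,      // static (constructor) methods
+ *       nullptr,      // static (constructor) properties
+ *       foo_methods,  // prototype methods
+ *       nullptr,      // prototype properties
+ *   };
+ */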
+
+inline JSProtoKey StandardProtoKeyOrNull(const JSObject* obj) {
+ return JSCLASS_CACHED_PROTO_KEY(obj->getClass());
+}
+
+JSObject* NewTenuredObjectWithFunctionPrototype(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<js::GlobalObject>() const {
+ return !!(getClass()->flags & JSCLASS_IS_GLOBAL);
+}
+
+#endif /* vm_GlobalObject_h */
diff --git a/js/src/vm/HelperThreadState.h b/js/src/vm/HelperThreadState.h
new file mode 100644
index 0000000000..e803559ab4
--- /dev/null
+++ b/js/src/vm/HelperThreadState.h
@@ -0,0 +1,823 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Definitions for managing off-thread work using a process-wide list of
+ * worklist items and a pool of threads. Worklist items are engine-internal
+ * and are distinct from e.g. web workers.
+ */
+
+#ifndef vm_HelperThreadState_h
+#define vm_HelperThreadState_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/RefPtr.h" // RefPtr
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Vector.h" // mozilla::Vector
+
+#include "ds/Fifo.h"
+#include "frontend/CompilationStencil.h" // CompilationStencil, CompilationGCOutput
+#include "frontend/FrontendContext.h"
+#include "js/CompileOptions.h"
+#include "js/experimental/CompileScript.h" // JS::CompilationStorage
+#include "js/experimental/JSStencil.h" // JS::InstantiationStorage
+#include "js/HelperThreadAPI.h"
+#include "js/TypeDecls.h"
+#include "threading/ConditionVariable.h"
+#include "vm/HelperThreads.h"
+#include "vm/HelperThreadTask.h"
+#include "vm/JSContext.h"
+#include "vm/OffThreadPromiseRuntimeState.h" // js::OffThreadPromiseTask
+
+namespace js {
+
+struct ParseTask;
+struct DelazifyTask;
+struct FreeDelazifyTask;
+struct PromiseHelperTask;
+class PromiseObject;
+
+namespace jit {
+class IonCompileTask;
+class IonFreeTask;
+} // namespace jit
+
+enum class ParseTaskKind {
+ // The output is CompilationStencil for script.
+ ScriptStencil,
+
+ // The output is CompilationStencil for module.
+ ModuleStencil,
+
+ // The output is CompilationStencil for script/stencil.
+ StencilDecode,
+
+ // The output is an array of CompilationStencil.
+ MultiStencilsDecode,
+};
+
+namespace wasm {
+
+struct CompileTask;
+typedef Fifo<CompileTask*, 0, SystemAllocPolicy> CompileTaskPtrFifo;
+
+struct Tier2GeneratorTask : public HelperThreadTask {
+ virtual ~Tier2GeneratorTask() = default;
+ virtual void cancel() = 0;
+};
+
+using UniqueTier2GeneratorTask = UniquePtr<Tier2GeneratorTask>;
+typedef Vector<Tier2GeneratorTask*, 0, SystemAllocPolicy>
+ Tier2GeneratorTaskPtrVector;
+
+} // namespace wasm
+
+// Per-process state for off-thread work items.
+class GlobalHelperThreadState {
+ friend class AutoLockHelperThreadState;
+ friend class AutoUnlockHelperThreadState;
+
+ public:
+ // A single tier-2 ModuleGenerator job spawns many compilation jobs, and we
+ // do not want to allow more than one such ModuleGenerator to run at a time.
+ static const size_t MaxTier2GeneratorTasks = 1;
+
+ // Number of CPUs to treat this machine as having when creating threads.
+ // May be accessed without locking.
+ size_t cpuCount;
+
+ // Number of threads to create. May be accessed without locking.
+ size_t threadCount;
+
+ // Thread stack quota to use when running tasks.
+ size_t stackQuota;
+
+ bool terminating_ = false;
+
+ typedef Vector<jit::IonCompileTask*, 0, SystemAllocPolicy>
+ IonCompileTaskVector;
+ using IonFreeTaskVector =
+ Vector<js::UniquePtr<jit::IonFreeTask>, 0, SystemAllocPolicy>;
+ typedef Vector<UniquePtr<ParseTask>, 0, SystemAllocPolicy> ParseTaskVector;
+ using ParseTaskList = mozilla::LinkedList<ParseTask>;
+ using DelazifyTaskList = mozilla::LinkedList<DelazifyTask>;
+ using FreeDelazifyTaskVector =
+ Vector<js::UniquePtr<FreeDelazifyTask>, 1, SystemAllocPolicy>;
+ typedef Vector<UniquePtr<SourceCompressionTask>, 0, SystemAllocPolicy>
+ SourceCompressionTaskVector;
+ typedef Vector<PromiseHelperTask*, 0, SystemAllocPolicy>
+ PromiseHelperTaskVector;
+ typedef Vector<JSContext*, 0, SystemAllocPolicy> ContextVector;
+
+  // Count of running tasks for each thread type.
+ mozilla::EnumeratedArray<ThreadType, ThreadType::THREAD_TYPE_MAX, size_t>
+ runningTaskCount;
+ size_t totalCountRunningTasks;
+
+ WriteOnceData<JS::RegisterThreadCallback> registerThread;
+ WriteOnceData<JS::UnregisterThreadCallback> unregisterThread;
+
+ private:
+ // The lists below are all protected by |lock|.
+
+ // Ion compilation worklist and finished jobs.
+ IonCompileTaskVector ionWorklist_, ionFinishedList_;
+ IonFreeTaskVector ionFreeList_;
+
+ // wasm worklists.
+ wasm::CompileTaskPtrFifo wasmWorklist_tier1_;
+ wasm::CompileTaskPtrFifo wasmWorklist_tier2_;
+ wasm::Tier2GeneratorTaskPtrVector wasmTier2GeneratorWorklist_;
+
+ // Count of finished Tier2Generator tasks.
+ uint32_t wasmTier2GeneratorsFinished_;
+
+ // Async tasks that, upon completion, are dispatched back to the JSContext's
+ // owner thread via embedding callbacks instead of a finished list.
+ PromiseHelperTaskVector promiseHelperTasks_;
+
+ // Script parsing/emitting worklist and finished jobs.
+ ParseTaskVector parseWorklist_;
+ ParseTaskList parseFinishedList_;
+
+  // Script worklist, which might still have functions to delazify.
+ DelazifyTaskList delazifyWorklist_;
+  // Ideally an instance should not have a method to free itself: the method
+  // has a |this| pointer, which aliases the deleted instance, and the method
+  // might have some of its fields aliased on the stack.
+  //
+  // Delazification tasks are complex and have a lot of fields. To reduce the
+  // risk of having aliased fields on the stack while deleting instances of
+  // DelazifyTask, we have FreeDelazifyTask. While FreeDelazifyTask suffers
+  // from the same problem, the limited scope of its actions should mitigate
+  // the risk.
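+  //
+  // Illustrative sketch of the hazard (hypothetical code, not from this
+  // file):
+  //
+  //   void Task::runAndFree() {
+  //     js_delete(this);  // |this| now dangles...
+  //     use(field_);      // ...making this later field access a
+  //   }                   // use-after-free.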
+ FreeDelazifyTaskVector freeDelazifyTaskVector_;
+
+ // Source compression worklist of tasks that we do not yet know can start.
+ SourceCompressionTaskVector compressionPendingList_;
+
+ // Source compression worklist of tasks that can start.
+ SourceCompressionTaskVector compressionWorklist_;
+
+ // Finished source compression tasks.
+ SourceCompressionTaskVector compressionFinishedList_;
+
+ // GC tasks needing to be done in parallel.
+ GCParallelTaskList gcParallelWorklist_;
+ size_t gcParallelThreadCount;
+
+  // Global list of JSContexts for GlobalHelperThreadState to use.
+ ContextVector helperContexts_;
+
+ using HelperThreadTaskVector =
+ Vector<HelperThreadTask*, 0, SystemAllocPolicy>;
+  // Vector of running HelperThreadTasks.
+  // This is used to get the HelperThreadTasks that are currently running.
+ HelperThreadTaskVector helperTasks_;
+
+ // Callback to dispatch a task to a thread pool. Set by
+  // JS::SetHelperThreadTaskCallback. If this is not set, the internal thread
+  // pool is used.
+ JS::HelperThreadTaskCallback dispatchTaskCallback = nullptr;
+
+ // The number of tasks dispatched to the thread pool that have not started
+ // running yet.
+ size_t tasksPending_ = 0;
+
+ bool isInitialized_ = false;
+
+ bool useInternalThreadPool_ = true;
+
+ ParseTask* removeFinishedParseTask(JSContext* cx, JS::OffThreadToken* token);
+
+ public:
+ void addSizeOfIncludingThis(JS::GlobalStats* stats,
+ const AutoLockHelperThreadState& lock) const;
+
+ size_t maxIonCompilationThreads() const;
+ size_t maxWasmCompilationThreads() const;
+ size_t maxWasmTier2GeneratorThreads() const;
+ size_t maxPromiseHelperThreads() const;
+ size_t maxParseThreads() const;
+ size_t maxCompressionThreads() const;
+ size_t maxGCParallelThreads(const AutoLockHelperThreadState& lock) const;
+
+ GlobalHelperThreadState();
+
+ bool isInitialized(const AutoLockHelperThreadState& lock) const {
+ return isInitialized_;
+ }
+
+ [[nodiscard]] bool ensureInitialized();
+ [[nodiscard]] bool ensureThreadCount(size_t count,
+ AutoLockHelperThreadState& lock);
+ void finish(AutoLockHelperThreadState& lock);
+ void finishThreads(AutoLockHelperThreadState& lock);
+
+ void setCpuCount(size_t count);
+
+ void setDispatchTaskCallback(JS::HelperThreadTaskCallback callback,
+ size_t threadCount, size_t stackSize,
+ const AutoLockHelperThreadState& lock);
+
+ JSContext* getFirstUnusedContext(AutoLockHelperThreadState& locked);
+ void destroyHelperContexts(AutoLockHelperThreadState& lock);
+
+#ifdef DEBUG
+ void assertIsLockedByCurrentThread() const;
+#endif
+
+ void wait(AutoLockHelperThreadState& locked,
+ mozilla::TimeDuration timeout = mozilla::TimeDuration::Forever());
+ void notifyAll(const AutoLockHelperThreadState&);
+
+ bool useInternalThreadPool(const AutoLockHelperThreadState& lock) const {
+ return useInternalThreadPool_;
+ }
+
+ bool isTerminating(const AutoLockHelperThreadState& locked) const {
+ return terminating_;
+ }
+
+ private:
+ void notifyOne(const AutoLockHelperThreadState&);
+
+ public:
+ // Helper method for removing items from the vectors below while iterating
+ // over them.
+ template <typename T>
+ void remove(T& vector, size_t* index) {
+ // Self-moving is undefined behavior.
+ if (*index != vector.length() - 1) {
+ vector[*index] = std::move(vector.back());
+ }
+ (*index)--;
+ vector.popBack();
+ }
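+
+  // Typical removal-while-iterating pattern (an illustrative sketch):
+  //
+  //   for (size_t i = 0; i < vector.length(); i++) {
+  //     if (shouldRemove(vector[i])) {
+  //       remove(vector, &i);  // swap in the last element and pop; index i
+  //     }                      // is re-visited on the next iteration.
+  //   }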
+
+ IonCompileTaskVector& ionWorklist(const AutoLockHelperThreadState&) {
+ return ionWorklist_;
+ }
+ IonCompileTaskVector& ionFinishedList(const AutoLockHelperThreadState&) {
+ return ionFinishedList_;
+ }
+ IonFreeTaskVector& ionFreeList(const AutoLockHelperThreadState&) {
+ return ionFreeList_;
+ }
+
+ wasm::CompileTaskPtrFifo& wasmWorklist(const AutoLockHelperThreadState&,
+ wasm::CompileMode m) {
+ switch (m) {
+ case wasm::CompileMode::Once:
+ case wasm::CompileMode::Tier1:
+ return wasmWorklist_tier1_;
+ case wasm::CompileMode::Tier2:
+ return wasmWorklist_tier2_;
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ wasm::Tier2GeneratorTaskPtrVector& wasmTier2GeneratorWorklist(
+ const AutoLockHelperThreadState&) {
+ return wasmTier2GeneratorWorklist_;
+ }
+
+ void incWasmTier2GeneratorsFinished(const AutoLockHelperThreadState&) {
+ wasmTier2GeneratorsFinished_++;
+ }
+
+ uint32_t wasmTier2GeneratorsFinished(const AutoLockHelperThreadState&) const {
+ return wasmTier2GeneratorsFinished_;
+ }
+
+ PromiseHelperTaskVector& promiseHelperTasks(
+ const AutoLockHelperThreadState&) {
+ return promiseHelperTasks_;
+ }
+
+ ParseTaskVector& parseWorklist(const AutoLockHelperThreadState&) {
+ return parseWorklist_;
+ }
+ ParseTaskList& parseFinishedList(const AutoLockHelperThreadState&) {
+ return parseFinishedList_;
+ }
+
+ DelazifyTaskList& delazifyWorklist(const AutoLockHelperThreadState&) {
+ return delazifyWorklist_;
+ }
+
+ FreeDelazifyTaskVector& freeDelazifyTaskVector(
+ const AutoLockHelperThreadState&) {
+ return freeDelazifyTaskVector_;
+ }
+
+ SourceCompressionTaskVector& compressionPendingList(
+ const AutoLockHelperThreadState&) {
+ return compressionPendingList_;
+ }
+
+ SourceCompressionTaskVector& compressionWorklist(
+ const AutoLockHelperThreadState&) {
+ return compressionWorklist_;
+ }
+
+ SourceCompressionTaskVector& compressionFinishedList(
+ const AutoLockHelperThreadState&) {
+ return compressionFinishedList_;
+ }
+
+ GCParallelTaskList& gcParallelWorklist() { return gcParallelWorklist_; }
+
+ size_t getGCParallelThreadCount(const AutoLockHelperThreadState& lock) const {
+ return gcParallelThreadCount;
+ }
+ void setGCParallelThreadCount(size_t count,
+ const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(count >= 1);
+ MOZ_ASSERT(count <= threadCount);
+ gcParallelThreadCount = count;
+ }
+
+ HelperThreadTaskVector& helperTasks(const AutoLockHelperThreadState&) {
+ return helperTasks_;
+ }
+
+ bool canStartWasmCompile(const AutoLockHelperThreadState& lock,
+ wasm::CompileMode mode);
+
+ bool canStartWasmTier1CompileTask(const AutoLockHelperThreadState& lock);
+ bool canStartWasmTier2CompileTask(const AutoLockHelperThreadState& lock);
+ bool canStartWasmTier2GeneratorTask(const AutoLockHelperThreadState& lock);
+ bool canStartPromiseHelperTask(const AutoLockHelperThreadState& lock);
+ bool canStartIonCompileTask(const AutoLockHelperThreadState& lock);
+ bool canStartIonFreeTask(const AutoLockHelperThreadState& lock);
+ bool canStartParseTask(const AutoLockHelperThreadState& lock);
+ bool canStartFreeDelazifyTask(const AutoLockHelperThreadState& lock);
+ bool canStartDelazifyTask(const AutoLockHelperThreadState& lock);
+ bool canStartCompressionTask(const AutoLockHelperThreadState& lock);
+ bool canStartGCParallelTask(const AutoLockHelperThreadState& lock);
+
+ HelperThreadTask* maybeGetWasmCompile(const AutoLockHelperThreadState& lock,
+ wasm::CompileMode mode);
+
+ HelperThreadTask* maybeGetWasmTier1CompileTask(
+ const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetWasmTier2CompileTask(
+ const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetWasmTier2GeneratorTask(
+ const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetPromiseHelperTask(
+ const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetIonCompileTask(
+ const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetLowPrioIonCompileTask(
+ const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetIonFreeTask(const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetParseTask(const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetFreeDelazifyTask(
+ const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetDelazifyTask(const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetCompressionTask(
+ const AutoLockHelperThreadState& lock);
+ HelperThreadTask* maybeGetGCParallelTask(
+ const AutoLockHelperThreadState& lock);
+
+ enum class ScheduleCompressionTask { GC, API };
+
+  // Used by a major GC to signal that enqueued compression tasks should be
+  // processed.
+ void startHandlingCompressionTasks(ScheduleCompressionTask schedule,
+ JSRuntime* maybeRuntime,
+ const AutoLockHelperThreadState& lock);
+
+ jit::IonCompileTask* highestPriorityPendingIonCompile(
+ const AutoLockHelperThreadState& lock, bool checkExecutionStatus);
+
+ private:
+ UniquePtr<ParseTask> finishParseTaskCommon(JSContext* cx,
+ JS::OffThreadToken* token);
+
+ bool finishMultiParseTask(JSContext* cx, ParseTaskKind kind,
+ JS::OffThreadToken* token,
+ mozilla::Vector<RefPtr<JS::Stencil>>* stencils);
+
+ public:
+ void cancelParseTask(JSRuntime* rt, JS::OffThreadToken* token);
+ void destroyParseTask(JSRuntime* rt, ParseTask* parseTask);
+
+ void trace(JSTracer* trc);
+
+ already_AddRefed<frontend::CompilationStencil> finishStencilTask(
+ JSContext* cx, JS::OffThreadToken* token,
+ JS::InstantiationStorage* storage);
+ bool finishMultiStencilsDecodeTask(
+ JSContext* cx, JS::OffThreadToken* token,
+ mozilla::Vector<RefPtr<JS::Stencil>>* stencils);
+
+ bool hasActiveThreads(const AutoLockHelperThreadState&);
+ bool canStartTasks(const AutoLockHelperThreadState& locked);
+ void waitForAllTasks();
+ void waitForAllTasksLocked(AutoLockHelperThreadState&);
+
+ bool checkTaskThreadLimit(ThreadType threadType, size_t maxThreads,
+ bool isMaster,
+ const AutoLockHelperThreadState& lock) const;
+ bool checkTaskThreadLimit(ThreadType threadType, size_t maxThreads,
+ const AutoLockHelperThreadState& lock) const {
+ return checkTaskThreadLimit(threadType, maxThreads, /* isMaster */ false,
+ lock);
+ }
+
+ void triggerFreeUnusedMemory();
+
+ private:
+  // Condition variable for notifying the main thread that a helper task has
+  // completed some work.
+ js::ConditionVariable consumerWakeup;
+
+ void dispatch(JS::DispatchReason reason,
+ const AutoLockHelperThreadState& locked);
+
+ void runTask(HelperThreadTask* task, AutoLockHelperThreadState& lock);
+
+ public:
+ bool submitTask(wasm::UniqueTier2GeneratorTask task);
+ bool submitTask(wasm::CompileTask* task, wasm::CompileMode mode);
+ bool submitTask(UniquePtr<jit::IonFreeTask> task,
+ const AutoLockHelperThreadState& lock);
+ bool submitTask(jit::IonCompileTask* task,
+ const AutoLockHelperThreadState& locked);
+ bool submitTask(UniquePtr<SourceCompressionTask> task,
+ const AutoLockHelperThreadState& locked);
+ bool submitTask(JSRuntime* rt, UniquePtr<ParseTask> task,
+ const AutoLockHelperThreadState& locked);
+ void submitTask(DelazifyTask* task, const AutoLockHelperThreadState& locked);
+ bool submitTask(UniquePtr<FreeDelazifyTask> task,
+ const AutoLockHelperThreadState& locked);
+ bool submitTask(PromiseHelperTask* task);
+ bool submitTask(GCParallelTask* task,
+ const AutoLockHelperThreadState& locked);
+ void runOneTask(AutoLockHelperThreadState& lock);
+ void runTaskLocked(HelperThreadTask* task, AutoLockHelperThreadState& lock);
+
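+  // A Selector is a pointer to one of the maybeGet*Task member functions
+  // above; findHighestPriorityTask tries the selectors in priority order
+  // until one yields a task.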
+ using Selector = HelperThreadTask* (
+ GlobalHelperThreadState::*)(const AutoLockHelperThreadState&);
+ static const Selector selectors[];
+
+ HelperThreadTask* findHighestPriorityTask(
+ const AutoLockHelperThreadState& locked);
+};
+
+static inline bool IsHelperThreadStateInitialized() {
+ extern GlobalHelperThreadState* gHelperThreadState;
+ return gHelperThreadState;
+}
+
+static inline GlobalHelperThreadState& HelperThreadState() {
+ extern GlobalHelperThreadState* gHelperThreadState;
+
+ MOZ_ASSERT(gHelperThreadState);
+ return *gHelperThreadState;
+}
+
+class MOZ_RAII AutoSetHelperThreadContext {
+ JSContext* cx;
+ AutoLockHelperThreadState& lock;
+
+ public:
+ AutoSetHelperThreadContext(const JS::ContextOptions& options,
+ AutoLockHelperThreadState& lock);
+ ~AutoSetHelperThreadContext();
+};
+
+struct MOZ_RAII AutoSetContextRuntime {
+ explicit AutoSetContextRuntime(JSRuntime* rt) {
+ TlsContext.get()->setRuntime(rt);
+ }
+ ~AutoSetContextRuntime() { TlsContext.get()->setRuntime(nullptr); }
+};
+
+struct ParseTask : public mozilla::LinkedListElement<ParseTask>,
+ public JS::OffThreadToken,
+ public HelperThreadTask {
+ ParseTaskKind kind;
+ JS::OwningCompileOptions options;
+
+ // Context options from the main thread.
+ const JS::ContextOptions contextOptions;
+
+ // HelperThreads are shared between all runtimes in the process so explicitly
+ // track which one we are associated with.
+ JSRuntime* runtime = nullptr;
+
+ // Callback invoked off thread when the parse finishes.
+ JS::OffThreadCompileCallback callback;
+ void* callbackData;
+
+  // For the multi-decode stencil case, holds onto the set of stencils
+  // produced off-thread.
+ mozilla::Vector<RefPtr<JS::Stencil>> stencils;
+
+ // The input of the compilation.
+ JS::CompilationStorage compileStorage_;
+
+ // The output of the compilation/decode task.
+ RefPtr<frontend::CompilationStencil> stencil_;
+
+ JS::InstantiationStorage instantiationStorage_;
+
+ // Record any errors happening while parsing or generating bytecode.
+ FrontendContext fc_;
+
+ ParseTask(ParseTaskKind kind, JSContext* cx,
+ JS::OffThreadCompileCallback callback, void* callbackData);
+ virtual ~ParseTask();
+
+ bool init(JSContext* cx, const JS::ReadOnlyCompileOptions& options);
+
+ void moveInstantiationStorageInto(JS::InstantiationStorage& storage);
+
+ void activate(JSRuntime* rt);
+ void deactivate(JSRuntime* rt);
+
+ virtual void parse(FrontendContext* fc) = 0;
+
+ bool runtimeMatches(JSRuntime* rt) { return runtime == rt; }
+
+ void trace(JSTracer* trc);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+ void runTask(AutoLockHelperThreadState& lock);
+ void scheduleDelazifyTask(AutoLockHelperThreadState& lock);
+ ThreadType threadType() override { return ThreadType::THREAD_TYPE_PARSE; }
+};
+
+// Base class for implementing the various strategies to iterate over the
+// functions to be delazified, or to decide when to stop doing any
+// delazification.
+//
+// After creating a strategy, the `add` function should be called with the
+// top-level ScriptIndex.
+struct DelazifyStrategy {
+ using ScriptIndex = frontend::ScriptIndex;
+ virtual ~DelazifyStrategy() = default;
+
+  // Returns true if no more functions should be delazified. Note that this
+  // does not imply that every function got delazified.
+ virtual bool done() const = 0;
+
+  // Return a function identifier representing the next function to be
+  // delazified. If no more functions should be delazified, return 0.
+ virtual ScriptIndex next() = 0;
+
+ // Empty the list of functions to be processed next. done() should return true
+ // after this call.
+ virtual void clear() = 0;
+
+  // Insert an index into the container of the delazification strategy. A
+  // strategy can choose to ignore the insertion of an index into its queue of
+  // functions to delazify. Return false only in case of an error while
+  // inserting, and true otherwise.
+ [[nodiscard]] virtual bool insert(ScriptIndex index,
+ frontend::ScriptStencilRef& ref) = 0;
+
+  // Add the inner functions of a delazified function. This function should
+  // only be called with a function which has some bytecode associated with
+  // it, and registers only functions whose parents are already delazified.
+ //
+ // This function is called with the script index of:
+ // - top-level script, when starting the off-thread delazification.
+ // - functions added by `add` and delazified by `DelazifyTask`.
+ [[nodiscard]] bool add(FrontendContext* fc,
+ const frontend::CompilationStencil& stencil,
+ ScriptIndex index);
+};
+
+// Delazify all functions using a depth-first traversal of the function tree,
+// where each function is visited in source order.
+//
+// When `add` is called with the top-level ScriptIndex, all inner functions are
+// pushed onto a stack such that they are popped in source order. Each
+// function, once delazified, is used to schedule its inner functions the same
+// way.
+//
+// Hypothesis: This strategy parses all functions in source order, with the
+// expectation that calls will follow the same order and that the helper
+// thread will always be ahead of the execution.
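+//
+// For example (illustrative): given a top-level script containing functions A
+// and B in source order, where A contains A1 and A2, `add` pushes B then A, so
+// `next` yields A first; delazifying A then pushes A2 and A1, giving the
+// overall order A, A1, A2, B.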
+struct DepthFirstDelazification final : public DelazifyStrategy {
+ Vector<ScriptIndex, 0, SystemAllocPolicy> stack;
+
+ bool done() const override { return stack.empty(); }
+ ScriptIndex next() override { return stack.popCopy(); }
+ void clear() override { return stack.clear(); }
+ bool insert(ScriptIndex index, frontend::ScriptStencilRef&) override {
+ return stack.append(index);
+ }
+};
+
+// Delazify all functions using a traversal which selects the largest function
+// first. The intent is that if the main thread races with the helper thread,
+// the main thread should only have to parse small functions instead of the
+// large ones, which are prioritized by this delazification strategy.
+struct LargeFirstDelazification final : public DelazifyStrategy {
+ using SourceSize = uint32_t;
+ Vector<std::pair<SourceSize, ScriptIndex>, 0, SystemAllocPolicy> heap;
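+
+  // The vector forms an implicit binary max-heap ordered on SourceSize: using
+  // 1-based indexes, the children of element i live at indexes 2*i and
+  // 2*i + 1 (see LargeFirstDelazification::insert and next).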
+
+ bool done() const override { return heap.empty(); }
+ ScriptIndex next() override;
+ void clear() override { return heap.clear(); }
+ bool insert(ScriptIndex, frontend::ScriptStencilRef&) override;
+};
+
+// Eagerly delazify functions, and send the result back to the runtime which
+// requested the stencil to be parsed, by filling the stencil cache.
+//
+// This task is scheduled multiple times; each time it is scheduled, it
+// delazifies a single function. Once the function is delazified, it schedules
+// the inner functions of the delazified function for delazification using the
+// DelazifyStrategy. The DelazifyStrategy is responsible for ordering and
+// filtering functions to be delazified.
+//
+// When no more functions have to be delazified, a FreeDelazifyTask is
+// scheduled to release the memory held by the DelazifyTask.
+struct DelazifyTask : public mozilla::LinkedListElement<DelazifyTask>,
+ public HelperThreadTask {
+ // HelperThreads are shared between all runtimes in the process so explicitly
+ // track which one we are associated with.
+ JSRuntime* runtime = nullptr;
+
+ // Context options originally from the main thread.
+ const JS::ContextOptions contextOptions;
+
+ // Queue of functions to be processed while delazifying.
+ UniquePtr<DelazifyStrategy> strategy;
+
+ // Every delazified function is merged back to provide context for delazifying
+ // even more functions.
+ frontend::CompilationStencilMerger merger;
+
+ // Record any errors happening while parsing or generating bytecode.
+ FrontendContext fc_;
+
+ // Create a new DelazifyTask and initialize it.
+ //
+ // In case of early failure, no errors are reported, as a DelazifyTask is an
+ // optimization and the VM should remain working even without this
+ // optimization in place.
+ static UniquePtr<DelazifyTask> Create(
+ JSRuntime* runtime, const JS::ContextOptions& contextOptions,
+ const JS::ReadOnlyCompileOptions& options,
+ const frontend::CompilationStencil& stencil);
+
+ DelazifyTask(JSRuntime* runtime, const JS::ContextOptions& options);
+ ~DelazifyTask();
+
+ [[nodiscard]] bool init(
+ const JS::ReadOnlyCompileOptions& options,
+ UniquePtr<frontend::ExtensibleCompilationStencil>&& initial);
+
+  // This function is called by the delazify task's thread to know whether the
+  // task should be interrupted.
+  //
+  // A delazify task holds on to a thread until the strategy has iterated over
+  // all functions. However, as a delazify task iterates over multiple
+  // functions, it can easily be interrupted at function boundaries.
+ //
+ // TODO: (Bug 1773683) Plug this with the mozilla::Task::RequestInterrupt
+ // function which is wrapping HelperThreads tasks within Mozilla.
+ bool isInterrupted() { return false; }
+
+ bool runtimeMatches(JSRuntime* rt) { return runtime == rt; }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+ [[nodiscard]] bool runTask(JSContext* cx);
+ ThreadType threadType() override { return ThreadType::THREAD_TYPE_DELAZIFY; }
+};
+
+// FreeDelazifyTask exists because it is bad practice to `js_delete(this)`,
+// as fields might be aliased across the destructor, such as with RAII guards.
+// FreeDelazifyTask limits the risk of introducing this kind of issue by
+// limiting its fields to a single DelazifyTask pointer, before deleting
+// itself.
+struct FreeDelazifyTask : public HelperThreadTask {
+ DelazifyTask* task;
+
+ explicit FreeDelazifyTask(DelazifyTask* t) : task(t) {}
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+ ThreadType threadType() override {
+ return ThreadType::THREAD_TYPE_DELAZIFY_FREE;
+ }
+};
+
+// It is not desirable to eagerly compress: if lazy functions that are tied to
+// the ScriptSource were to be executed relatively soon after parsing, they
+// would need to block on decompression, which hurts responsiveness.
+//
+// To this end, compression tasks are heap allocated and enqueued in a pending
+// list by ScriptSource::setSourceCopy. When a major GC occurs, we schedule
+// pending compression tasks and move the ones that are ready to be compressed
+// to the worklist. Currently, a compression task is considered ready 2 major
+// GCs after being enqueued. Completed tasks are handled during the sweeping
+// phase by AttachCompressedSourcesTask, which runs in parallel with other GC
+// sweeping tasks.
+class SourceCompressionTask : public HelperThreadTask {
+ friend class HelperThread;
+ friend class ScriptSource;
+
+ // The runtime that the ScriptSource is associated with, in the sense that
+ // it uses the runtime's immutable string cache.
+ JSRuntime* runtime_;
+
+ // The major GC number of the runtime when the task was enqueued.
+ uint64_t majorGCNumber_;
+
+ // The source to be compressed.
+ RefPtr<ScriptSource> source_;
+
+ // The resultant compressed string. If the compressed string is larger
+ // than the original, or we OOM'd during compression, or nothing else
+ // except the task is holding the ScriptSource alive when scheduled to
+ // compress, this will remain None upon completion.
+ SharedImmutableString resultString_;
+
+ public:
+ // The majorGCNumber is used for scheduling tasks.
+ SourceCompressionTask(JSRuntime* rt, ScriptSource* source)
+ : runtime_(rt), majorGCNumber_(rt->gc.majorGCCount()), source_(source) {
+ source->noteSourceCompressionTask();
+ }
+ virtual ~SourceCompressionTask() = default;
+
+ bool runtimeMatches(JSRuntime* runtime) const { return runtime == runtime_; }
+ bool shouldStart() const {
+ // We wait 2 major GCs to start compressing, in order to avoid
+ // immediate compression.
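+    // For example, a task enqueued when majorGCCount() == N becomes ready
+    // once the count reaches N + 2.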
+ return runtime_->gc.majorGCCount() > majorGCNumber_ + 1;
+ }
+
+ bool shouldCancel() const {
+ // If the refcount is exactly 1, then nothing else is holding on to the
+ // ScriptSource, so no reason to compress it and we should cancel the task.
+ return source_->refs == 1;
+ }
+
+ void runTask();
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+ void complete();
+
+ ThreadType threadType() override { return ThreadType::THREAD_TYPE_COMPRESS; }
+
+ private:
+ struct PerformTaskWork;
+ friend struct PerformTaskWork;
+
+  // The work algorithm, aware of whether it's compressing one-byte UTF-8
+  // source text or UTF-16, for CharT either Utf8Unit or char16_t. Invoked by
+  // runTask() after doing a type-test of the ScriptSource*.
+ template <typename CharT>
+ void workEncodingSpecific();
+};
+
+// A PromiseHelperTask is an OffThreadPromiseTask that executes a single job on
+// a helper thread. Call js::StartOffThreadPromiseHelperTask to submit a
+// PromiseHelperTask for execution.
+//
+// Concrete subclasses must implement execute and OffThreadPromiseTask::resolve.
+// The helper thread will call execute() to do the main work. Then, the thread
+// of the JSContext used to create the PromiseHelperTask will call resolve() to
+// resolve the promise according to those results.
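+//
+// Illustrative sketch only; ExampleTask and computeSlowly are hypothetical:
+//
+//   struct ExampleTask : PromiseHelperTask {
+//     double result = 0.0;
+//     ExampleTask(JSContext* cx, Handle<PromiseObject*> promise)
+//         : PromiseHelperTask(cx, promise) {}
+//     void execute() override { result = computeSlowly(); }  // helper thread
+//     bool resolve(JSContext* cx, Handle<PromiseObject*> promise) override {
+//       RootedValue v(cx, JS::NumberValue(result));  // JSContext's thread
+//       return JS::ResolvePromise(cx, promise, v);
+//     }
+//   };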
+struct PromiseHelperTask : OffThreadPromiseTask, public HelperThreadTask {
+ PromiseHelperTask(JSContext* cx, Handle<PromiseObject*> promise)
+ : OffThreadPromiseTask(cx, promise) {}
+
+ // To be called on a helper thread and implemented by the derived class.
+ virtual void execute() = 0;
+
+ // May be called in the absence of helper threads or off-thread promise
+ // support to synchronously execute and resolve a PromiseTask.
+ //
+ // Warning: After this function returns, 'this' can be deleted at any time, so
+ // the caller must immediately return from the stream callback.
+ void executeAndResolveAndDestroy(JSContext* cx);
+
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+ ThreadType threadType() override { return THREAD_TYPE_PROMISE_TASK; }
+};
+
+} /* namespace js */
+
+#endif /* vm_HelperThreadState_h */
diff --git a/js/src/vm/HelperThreadTask.h b/js/src/vm/HelperThreadTask.h
new file mode 100644
index 0000000000..fc36b924a4
--- /dev/null
+++ b/js/src/vm/HelperThreadTask.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_HelperThreadTask_h
+#define vm_HelperThreadTask_h
+
+#include "mozilla/TimeStamp.h"
+
+#include "js/Utility.h"
+
+namespace js {
+
+class AutoLockHelperThreadState;
+struct ParseTask;
+struct DelazifyTask;
+struct FreeDelazifyTask;
+class SourceCompressionTask;
+
+namespace jit {
+class IonCompileTask;
+class IonFreeTask;
+} // namespace jit
+namespace wasm {
+struct Tier2GeneratorTask;
+} // namespace wasm
+
+template <typename T>
+struct MapTypeToThreadType {};
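+
+// Each specialization below maps a concrete task type to the ThreadType tag
+// that the task's threadType() override reports; HelperThreadTask::is<T>()
+// compares the two to implement a lightweight dynamic type check.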
+
+template <>
+struct MapTypeToThreadType<jit::IonCompileTask> {
+ static const ThreadType threadType = THREAD_TYPE_ION;
+};
+
+template <>
+struct MapTypeToThreadType<wasm::Tier2GeneratorTask> {
+ static const ThreadType threadType = THREAD_TYPE_WASM_GENERATOR_TIER2;
+};
+
+template <>
+struct MapTypeToThreadType<ParseTask> {
+ static const ThreadType threadType = THREAD_TYPE_PARSE;
+};
+
+template <>
+struct MapTypeToThreadType<DelazifyTask> {
+ static const ThreadType threadType = THREAD_TYPE_DELAZIFY;
+};
+
+template <>
+struct MapTypeToThreadType<FreeDelazifyTask> {
+ static const ThreadType threadType = THREAD_TYPE_DELAZIFY_FREE;
+};
+
+template <>
+struct MapTypeToThreadType<SourceCompressionTask> {
+ static const ThreadType threadType = THREAD_TYPE_COMPRESS;
+};
+
+struct HelperThreadTask {
+ virtual void runHelperThreadTask(AutoLockHelperThreadState& locked) = 0;
+ virtual ThreadType threadType() = 0;
+ virtual ~HelperThreadTask() = default;
+
+ template <typename T>
+ bool is() {
+ return MapTypeToThreadType<T>::threadType == threadType();
+ }
+
+ template <typename T>
+ T* as() {
+ MOZ_ASSERT(this->is<T>());
+ return static_cast<T*>(this);
+ }
+};
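+
+// Illustrative use of is<T>() / as<T>(), mirroring the task-inspection loops
+// in HelperThreads.cpp:
+//
+//   void visit(HelperThreadTask* task) {
+//     if (task->is<ParseTask>()) {
+//       ParseTask* parse = task->as<ParseTask>();
+//       // ... ParseTask-specific handling ...
+//     }
+//   }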
+
+} // namespace js
+
+#endif /* vm_HelperThreadTask_h */
diff --git a/js/src/vm/HelperThreads.cpp b/js/src/vm/HelperThreads.cpp
new file mode 100644
index 0000000000..92d6f41e03
--- /dev/null
+++ b/js/src/vm/HelperThreads.cpp
@@ -0,0 +1,2745 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/HelperThreads.h"
+
+#include "mozilla/ReverseIterator.h" // mozilla::Reversed(...)
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Span.h" // mozilla::Span<TaggedScriptThingIndex>
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+
+#include <algorithm>
+
+#include "frontend/BytecodeCompilation.h" // frontend::{CompileGlobalScriptToExtensibleStencil, FireOnNewScript}
+#include "frontend/BytecodeCompiler.h" // frontend::ParseModuleToExtensibleStencil
+#include "frontend/CompilationStencil.h" // frontend::{CompilationStencil, ExtensibleCompilationStencil, CompilationInput, BorrowingCompilationStencil, ScriptStencilRef}
+#include "frontend/FrontendContext.h"
+#include "frontend/ScopeBindingCache.h" // frontend::ScopeBindingCache
+#include "gc/GC.h"
+#include "jit/IonCompileTask.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitScript.h"
+#include "js/CompileOptions.h" // JS::CompileOptions, JS::DecodeOptions, JS::ReadOnlyCompileOptions
+#include "js/ContextOptions.h" // JS::ContextOptions
+#include "js/experimental/CompileScript.h"
+#include "js/experimental/JSStencil.h"
+#include "js/friend/StackLimits.h" // js::ReportOverRecursed
+#include "js/HelperThreadAPI.h"
+#include "js/OffThreadScriptCompilation.h" // JS::OffThreadToken, JS::OffThreadCompileCallback
+#include "js/SourceText.h"
+#include "js/Stack.h"
+#include "js/Transcoding.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "threading/CpuCount.h"
+#include "vm/ErrorReporting.h"
+#include "vm/HelperThreadState.h"
+#include "vm/InternalThreadPool.h"
+#include "vm/MutexIDs.h"
+#include "wasm/WasmGenerator.h"
+
+using namespace js;
+
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+using mozilla::Utf8Unit;
+
+using JS::CompileOptions;
+using JS::DispatchReason;
+using JS::ReadOnlyCompileOptions;
+
+namespace js {
+
+Mutex gHelperThreadLock(mutexid::GlobalHelperThreadState);
+GlobalHelperThreadState* gHelperThreadState = nullptr;
+
+} // namespace js
+
+bool js::CreateHelperThreadsState() {
+ MOZ_ASSERT(!gHelperThreadState);
+ gHelperThreadState = js_new<GlobalHelperThreadState>();
+ return gHelperThreadState;
+}
+
+void js::DestroyHelperThreadsState() {
+ AutoLockHelperThreadState lock;
+
+ if (!gHelperThreadState) {
+ return;
+ }
+
+ gHelperThreadState->finish(lock);
+ js_delete(gHelperThreadState);
+ gHelperThreadState = nullptr;
+}
+
+bool js::EnsureHelperThreadsInitialized() {
+ MOZ_ASSERT(gHelperThreadState);
+ return gHelperThreadState->ensureInitialized();
+}
+
+static size_t ClampDefaultCPUCount(size_t cpuCount) {
+ // It's extremely rare for SpiderMonkey to have more than a few cores worth
+ // of work. At higher core counts, performance can even decrease due to NUMA
+ // (and SpiderMonkey's lack of NUMA-awareness), contention, and general lack
+ // of optimization for high core counts. So to avoid wasting thread stack
+ // resources (and cluttering gdb and core dumps), clamp to 8 cores for now.
+ return std::min<size_t>(cpuCount, 8);
+}
+
+static size_t ThreadCountForCPUCount(size_t cpuCount) {
+ // We need at least two threads for tier-2 wasm compilations, because
+ // there's a master task that holds a thread while other threads do the
+ // compilation.
+ return std::max<size_t>(cpuCount, 2);
+}
+
+bool js::SetFakeCPUCount(size_t count) {
+ HelperThreadState().setCpuCount(count);
+ return true;
+}
+
+void GlobalHelperThreadState::setCpuCount(size_t count) {
+ // This must be called before any threads have been initialized.
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(!isInitialized(lock));
+
+ // We can't do this if an external thread pool is in use.
+ MOZ_ASSERT(!dispatchTaskCallback);
+
+ cpuCount = count;
+ threadCount = ThreadCountForCPUCount(count);
+}
+
+size_t js::GetHelperThreadCount() { return HelperThreadState().threadCount; }
+
+size_t js::GetHelperThreadCPUCount() { return HelperThreadState().cpuCount; }
+
+size_t js::GetMaxWasmCompilationThreads() {
+ return HelperThreadState().maxWasmCompilationThreads();
+}
+
+void JS::SetProfilingThreadCallbacks(
+ JS::RegisterThreadCallback registerThread,
+ JS::UnregisterThreadCallback unregisterThread) {
+ HelperThreadState().registerThread = registerThread;
+ HelperThreadState().unregisterThread = unregisterThread;
+}
+
+static size_t ThreadStackQuotaForSize(size_t size) {
+  // Set the stack quota to 10% less than the actual size.
+ return size_t(double(size) * 0.9);
+}
+
+// Bug 1630189: Without MOZ_NEVER_INLINE, Windows PGO builds have a linking
+// error for HelperThreadTaskCallback.
+JS_PUBLIC_API MOZ_NEVER_INLINE void JS::SetHelperThreadTaskCallback(
+ HelperThreadTaskCallback callback, size_t threadCount, size_t stackSize) {
+ AutoLockHelperThreadState lock;
+ HelperThreadState().setDispatchTaskCallback(callback, threadCount, stackSize,
+ lock);
+}
+
+void GlobalHelperThreadState::setDispatchTaskCallback(
+ JS::HelperThreadTaskCallback callback, size_t threadCount, size_t stackSize,
+ const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(!isInitialized(lock));
+ MOZ_ASSERT(!dispatchTaskCallback);
+ MOZ_ASSERT(threadCount != 0);
+ MOZ_ASSERT(stackSize >= 16 * 1024);
+
+ dispatchTaskCallback = callback;
+ this->threadCount = threadCount;
+ this->stackQuota = ThreadStackQuotaForSize(stackSize);
+}
+
+bool js::StartOffThreadWasmCompile(wasm::CompileTask* task,
+ wasm::CompileMode mode) {
+ return HelperThreadState().submitTask(task, mode);
+}
+
+bool GlobalHelperThreadState::submitTask(wasm::CompileTask* task,
+ wasm::CompileMode mode) {
+ AutoLockHelperThreadState lock;
+ if (!wasmWorklist(lock, mode).pushBack(task)) {
+ return false;
+ }
+
+ dispatch(DispatchReason::NewTask, lock);
+ return true;
+}
+
+size_t js::RemovePendingWasmCompileTasks(
+ const wasm::CompileTaskState& taskState, wasm::CompileMode mode,
+ const AutoLockHelperThreadState& lock) {
+ wasm::CompileTaskPtrFifo& worklist =
+ HelperThreadState().wasmWorklist(lock, mode);
+ return worklist.eraseIf([&taskState](wasm::CompileTask* task) {
+ return &task->state == &taskState;
+ });
+}
+
+void js::StartOffThreadWasmTier2Generator(wasm::UniqueTier2GeneratorTask task) {
+ (void)HelperThreadState().submitTask(std::move(task));
+}
+
+bool GlobalHelperThreadState::submitTask(wasm::UniqueTier2GeneratorTask task) {
+ AutoLockHelperThreadState lock;
+
+ MOZ_ASSERT(isInitialized(lock));
+
+ if (!wasmTier2GeneratorWorklist(lock).append(task.get())) {
+ return false;
+ }
+ (void)task.release();
+
+ dispatch(DispatchReason::NewTask, lock);
+ return true;
+}
+
+static void CancelOffThreadWasmTier2GeneratorLocked(
+ AutoLockHelperThreadState& lock) {
+ if (!HelperThreadState().isInitialized(lock)) {
+ return;
+ }
+
+ // Remove pending tasks from the tier2 generator worklist and cancel and
+ // delete them.
+ {
+ wasm::Tier2GeneratorTaskPtrVector& worklist =
+ HelperThreadState().wasmTier2GeneratorWorklist(lock);
+ for (size_t i = 0; i < worklist.length(); i++) {
+ wasm::Tier2GeneratorTask* task = worklist[i];
+ HelperThreadState().remove(worklist, &i);
+ js_delete(task);
+ }
+ }
+
+ // There is at most one running Tier2Generator task and we assume that
+ // below.
+ static_assert(GlobalHelperThreadState::MaxTier2GeneratorTasks == 1,
+ "code must be generalized");
+
+ // If there is a running Tier2 generator task, shut it down in a predictable
+ // way. The task will be deleted by the normal deletion logic.
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (helper->is<wasm::Tier2GeneratorTask>()) {
+ // Set a flag that causes compilation to shortcut itself.
+ helper->as<wasm::Tier2GeneratorTask>()->cancel();
+
+ // Wait for the generator task to finish. This avoids a shutdown race
+ // where the shutdown code is trying to shut down helper threads and the
+ // ongoing tier2 compilation is trying to finish, which requires it to
+ // have access to helper threads.
+ uint32_t oldFinishedCount =
+ HelperThreadState().wasmTier2GeneratorsFinished(lock);
+ while (HelperThreadState().wasmTier2GeneratorsFinished(lock) ==
+ oldFinishedCount) {
+ HelperThreadState().wait(lock);
+ }
+
+ // At most one of these tasks.
+ break;
+ }
+ }
+}
+
+void js::CancelOffThreadWasmTier2Generator() {
+ AutoLockHelperThreadState lock;
+ CancelOffThreadWasmTier2GeneratorLocked(lock);
+}
+
+bool js::StartOffThreadIonCompile(jit::IonCompileTask* task,
+ const AutoLockHelperThreadState& lock) {
+ return HelperThreadState().submitTask(task, lock);
+}
+
+bool GlobalHelperThreadState::submitTask(
+ jit::IonCompileTask* task, const AutoLockHelperThreadState& locked) {
+ MOZ_ASSERT(isInitialized(locked));
+
+ if (!ionWorklist(locked).append(task)) {
+ return false;
+ }
+
+ // The build is moving off-thread. Freeze the LifoAlloc to prevent any
+ // unwanted mutations.
+ task->alloc().lifoAlloc()->setReadOnly();
+
+ dispatch(DispatchReason::NewTask, locked);
+ return true;
+}
+
+bool js::StartOffThreadIonFree(jit::IonCompileTask* task,
+ const AutoLockHelperThreadState& lock) {
+ js::UniquePtr<jit::IonFreeTask> freeTask =
+ js::MakeUnique<jit::IonFreeTask>(task);
+ if (!freeTask) {
+ return false;
+ }
+
+ return HelperThreadState().submitTask(std::move(freeTask), lock);
+}
+
+bool GlobalHelperThreadState::submitTask(
+ UniquePtr<jit::IonFreeTask> task, const AutoLockHelperThreadState& locked) {
+ MOZ_ASSERT(isInitialized(locked));
+
+ if (!ionFreeList(locked).append(std::move(task))) {
+ return false;
+ }
+
+ dispatch(DispatchReason::NewTask, locked);
+ return true;
+}
+
+/*
+ * Move an IonCompileTask for which compilation has either finished, failed, or
+ * been cancelled into the global finished compilation list. All off-thread
+ * compilations which are started must eventually be finished.
+ */
+void js::FinishOffThreadIonCompile(jit::IonCompileTask* task,
+ const AutoLockHelperThreadState& lock) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!HelperThreadState().ionFinishedList(lock).append(task)) {
+ oomUnsafe.crash("FinishOffThreadIonCompile");
+ }
+ task->script()
+ ->runtimeFromAnyThread()
+ ->jitRuntime()
+ ->numFinishedOffThreadTasksRef(lock)++;
+}
+
+static JSRuntime* GetSelectorRuntime(const CompilationSelector& selector) {
+ struct Matcher {
+ JSRuntime* operator()(JSScript* script) {
+ return script->runtimeFromMainThread();
+ }
+ JSRuntime* operator()(Realm* realm) {
+ return realm->runtimeFromMainThread();
+ }
+ JSRuntime* operator()(Zone* zone) { return zone->runtimeFromMainThread(); }
+ JSRuntime* operator()(ZonesInState zbs) { return zbs.runtime; }
+ JSRuntime* operator()(JSRuntime* runtime) { return runtime; }
+ };
+
+ return selector.match(Matcher());
+}
+
+static bool JitDataStructuresExist(const CompilationSelector& selector) {
+ struct Matcher {
+ bool operator()(JSScript* script) { return !!script->realm()->jitRealm(); }
+ bool operator()(Realm* realm) { return !!realm->jitRealm(); }
+ bool operator()(Zone* zone) { return !!zone->jitZone(); }
+ bool operator()(ZonesInState zbs) { return zbs.runtime->hasJitRuntime(); }
+ bool operator()(JSRuntime* runtime) { return runtime->hasJitRuntime(); }
+ };
+
+ return selector.match(Matcher());
+}
+
+static bool IonCompileTaskMatches(const CompilationSelector& selector,
+ jit::IonCompileTask* task) {
+ struct TaskMatches {
+ jit::IonCompileTask* task_;
+
+ bool operator()(JSScript* script) { return script == task_->script(); }
+ bool operator()(Realm* realm) { return realm == task_->script()->realm(); }
+ bool operator()(Zone* zone) {
+ return zone == task_->script()->zoneFromAnyThread();
+ }
+ bool operator()(JSRuntime* runtime) {
+ return runtime == task_->script()->runtimeFromAnyThread();
+ }
+ bool operator()(ZonesInState zbs) {
+ return zbs.runtime == task_->script()->runtimeFromAnyThread() &&
+ zbs.state == task_->script()->zoneFromAnyThread()->gcState();
+ }
+ };
+
+ return selector.match(TaskMatches{task});
+}
+
+static void CancelOffThreadIonCompileLocked(const CompilationSelector& selector,
+ AutoLockHelperThreadState& lock) {
+ if (!HelperThreadState().isInitialized(lock)) {
+ return;
+ }
+
+ MOZ_ASSERT(GetSelectorRuntime(selector)->jitRuntime() != nullptr);
+
+ /* Cancel any pending entries for which processing hasn't started. */
+ GlobalHelperThreadState::IonCompileTaskVector& worklist =
+ HelperThreadState().ionWorklist(lock);
+ for (size_t i = 0; i < worklist.length(); i++) {
+ jit::IonCompileTask* task = worklist[i];
+ if (IonCompileTaskMatches(selector, task)) {
+      // Once finished, tasks are added to a linked list which is
+      // allocated with the IonCompileTask class. The IonCompileTask is
+      // allocated in the LifoAlloc, so we need the LifoAlloc to be mutable.
+ worklist[i]->alloc().lifoAlloc()->setReadWrite();
+
+ FinishOffThreadIonCompile(task, lock);
+ HelperThreadState().remove(worklist, &i);
+ }
+ }
+
+ /* Wait for in progress entries to finish up. */
+ bool cancelled;
+ do {
+ cancelled = false;
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (!helper->is<jit::IonCompileTask>()) {
+ continue;
+ }
+
+ jit::IonCompileTask* ionCompileTask = helper->as<jit::IonCompileTask>();
+ if (IonCompileTaskMatches(selector, ionCompileTask)) {
+ ionCompileTask->mirGen().cancel();
+ cancelled = true;
+ }
+ }
+ if (cancelled) {
+ HelperThreadState().wait(lock);
+ }
+ } while (cancelled);
+
+ /* Cancel code generation for any completed entries. */
+ GlobalHelperThreadState::IonCompileTaskVector& finished =
+ HelperThreadState().ionFinishedList(lock);
+ for (size_t i = 0; i < finished.length(); i++) {
+ jit::IonCompileTask* task = finished[i];
+ if (IonCompileTaskMatches(selector, task)) {
+ JSRuntime* rt = task->script()->runtimeFromAnyThread();
+ rt->jitRuntime()->numFinishedOffThreadTasksRef(lock)--;
+ jit::FinishOffThreadTask(rt, task, lock);
+ HelperThreadState().remove(finished, &i);
+ }
+ }
+
+ /* Cancel lazy linking for pending tasks (attached to the ionScript). */
+ JSRuntime* runtime = GetSelectorRuntime(selector);
+ jit::IonCompileTask* task =
+ runtime->jitRuntime()->ionLazyLinkList(runtime).getFirst();
+ while (task) {
+ jit::IonCompileTask* next = task->getNext();
+ if (IonCompileTaskMatches(selector, task)) {
+ jit::FinishOffThreadTask(runtime, task, lock);
+ }
+ task = next;
+ }
+}
+
+void js::CancelOffThreadIonCompile(const CompilationSelector& selector) {
+ if (!JitDataStructuresExist(selector)) {
+ return;
+ }
+
+ AutoLockHelperThreadState lock;
+ CancelOffThreadIonCompileLocked(selector, lock);
+}
+
+#ifdef DEBUG
+bool js::HasOffThreadIonCompile(Realm* realm) {
+ AutoLockHelperThreadState lock;
+
+ if (!HelperThreadState().isInitialized(lock)) {
+ return false;
+ }
+
+ GlobalHelperThreadState::IonCompileTaskVector& worklist =
+ HelperThreadState().ionWorklist(lock);
+ for (size_t i = 0; i < worklist.length(); i++) {
+ jit::IonCompileTask* task = worklist[i];
+ if (task->script()->realm() == realm) {
+ return true;
+ }
+ }
+
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (helper->is<jit::IonCompileTask>() &&
+ helper->as<jit::IonCompileTask>()->script()->realm() == realm) {
+ return true;
+ }
+ }
+
+ GlobalHelperThreadState::IonCompileTaskVector& finished =
+ HelperThreadState().ionFinishedList(lock);
+ for (size_t i = 0; i < finished.length(); i++) {
+ jit::IonCompileTask* task = finished[i];
+ if (task->script()->realm() == realm) {
+ return true;
+ }
+ }
+
+ JSRuntime* rt = realm->runtimeFromMainThread();
+ jit::IonCompileTask* task = rt->jitRuntime()->ionLazyLinkList(rt).getFirst();
+ while (task) {
+ if (task->script()->realm() == realm) {
+ return true;
+ }
+ task = task->getNext();
+ }
+
+ return false;
+}
+#endif
+
+struct MOZ_RAII AutoSetContextFrontendErrors {
+ explicit AutoSetContextFrontendErrors(FrontendContext* fc) {
+ fc->linkWithJSContext(TlsContext.get());
+ }
+ ~AutoSetContextFrontendErrors() {
+ TlsContext.get()->setFrontendErrors(nullptr);
+ }
+};
+
+AutoSetHelperThreadContext::AutoSetHelperThreadContext(
+ const JS::ContextOptions& options, AutoLockHelperThreadState& lock)
+ : lock(lock) {
+ cx = HelperThreadState().getFirstUnusedContext(lock);
+ MOZ_ASSERT(cx);
+ cx->setHelperThread(options, lock);
+}
+
+AutoSetHelperThreadContext::~AutoSetHelperThreadContext() {
+ cx->tempLifoAlloc().releaseAll();
+ if (cx->shouldFreeUnusedMemory()) {
+ cx->tempLifoAlloc().freeAll();
+ cx->setFreeUnusedMemory(false);
+ }
+ cx->clearHelperThread(lock);
+ cx = nullptr;
+}
+
+ParseTask::ParseTask(ParseTaskKind kind, JSContext* cx,
+ JS::OffThreadCompileCallback callback, void* callbackData)
+ : kind(kind),
+ options(cx),
+ contextOptions(cx->options()),
+ callback(callback),
+ callbackData(callbackData) {
+ // Note that |cx| is the main thread context here but the parse task will
+ // run with a different, helper thread, context.
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+}
+
+bool ParseTask::init(JSContext* cx, const ReadOnlyCompileOptions& options) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ if (!this->options.copy(cx, options)) {
+ return false;
+ }
+
+ runtime = cx->runtime();
+
+ if (!fc_.allocateOwnedPool()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // MultiStencilsDecode doesn't support JS::InstantiationStorage.
+ MOZ_ASSERT_IF(this->options.allocateInstantiationStorage,
+ kind != ParseTaskKind::MultiStencilsDecode);
+
+ return true;
+}
+
+void ParseTask::moveInstantiationStorageInto(
+ JS::InstantiationStorage& storage) {
+ storage.gcOutput_ = instantiationStorage_.gcOutput_;
+ instantiationStorage_.gcOutput_ = nullptr;
+}
+
+ParseTask::~ParseTask() {
+ // The LinkedListElement destructor will remove us from any list we are part
+ // of without synchronization, so ensure that doesn't happen.
+ MOZ_DIAGNOSTIC_ASSERT(!isInList());
+}
+
+void ParseTask::trace(JSTracer* trc) {
+ if (runtime != trc->runtime()) {
+ return;
+ }
+
+ compileStorage_.trace(trc);
+ instantiationStorage_.trace(trc);
+}
+
+size_t ParseTask::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t compileStorageSize = compileStorage_.sizeOfIncludingThis(mallocSizeOf);
+ size_t stencilSize =
+ stencil_ ? stencil_->sizeOfIncludingThis(mallocSizeOf) : 0;
+ size_t gcOutputSize =
+ instantiationStorage_.gcOutput_
+ ? instantiationStorage_.gcOutput_->sizeOfExcludingThis(mallocSizeOf)
+ : 0;
+
+ // TODO: 'errors' requires adding support to `CompileError`. They are not
+ // common though.
+
+ return options.sizeOfExcludingThis(mallocSizeOf) + compileStorageSize +
+ stencilSize + gcOutputSize;
+}
+
+void ParseTask::runHelperThreadTask(AutoLockHelperThreadState& locked) {
+ runTask(locked);
+
+ // Schedule DelazifyTask if needed. NOTE: This should be done before adding
+ // this task to the finished list, as we temporarily release the lock to make
+ // a few large allocations.
+ scheduleDelazifyTask(locked);
+
+ // The callback is invoked while we are still off thread.
+ callback(this, callbackData);
+
+ // FinishOffThreadScript will need to be called on the script to
+ // migrate it into the correct compartment.
+ HelperThreadState().parseFinishedList(locked).insertBack(this);
+}
+
+void ParseTask::runTask(AutoLockHelperThreadState& lock) {
+ fc_.setStackQuota(HelperThreadState().stackQuota);
+
+ AutoUnlockHelperThreadState unlock(lock);
+
+ parse(&fc_);
+
+ fc_.nameCollectionPool().purge();
+}
+
+void ParseTask::scheduleDelazifyTask(AutoLockHelperThreadState& lock) {
+ if (!stencil_) {
+ return;
+ }
+
+  // Skip delazify tasks if we parse everything on-demand or ahead.
+ auto strategy = options.eagerDelazificationStrategy();
+ if (strategy == JS::DelazificationOption::OnDemandOnly ||
+ strategy == JS::DelazificationOption::ParseEverythingEagerly) {
+ return;
+ }
+
+ UniquePtr<DelazifyTask> task;
+ {
+ AutoSetHelperThreadContext usesContext(contextOptions, lock);
+ AutoUnlockHelperThreadState unlock(lock);
+ AutoSetContextRuntime ascr(runtime);
+
+ task = DelazifyTask::Create(runtime, contextOptions, options, *stencil_);
+ if (!task) {
+ return;
+ }
+ }
+
+ // Schedule delazification task if there is any function to delazify.
+ if (!task->strategy->done()) {
+ HelperThreadState().submitTask(task.release(), lock);
+ }
+}
+
+template <typename Unit>
+struct CompileToStencilTask : public ParseTask {
+ JS::SourceText<Unit> data;
+
+ CompileToStencilTask(JSContext* cx, JS::SourceText<Unit>& srcBuf,
+ JS::OffThreadCompileCallback callback,
+ void* callbackData);
+ void parse(FrontendContext* fc) override;
+};
+
+template <typename Unit>
+struct CompileModuleToStencilTask : public ParseTask {
+ JS::SourceText<Unit> data;
+
+ CompileModuleToStencilTask(JSContext* cx, JS::SourceText<Unit>& srcBuf,
+ JS::OffThreadCompileCallback callback,
+ void* callbackData);
+ void parse(FrontendContext* fc) override;
+};
+
+struct DecodeStencilTask : public ParseTask {
+ const JS::TranscodeRange range;
+
+ DecodeStencilTask(JSContext* cx, const JS::TranscodeRange& range,
+ JS::OffThreadCompileCallback callback, void* callbackData);
+ void parse(FrontendContext* fc) override;
+};
+
+struct MultiStencilsDecodeTask : public ParseTask {
+ JS::TranscodeSources* sources;
+
+ MultiStencilsDecodeTask(JSContext* cx, JS::TranscodeSources& sources,
+ JS::OffThreadCompileCallback callback,
+ void* callbackData);
+ void parse(FrontendContext* fc) override;
+};
+
+template <typename Unit>
+CompileToStencilTask<Unit>::CompileToStencilTask(
+ JSContext* cx, JS::SourceText<Unit>& srcBuf,
+ JS::OffThreadCompileCallback callback, void* callbackData)
+ : ParseTask(ParseTaskKind::ScriptStencil, cx, callback, callbackData),
+ data(std::move(srcBuf)) {}
+
+template <typename Unit>
+void CompileToStencilTask<Unit>::parse(FrontendContext* fc) {
+ stencil_ =
+ JS::CompileGlobalScriptToStencil(fc, options, data, compileStorage_);
+ if (!stencil_) {
+ return;
+ }
+
+ if (options.allocateInstantiationStorage) {
+ if (!JS::PrepareForInstantiate(fc, compileStorage_, *stencil_,
+ instantiationStorage_)) {
+ stencil_ = nullptr;
+ }
+ }
+}
+
+template <typename Unit>
+CompileModuleToStencilTask<Unit>::CompileModuleToStencilTask(
+ JSContext* cx, JS::SourceText<Unit>& srcBuf,
+ JS::OffThreadCompileCallback callback, void* callbackData)
+ : ParseTask(ParseTaskKind::ModuleStencil, cx, callback, callbackData),
+ data(std::move(srcBuf)) {}
+
+template <typename Unit>
+void CompileModuleToStencilTask<Unit>::parse(FrontendContext* fc) {
+ stencil_ =
+ JS::CompileModuleScriptToStencil(fc, options, data, compileStorage_);
+ if (!stencil_) {
+ return;
+ }
+
+ if (options.allocateInstantiationStorage) {
+ if (!JS::PrepareForInstantiate(fc, compileStorage_, *stencil_,
+ instantiationStorage_)) {
+ stencil_ = nullptr;
+ }
+ }
+}
+
+DecodeStencilTask::DecodeStencilTask(JSContext* cx,
+ const JS::TranscodeRange& range,
+ JS::OffThreadCompileCallback callback,
+ void* callbackData)
+ : ParseTask(ParseTaskKind::StencilDecode, cx, callback, callbackData),
+ range(range) {
+ MOZ_ASSERT(JS::IsTranscodingBytecodeAligned(range.begin().get()));
+}
+
+static void ReportDecodeFailure(JS::FrontendContext* fc, ...) {
+ va_list ap;
+ va_start(ap, fc);
+
+ js::ErrorMetadata metadata;
+ metadata.filename = "<unknown>";
+ metadata.lineNumber = 0;
+ metadata.columnNumber = 0;
+ metadata.lineLength = 0;
+ metadata.tokenOffset = 0;
+ metadata.isMuted = false;
+
+ js::ReportCompileErrorLatin1(fc, std::move(metadata), nullptr,
+ JSMSG_DECODE_FAILURE, &ap);
+
+ va_end(ap);
+}
+
+void DecodeStencilTask::parse(FrontendContext* fc) {
+ if (!compileStorage_.allocateInput(fc, options)) {
+ return;
+ }
+ if (!compileStorage_.getInput().initForGlobal(fc)) {
+ return;
+ }
+
+ stencil_ = fc->getAllocator()->new_<frontend::CompilationStencil>(
+ compileStorage_.getInput().source);
+ if (!stencil_) {
+ return;
+ }
+
+ bool succeeded = false;
+ (void)stencil_->deserializeStencils(fc, options, range, &succeeded);
+ if (!succeeded) {
+ if (!fc->hadErrors()) {
+ ReportDecodeFailure(fc);
+ }
+ stencil_ = nullptr;
+ return;
+ }
+
+ if (options.allocateInstantiationStorage) {
+ if (!JS::PrepareForInstantiate(fc, compileStorage_, *stencil_,
+ instantiationStorage_)) {
+ stencil_ = nullptr;
+ }
+ }
+}
+
+MultiStencilsDecodeTask::MultiStencilsDecodeTask(
+ JSContext* cx, JS::TranscodeSources& sources,
+ JS::OffThreadCompileCallback callback, void* callbackData)
+ : ParseTask(ParseTaskKind::MultiStencilsDecode, cx, callback, callbackData),
+ sources(&sources) {}
+
+void MultiStencilsDecodeTask::parse(FrontendContext* fc) {
+ if (!stencils.reserve(sources->length())) {
+ ReportOutOfMemory(fc); // This sets |outOfMemory|.
+ return;
+ }
+
+ for (auto& source : *sources) {
+ frontend::CompilationInput stencilInput(options);
+ if (!stencilInput.initForGlobal(fc)) {
+ break;
+ }
+
+ RefPtr<frontend::CompilationStencil> stencil =
+ fc->getAllocator()->new_<frontend::CompilationStencil>(
+ stencilInput.source);
+ if (!stencil) {
+ break;
+ }
+ bool succeeded = false;
+ (void)stencil->deserializeStencils(fc, options, source.range, &succeeded);
+ if (!succeeded) {
+      // If any decode fails, don't process the rest; we are likely hitting
+      // OOM.
+ break;
+ }
+ stencils.infallibleEmplaceBack(stencil.forget());
+ }
+}
+
+void js::StartOffThreadDelazification(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ const frontend::CompilationStencil& stencil) {
+ // Skip delazify tasks if we parse everything on-demand or ahead.
+ auto strategy = options.eagerDelazificationStrategy();
+ if (strategy == JS::DelazificationOption::OnDemandOnly ||
+ strategy == JS::DelazificationOption::ParseEverythingEagerly) {
+ return;
+ }
+
+ // Skip delazify task if code coverage is enabled.
+ if (cx->realm()->collectCoverageForDebug()) {
+ return;
+ }
+
+ if (!CanUseExtraThreads()) {
+ return;
+ }
+
+ AutoAssertNoPendingException aanpe(cx);
+
+ JSRuntime* runtime = cx->runtime();
+ UniquePtr<DelazifyTask> task;
+ task = DelazifyTask::Create(runtime, cx->options(), options, stencil);
+ if (!task) {
+ return;
+ }
+
+ // Schedule delazification task if there is any function to delazify.
+ if (!task->strategy->done()) {
+ AutoLockHelperThreadState lock;
+ HelperThreadState().submitTask(task.release(), lock);
+ }
+}
+
+bool DelazifyStrategy::add(FrontendContext* fc,
+ const frontend::CompilationStencil& stencil,
+ ScriptIndex index) {
+ using namespace js::frontend;
+ ScriptStencilRef scriptRef{stencil, index};
+
+ // Only functions with bytecode are allowed to be added.
+ MOZ_ASSERT(!scriptRef.scriptData().isGhost());
+ MOZ_ASSERT(scriptRef.scriptData().hasSharedData());
+
+ // Lookup the gc-things range which are referenced by this script.
+ size_t offset = scriptRef.scriptData().gcThingsOffset.index;
+ size_t length = scriptRef.scriptData().gcThingsLength;
+ auto gcThingData = stencil.gcThingData.Subspan(offset, length);
+
+ // Iterate over gc-things of the script and queue inner functions.
+ for (TaggedScriptThingIndex index : mozilla::Reversed(gcThingData)) {
+ if (!index.isFunction()) {
+ continue;
+ }
+
+ ScriptIndex innerScriptIndex = index.toFunction();
+ ScriptStencilRef innerScriptRef{stencil, innerScriptIndex};
+ if (innerScriptRef.scriptData().isGhost() ||
+ !innerScriptRef.scriptData().functionFlags.isInterpreted()) {
+ continue;
+ }
+ if (innerScriptRef.scriptData().hasSharedData()) {
+      // The top-level parse decided to eagerly parse this function, thus we
+      // should visit its inner functions the same way.
+ if (!add(fc, stencil, innerScriptIndex)) {
+ return false;
+ }
+ continue;
+ }
+
+ // Maybe insert the new script index in the queue of functions to delazify.
+ if (!insert(innerScriptIndex, innerScriptRef)) {
+ ReportOutOfMemory(fc);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+DelazifyStrategy::ScriptIndex LargeFirstDelazification::next() {
+ std::swap(heap.back(), heap[0]);
+ ScriptIndex result = heap.popCopy().second;
+
+  // NOTE: These are heap indexes offset by 1, such that we can manipulate the
+  // tree of heap-sorted values, which bubbles the largest values up towards
+  // the root of the tree.
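+  //
+  // For example (sizes only), popping the root of [90, 40, 70, 10] moves 10
+  // to the root; the loop below then swaps 10 with 70, restoring the max-heap
+  // [70, 40, 10].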
+ size_t len = heap.length();
+ size_t i = 1;
+ while (true) {
+    // NOTE: We write (n + 1) - 1 instead of n to make explicit that the
+    // manipulated indexes are all offset by 1.
+ size_t n = 2 * i;
+ size_t largest;
+ if (n + 1 <= len && heap[(n + 1) - 1].first > heap[n - 1].first) {
+ largest = n + 1;
+ } else if (n <= len) {
+      // The condition is n <= len in case n + 1 is outside the heap vector
+      // but n is not, in which case we still want to check whether the last
+      // element of the heap vector should be swapped. Otherwise heap[n - 1]
+      // represents a larger function than heap[(n + 1) - 1].
+ largest = n;
+ } else {
+      // n is outside the heap vector, thus our element is already in a leaf
+      // position and will not be moved any more.
+ break;
+ }
+
+ if (heap[i - 1].first < heap[largest - 1].first) {
+      // We found a child of the current element whose function body is
+      // larger. We swap it with the current element, such that the largest
+      // element is closer to the root of the tree.
+ std::swap(heap[i - 1], heap[largest - 1]);
+ i = largest;
+ } else {
+ // The largest function found as a child of the current node is smaller
+ // than the current node's function size. The heap tree is now organized
+ // as expected.
+ break;
+ }
+ }
+
+ return result;
+}
+
+bool LargeFirstDelazification::insert(ScriptIndex index,
+ frontend::ScriptStencilRef& ref) {
+ const frontend::ScriptStencilExtra& extra = ref.scriptExtra();
+ SourceSize size = extra.extent.sourceEnd - extra.extent.sourceStart;
+ if (!heap.append(std::pair(size, index))) {
+ return false;
+ }
+
+  // NOTE: These are heap indexes offset by 1, such that we can manipulate the
+  // tree of heap-sorted values, which bubbles the largest values up towards
+  // the root of the tree.
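+  //
+  // For example (sizes only), appending 80 to [70, 40, 10] places it as a
+  // child of 40; the loop below swaps it first with 40 and then with 70,
+  // yielding [80, 70, 10, 40].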
+ size_t i = heap.length();
+ while (i > 1) {
+ if (heap[i - 1].first <= heap[(i / 2) - 1].first) {
+ return true;
+ }
+
+ std::swap(heap[i - 1], heap[(i / 2) - 1]);
+ i /= 2;
+ }
+
+ return true;
+}
+
+UniquePtr<DelazifyTask> DelazifyTask::Create(
+ JSRuntime* runtime, const JS::ContextOptions& contextOptions,
+ const JS::ReadOnlyCompileOptions& options,
+ const frontend::CompilationStencil& stencil) {
+ UniquePtr<DelazifyTask> task;
+ task.reset(js_new<DelazifyTask>(runtime, contextOptions));
+ if (!task) {
+ return nullptr;
+ }
+
+ AutoSetContextFrontendErrors recordErrors(&task->fc_);
+ RefPtr<ScriptSource> source(stencil.source);
+ StencilCache& cache = runtime->caches().delazificationCache;
+ if (!cache.startCaching(std::move(source))) {
+ return nullptr;
+ }
+
+ // Clone the extensible stencil to be used for eager delazification.
+ auto initial = task->fc_.getAllocator()
+ ->make_unique<frontend::ExtensibleCompilationStencil>(
+ options, stencil.source);
+ if (!initial || !initial->cloneFrom(&task->fc_, stencil)) {
+ // In case of errors, skip this and delazify on-demand.
+ return nullptr;
+ }
+
+ if (!task->init(options, std::move(initial))) {
+ // In case of errors, skip this and delazify on-demand.
+ return nullptr;
+ }
+
+ return task;
+}
+
+DelazifyTask::DelazifyTask(JSRuntime* runtime,
+ const JS::ContextOptions& options)
+ : runtime(runtime), contextOptions(options), merger() {}
+
+DelazifyTask::~DelazifyTask() {
+ // The LinkedListElement destructor will remove us from any list we are part
+ // of without synchronization, so ensure that doesn't happen.
+ MOZ_DIAGNOSTIC_ASSERT(!isInList());
+}
+
+bool DelazifyTask::init(
+ const JS::ReadOnlyCompileOptions& options,
+ UniquePtr<frontend::ExtensibleCompilationStencil>&& initial) {
+ using namespace js::frontend;
+
+ if (!fc_.allocateOwnedPool()) {
+ return false;
+ }
+
+ if (!merger.setInitial(&fc_, std::move(initial))) {
+ return false;
+ }
+
+ switch (options.eagerDelazificationStrategy()) {
+ case JS::DelazificationOption::OnDemandOnly:
+      // OnDemandOnly parses functions as they are required to continue
+      // execution on the main thread.
+ MOZ_CRASH("OnDemandOnly should not create a DelazifyTask.");
+ break;
+ case JS::DelazificationOption::CheckConcurrentWithOnDemand:
+ case JS::DelazificationOption::ConcurrentDepthFirst:
+      // ConcurrentDepthFirst visits all functions to be delazified, visiting
+      // inner functions before sibling functions.
+ strategy = fc_.getAllocator()->make_unique<DepthFirstDelazification>();
+ break;
+ case JS::DelazificationOption::ConcurrentLargeFirst:
+      // ConcurrentLargeFirst visits all functions to be delazified, visiting
+      // the largest function first.
+ strategy = fc_.getAllocator()->make_unique<LargeFirstDelazification>();
+ break;
+ case JS::DelazificationOption::ParseEverythingEagerly:
+      // ParseEverythingEagerly parses all functions eagerly, thus leaving no
+      // functions to be parsed on demand.
+ MOZ_CRASH("ParseEverythingEagerly should not create a DelazifyTask");
+ break;
+ }
+
+ if (!strategy) {
+ return false;
+ }
+
+  // Queue functions from the top level to be delazified.
+ BorrowingCompilationStencil borrow(merger.getResult());
+ ScriptIndex topLevel{0};
+ return strategy->add(&fc_, borrow, topLevel);
+}
+
+size_t DelazifyTask::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t mergerSize = merger.getResult().sizeOfIncludingThis(mallocSizeOf);
+ return mergerSize;
+}
+
+void DelazifyTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
+ {
+ AutoSetHelperThreadContext usesContext(contextOptions, lock);
+ AutoUnlockHelperThreadState unlock(lock);
+ JSContext* cx = TlsContext.get();
+ if (!runTask(cx)) {
+      // NOTE: We do not report errors beyond this scope, as there is nowhere
+      // to report them to. In the meantime, prevent the eager delazification
+      // from running after any kind of error.
+ strategy->clear();
+ }
+ MOZ_ASSERT(cx->tempLifoAlloc().isEmpty());
+ cx->tempLifoAlloc().freeAll();
+ cx->frontendCollectionPool().purge();
+ fc_.nameCollectionPool().purge();
+ }
+
+ // If we should continue to delazify even more functions, then re-add this
+ // task to the vector of delazification tasks. This might happen when the
+ // DelazifyTask is interrupted by a higher priority task. (see
+ // mozilla::TaskController & mozilla::Task)
+ if (!strategy->done()) {
+ HelperThreadState().submitTask(this, lock);
+ } else {
+ UniquePtr<FreeDelazifyTask> freeTask(js_new<FreeDelazifyTask>(this));
+ if (freeTask) {
+ HelperThreadState().submitTask(std::move(freeTask), lock);
+ }
+ }
+}
+
+bool DelazifyTask::runTask(JSContext* cx) {
+ fc_.setStackQuota(HelperThreadState().stackQuota);
+
+ AutoSetContextRuntime ascr(runtime);
+ AutoSetContextFrontendErrors recordErrors(&this->fc_);
+
+ using namespace js::frontend;
+
+  // Create a scope-binding cache dedicated to this delazification task. Its
+  // memory is reclaimed when the task is interrupted or when all
+  // delazifications are completed.
+  //
+  // We do not use the cache from the JSContext/Runtime, as it is not thread
+  // safe to do so: it could be purged by a GC in the meantime.
+ StencilScopeBindingCache scopeCache(merger);
+
+ while (!strategy->done() || isInterrupted()) {
+ RefPtr<CompilationStencil> innerStencil;
+ ScriptIndex scriptIndex = strategy->next();
+ {
+ BorrowingCompilationStencil borrow(merger.getResult());
+
+ // Take the next inner function to be delazified.
+ ScriptStencilRef scriptRef{borrow, scriptIndex};
+ MOZ_ASSERT(!scriptRef.scriptData().isGhost());
+ MOZ_ASSERT(!scriptRef.scriptData().hasSharedData());
+
+ // Parse and generate bytecode for the inner function.
+ innerStencil = DelazifyCanonicalScriptedFunction(cx, &fc_, &scopeCache,
+ borrow, scriptIndex);
+ if (!innerStencil) {
+ return false;
+ }
+
+ // Add the generated stencil to the cache, to be consumed by the main
+ // thread.
+ StencilCache& cache = runtime->caches().delazificationCache;
+ StencilContext key(borrow.source, scriptRef.scriptExtra().extent);
+ if (auto guard = cache.isSourceCached(borrow.source)) {
+ if (!cache.putNew(guard, key, innerStencil.get())) {
+ ReportOutOfMemory(&fc_);
+ return false;
+ }
+ } else {
+ // Stencils for this source are no longer accepted in the cache, thus
+ // there is no reason to keep our eager delazification going.
+ strategy->clear();
+ return true;
+ }
+ }
+
+    // We merge the delazification now. While this could be postponed until we
+    // have to look at inner functions, it is simpler to do it now than to
+    // query the cache for every enclosing script.
+ if (!merger.addDelazification(&this->fc_, *innerStencil)) {
+ return false;
+ }
+
+ {
+ BorrowingCompilationStencil borrow(merger.getResult());
+ if (!strategy->add(&fc_, borrow, scriptIndex)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+void FreeDelazifyTask::runHelperThreadTask(AutoLockHelperThreadState& locked) {
+ {
+ AutoUnlockHelperThreadState unlock(locked);
+ js_delete(task);
+ task = nullptr;
+ }
+
+ js_delete(this);
+}
+
+static void WaitForOffThreadParses(JSRuntime* rt,
+ AutoLockHelperThreadState& lock) {
+ if (!HelperThreadState().isInitialized(lock)) {
+ return;
+ }
+
+ GlobalHelperThreadState::ParseTaskVector& worklist =
+ HelperThreadState().parseWorklist(lock);
+
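+  // Block until no parse task belonging to this runtime is either queued or
+  // currently being executed.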
+ while (true) {
+ bool pending = false;
+ for (const auto& task : worklist) {
+ if (task->runtimeMatches(rt)) {
+ pending = true;
+ break;
+ }
+ }
+ if (!pending) {
+ bool inProgress = false;
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (helper->is<ParseTask>() &&
+ helper->as<ParseTask>()->runtimeMatches(rt)) {
+ inProgress = true;
+ break;
+ }
+ }
+ if (!inProgress) {
+ break;
+ }
+ }
+ HelperThreadState().wait(lock);
+ }
+
+#ifdef DEBUG
+ for (const auto& task : worklist) {
+ MOZ_ASSERT(!task->runtimeMatches(rt));
+ }
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ MOZ_ASSERT_IF(helper->is<ParseTask>(),
+ !helper->as<ParseTask>()->runtimeMatches(rt));
+ }
+#endif
+}
+
+void js::CancelOffThreadParses(JSRuntime* rt) {
+ AutoLockHelperThreadState lock;
+
+ // Instead of forcibly canceling pending parse tasks, just wait for all
+ // scheduled and in progress ones to complete. Otherwise the final GC may not
+ // collect everything due to zones being used off thread.
+ WaitForOffThreadParses(rt, lock);
+
+ // Clean up any parse tasks which haven't been finished by the main thread.
+ auto& finished = HelperThreadState().parseFinishedList(lock);
+ while (true) {
+ bool found = false;
+ ParseTask* next;
+ ParseTask* task = finished.getFirst();
+ while (task) {
+ next = task->getNext();
+ if (task->runtimeMatches(rt)) {
+ found = true;
+ task->remove();
+ HelperThreadState().destroyParseTask(rt, task);
+ }
+ task = next;
+ }
+ if (!found) {
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ for (ParseTask* task : finished) {
+ MOZ_ASSERT(!task->runtimeMatches(rt));
+ }
+#endif
+}
+
+static void CancelPendingDelazifyTask(JSRuntime* rt,
+ AutoLockHelperThreadState& lock) {
+ auto& delazifyList = HelperThreadState().delazifyWorklist(lock);
+
+ auto end = delazifyList.end();
+ for (auto iter = delazifyList.begin(); iter != end;) {
+ DelazifyTask* task = *iter;
+ ++iter;
+ if (task->runtimeMatches(rt)) {
+ task->removeFrom(delazifyList);
+ js_delete(task);
+ }
+ }
+}
+
+static void WaitUntilCancelledDelazifyTasks(JSRuntime* rt,
+ AutoLockHelperThreadState& lock) {
+ if (!HelperThreadState().isInitialized(lock)) {
+ return;
+ }
+
+ while (true) {
+ CancelPendingDelazifyTask(rt, lock);
+
+    // If running tasks are delazifying any functions, then we have to wait
+    // until they complete to remove them from the pending list. A DelazifyTask
+    // inserts itself back into the worklist to be processed once more after
+    // delazifying a function.
+ bool inProgress = false;
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (helper->is<DelazifyTask>() &&
+ helper->as<DelazifyTask>()->runtimeMatches(rt)) {
+ inProgress = true;
+ break;
+ }
+ }
+ if (!inProgress) {
+ break;
+ }
+
+ HelperThreadState().wait(lock);
+ }
+
+#ifdef DEBUG
+ for (DelazifyTask* task : HelperThreadState().delazifyWorklist(lock)) {
+ MOZ_ASSERT(!task->runtimeMatches(rt));
+ }
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ MOZ_ASSERT_IF(helper->is<DelazifyTask>(),
+ !helper->as<DelazifyTask>()->runtimeMatches(rt));
+ }
+#endif
+}
+
+static void WaitUntilEmptyFreeDelazifyTaskVector(
+ AutoLockHelperThreadState& lock) {
+ if (!HelperThreadState().isInitialized(lock)) {
+ return;
+ }
+
+ while (true) {
+ bool inProgress = false;
+ auto& freeList = HelperThreadState().freeDelazifyTaskVector(lock);
+ if (!freeList.empty()) {
+ inProgress = true;
+ }
+
+    // If any FreeDelazifyTask is currently being executed, then we have to
+    // wait until it completes.
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (helper->is<FreeDelazifyTask>()) {
+ inProgress = true;
+ break;
+ }
+ }
+ if (!inProgress) {
+ break;
+ }
+
+ HelperThreadState().wait(lock);
+ }
+}
+
+void js::CancelOffThreadDelazify(JSRuntime* runtime) {
+ AutoLockHelperThreadState lock;
+
+  // Cancel all delazify tasks from the given runtime, and wait if tasks from
+  // the given runtime are being executed.
+ WaitUntilCancelledDelazifyTasks(runtime, lock);
+
+  // Empty the free list of delazify tasks, in case one of them ended and
+  // therefore did not return to the pending list of delazify tasks.
+ WaitUntilEmptyFreeDelazifyTaskVector(lock);
+}
+
+static bool HasAnyDelazifyTask(JSRuntime* rt, AutoLockHelperThreadState& lock) {
+ auto& delazifyList = HelperThreadState().delazifyWorklist(lock);
+ for (auto task : delazifyList) {
+ if (task->runtimeMatches(rt)) {
+ return true;
+ }
+ }
+
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (helper->is<DelazifyTask>() &&
+ helper->as<DelazifyTask>()->runtimeMatches(rt)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void js::WaitForAllDelazifyTasks(JSRuntime* rt) {
+ AutoLockHelperThreadState lock;
+ if (!HelperThreadState().isInitialized(lock)) {
+ return;
+ }
+
+ while (true) {
+ if (!HasAnyDelazifyTask(rt, lock)) {
+ break;
+ }
+
+ HelperThreadState().wait(lock);
+ }
+}
+
+static bool QueueOffThreadParseTask(JSContext* cx, UniquePtr<ParseTask> task) {
+ AutoLockHelperThreadState lock;
+
+ bool result =
+ HelperThreadState().submitTask(cx->runtime(), std::move(task), lock);
+
+ if (!result) {
+ ReportOutOfMemory(cx);
+ }
+ return result;
+}
+
+bool GlobalHelperThreadState::submitTask(
+ JSRuntime* rt, UniquePtr<ParseTask> task,
+ const AutoLockHelperThreadState& locked) {
+ if (!parseWorklist(locked).append(std::move(task))) {
+ return false;
+ }
+
+ dispatch(DispatchReason::NewTask, locked);
+ return true;
+}
+
+void GlobalHelperThreadState::submitTask(
+ DelazifyTask* task, const AutoLockHelperThreadState& locked) {
+ delazifyWorklist(locked).insertBack(task);
+ dispatch(DispatchReason::NewTask, locked);
+}
+
+bool GlobalHelperThreadState::submitTask(
+ UniquePtr<FreeDelazifyTask> task, const AutoLockHelperThreadState& locked) {
+ if (!freeDelazifyTaskVector(locked).append(std::move(task))) {
+ return false;
+ }
+ dispatch(DispatchReason::NewTask, locked);
+ return true;
+}
+
+static JS::OffThreadToken* StartOffThreadParseTask(
+ JSContext* cx, UniquePtr<ParseTask> task,
+ const ReadOnlyCompileOptions& options) {
+ // Suppress GC so that calls below do not trigger a new incremental GC
+ // which could require barriers on the atoms zone.
+ gc::AutoSuppressGC nogc(cx);
+
+ if (!task->init(cx, options)) {
+ return nullptr;
+ }
+
+ JS::OffThreadToken* token = task.get();
+ if (!QueueOffThreadParseTask(cx, std::move(task))) {
+ return nullptr;
+ }
+
+  // Return an opaque pointer to the caller so that it may query/cancel the
+  // task before the callback is fired.
+ return token;
+}
+
+template <typename Unit>
+static JS::OffThreadToken* StartOffThreadCompileToStencilInternal(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<Unit>& srcBuf, JS::OffThreadCompileCallback callback,
+ void* callbackData) {
+ auto task = cx->make_unique<CompileToStencilTask<Unit>>(cx, srcBuf, callback,
+ callbackData);
+ if (!task) {
+ return nullptr;
+ }
+
+ return StartOffThreadParseTask(cx, std::move(task), options);
+}
+
+JS::OffThreadToken* js::StartOffThreadCompileToStencil(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<char16_t>& srcBuf, JS::OffThreadCompileCallback callback,
+ void* callbackData) {
+ return StartOffThreadCompileToStencilInternal(cx, options, srcBuf, callback,
+ callbackData);
+}
+
+JS::OffThreadToken* js::StartOffThreadCompileToStencil(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<Utf8Unit>& srcBuf, JS::OffThreadCompileCallback callback,
+ void* callbackData) {
+ return StartOffThreadCompileToStencilInternal(cx, options, srcBuf, callback,
+ callbackData);
+}
+
+template <typename Unit>
+static JS::OffThreadToken* StartOffThreadCompileModuleToStencilInternal(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<Unit>& srcBuf, JS::OffThreadCompileCallback callback,
+ void* callbackData) {
+ auto task = cx->make_unique<CompileModuleToStencilTask<Unit>>(
+ cx, srcBuf, callback, callbackData);
+ if (!task) {
+ return nullptr;
+ }
+
+ return StartOffThreadParseTask(cx, std::move(task), options);
+}
+
+JS::OffThreadToken* js::StartOffThreadCompileModuleToStencil(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<char16_t>& srcBuf, JS::OffThreadCompileCallback callback,
+ void* callbackData) {
+ return StartOffThreadCompileModuleToStencilInternal(cx, options, srcBuf,
+ callback, callbackData);
+}
+
+JS::OffThreadToken* js::StartOffThreadCompileModuleToStencil(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<Utf8Unit>& srcBuf, JS::OffThreadCompileCallback callback,
+ void* callbackData) {
+ return StartOffThreadCompileModuleToStencilInternal(cx, options, srcBuf,
+ callback, callbackData);
+}
+
+JS::OffThreadToken* js::StartOffThreadDecodeStencil(
+ JSContext* cx, const JS::DecodeOptions& options,
+ const JS::TranscodeRange& range, JS::OffThreadCompileCallback callback,
+ void* callbackData) {
+ auto task =
+ cx->make_unique<DecodeStencilTask>(cx, range, callback, callbackData);
+ if (!task) {
+ return nullptr;
+ }
+
+ JS::CompileOptions compileOptions(cx);
+ options.copyTo(compileOptions);
+
+ return StartOffThreadParseTask(cx, std::move(task), compileOptions);
+}
+
+JS::OffThreadToken* js::StartOffThreadDecodeMultiStencils(
+ JSContext* cx, const JS::DecodeOptions& options,
+ JS::TranscodeSources& sources, JS::OffThreadCompileCallback callback,
+ void* callbackData) {
+ auto task = cx->make_unique<MultiStencilsDecodeTask>(cx, sources, callback,
+ callbackData);
+ if (!task) {
+ return nullptr;
+ }
+
+ JS::CompileOptions compileOptions(cx);
+ options.copyTo(compileOptions);
+
+ return StartOffThreadParseTask(cx, std::move(task), compileOptions);
+}
+
+bool js::CurrentThreadIsParseThread() {
+ JSContext* cx = TlsContext.get();
+  // Check whether this thread is running a ParseTask or a DelazifyTask.
+ return cx && cx->isHelperThreadContext() && cx->frontendErrors();
+}
+
+bool GlobalHelperThreadState::ensureInitialized() {
+ MOZ_ASSERT(CanUseExtraThreads());
+ MOZ_ASSERT(this == &HelperThreadState());
+
+ AutoLockHelperThreadState lock;
+
+ if (isInitialized(lock)) {
+ return true;
+ }
+
+ for (size_t& i : runningTaskCount) {
+ i = 0;
+ }
+
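+  // Use the internal thread pool unless the embedding has provided its own
+  // task dispatch callback.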
+ useInternalThreadPool_ = !dispatchTaskCallback;
+ if (useInternalThreadPool(lock)) {
+ if (!InternalThreadPool::Initialize(threadCount, lock)) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(dispatchTaskCallback);
+
+ if (!ensureThreadCount(threadCount, lock)) {
+ finishThreads(lock);
+ return false;
+ }
+
+ MOZ_ASSERT(threadCount != 0);
+ isInitialized_ = true;
+ return true;
+}
+
+bool GlobalHelperThreadState::ensureThreadCount(
+ size_t count, AutoLockHelperThreadState& lock) {
+ if (!helperTasks_.reserve(count)) {
+ return false;
+ }
+
+ if (useInternalThreadPool(lock)) {
+ InternalThreadPool& pool = InternalThreadPool::Get();
+ if (pool.threadCount(lock) < count) {
+ if (!pool.ensureThreadCount(count, lock)) {
+ return false;
+ }
+
+ threadCount = pool.threadCount(lock);
+ }
+ }
+
+ return true;
+}
+
+GlobalHelperThreadState::GlobalHelperThreadState()
+ : cpuCount(0),
+ threadCount(0),
+ totalCountRunningTasks(0),
+ registerThread(nullptr),
+ unregisterThread(nullptr),
+ wasmTier2GeneratorsFinished_(0) {
+ MOZ_ASSERT(!gHelperThreadState);
+
+ cpuCount = ClampDefaultCPUCount(GetCPUCount());
+ threadCount = ThreadCountForCPUCount(cpuCount);
+ gcParallelThreadCount = threadCount;
+
+ MOZ_ASSERT(cpuCount > 0, "GetCPUCount() seems broken");
+}
+
+void GlobalHelperThreadState::finish(AutoLockHelperThreadState& lock) {
+ if (!isInitialized(lock)) {
+ return;
+ }
+
+ finishThreads(lock);
+
+ // Make sure there are no Ion free tasks left. We check this here because,
+ // unlike the other tasks, we don't explicitly block on this when
+ // destroying a runtime.
+ auto& freeList = ionFreeList(lock);
+ while (!freeList.empty()) {
+ UniquePtr<jit::IonFreeTask> task = std::move(freeList.back());
+ freeList.popBack();
+ jit::FreeIonCompileTask(task->compileTask());
+ }
+
+ destroyHelperContexts(lock);
+}
+
+void GlobalHelperThreadState::finishThreads(AutoLockHelperThreadState& lock) {
+ waitForAllTasksLocked(lock);
+ terminating_ = true;
+
+ if (InternalThreadPool::IsInitialized()) {
+ InternalThreadPool::ShutDown(lock);
+ }
+}
+
+JSContext* GlobalHelperThreadState::getFirstUnusedContext(
+ AutoLockHelperThreadState& locked) {
+ for (auto& cx : helperContexts_) {
+ if (cx->contextAvailable(locked)) {
+ return cx;
+ }
+ }
+
+ MOZ_ASSERT(helperContexts_.length() < threadCount);
+
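+  // All existing helper contexts are in use; create a new one. Allocation
+  // failure here is treated as fatal.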
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ auto cx = js::MakeUnique<JSContext>(nullptr, JS::ContextOptions());
+ if (!cx || !cx->init(ContextKind::HelperThread) ||
+ !helperContexts_.append(cx.get())) {
+ oomUnsafe.crash("GlobalHelperThreadState::getFirstUnusedContext");
+ }
+
+ return cx.release();
+}
+
+void GlobalHelperThreadState::destroyHelperContexts(
+ AutoLockHelperThreadState& lock) {
+ while (helperContexts_.length() > 0) {
+ js_delete(helperContexts_.popCopy());
+ }
+}
+
+#ifdef DEBUG
+void GlobalHelperThreadState::assertIsLockedByCurrentThread() const {
+ gHelperThreadLock.assertOwnedByCurrentThread();
+}
+#endif // DEBUG
+
+void GlobalHelperThreadState::dispatch(
+ DispatchReason reason, const AutoLockHelperThreadState& locked) {
+ if (canStartTasks(locked) && tasksPending_ < threadCount) {
+ // This doesn't guarantee that we don't dispatch more tasks to the external
+ // pool than necessary if tasks are taking a long time to start, but it does
+ // limit the number.
+ tasksPending_++;
+
+ // The hazard analysis can't tell that the callback doesn't GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ dispatchTaskCallback(reason);
+ }
+}
+
+void GlobalHelperThreadState::wait(
+ AutoLockHelperThreadState& locked,
+ TimeDuration timeout /* = TimeDuration::Forever() */) {
+ consumerWakeup.wait_for(locked, timeout);
+}
+
+void GlobalHelperThreadState::notifyAll(const AutoLockHelperThreadState&) {
+ consumerWakeup.notify_all();
+}
+
+void GlobalHelperThreadState::notifyOne(const AutoLockHelperThreadState&) {
+ consumerWakeup.notify_one();
+}
+
+bool GlobalHelperThreadState::hasActiveThreads(
+ const AutoLockHelperThreadState& lock) {
+ return !helperTasks(lock).empty();
+}
+
+void js::WaitForAllHelperThreads() { HelperThreadState().waitForAllTasks(); }
+
+void js::WaitForAllHelperThreads(AutoLockHelperThreadState& lock) {
+ HelperThreadState().waitForAllTasksLocked(lock);
+}
+
+void GlobalHelperThreadState::waitForAllTasks() {
+ AutoLockHelperThreadState lock;
+ waitForAllTasksLocked(lock);
+}
+
+void GlobalHelperThreadState::waitForAllTasksLocked(
+ AutoLockHelperThreadState& lock) {
+ CancelOffThreadWasmTier2GeneratorLocked(lock);
+
+ while (canStartTasks(lock) || tasksPending_ || hasActiveThreads(lock)) {
+ wait(lock);
+ }
+
+ MOZ_ASSERT(gcParallelWorklist().isEmpty(lock));
+ MOZ_ASSERT(ionWorklist(lock).empty());
+ MOZ_ASSERT(wasmWorklist(lock, wasm::CompileMode::Tier1).empty());
+ MOZ_ASSERT(promiseHelperTasks(lock).empty());
+ MOZ_ASSERT(parseWorklist(lock).empty());
+ MOZ_ASSERT(compressionWorklist(lock).empty());
+ MOZ_ASSERT(ionFreeList(lock).empty());
+ MOZ_ASSERT(wasmWorklist(lock, wasm::CompileMode::Tier2).empty());
+ MOZ_ASSERT(wasmTier2GeneratorWorklist(lock).empty());
+ MOZ_ASSERT(!tasksPending_);
+ MOZ_ASSERT(!hasActiveThreads(lock));
+}
+
+// A task can be a "master" task, i.e., it will block waiting for other worker
+// threads that perform work on its behalf. If so it must not take the last
+// available thread; there must always be at least one worker thread able to do
+// the actual work. (Or the system may deadlock.)
+//
+// If a task is a master task it *must* pass isMaster=true here, or perform a
+// similar calculation to avoid deadlock from starvation.
+//
+// isMaster should only be true if the thread calling checkTaskThreadLimit() is
+// a helper thread.
+//
+// NOTE: Calling checkTaskThreadLimit() from a helper thread in the dynamic
+// region after currentTask.emplace() and before currentTask.reset() may cause
+// it to return a different result than if it is called outside that dynamic
+// region, as the predicate inspects the values of the threads' currentTask
+// members.
+
+bool GlobalHelperThreadState::checkTaskThreadLimit(
+ ThreadType threadType, size_t maxThreads, bool isMaster,
+ const AutoLockHelperThreadState& lock) const {
+ MOZ_ASSERT(maxThreads > 0);
+
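+  // A non-master task can always start if the per-type limit is at least the
+  // total number of threads: the limit can never be exceeded.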
+ if (!isMaster && maxThreads >= threadCount) {
+ return true;
+ }
+
+ size_t count = runningTaskCount[threadType];
+ if (count >= maxThreads) {
+ return false;
+ }
+
+ MOZ_ASSERT(threadCount >= totalCountRunningTasks);
+ size_t idle = threadCount - totalCountRunningTasks;
+
+ // It is possible for the number of idle threads to be zero here, because
+ // checkTaskThreadLimit() can be called from non-helper threads. Notably,
+ // the compression task scheduler invokes it, and runs off a helper thread.
+ if (idle == 0) {
+ return false;
+ }
+
+ // A master thread that's the last available thread must not be allowed to
+ // run.
+ if (isMaster && idle == 1) {
+ return false;
+ }
+
+ return true;
+}
+
+void GlobalHelperThreadState::triggerFreeUnusedMemory() {
+ if (!CanUseExtraThreads()) {
+ return;
+ }
+
+ AutoLockHelperThreadState lock;
+ for (auto& context : helperContexts_) {
+ if (context->shouldFreeUnusedMemory() && context->contextAvailable(lock)) {
+ // This context hasn't been used since the last time freeUnusedMemory
+ // was set. Free the temp LifoAlloc from the main thread.
+ context->tempLifoAllocNoCheck().freeAll();
+ context->setFreeUnusedMemory(false);
+ } else {
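+      // Flag the context to have its unused memory freed the next time it is
+      // found idle.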
+ context->setFreeUnusedMemory(true);
+ }
+ }
+}
+
+static inline bool IsHelperThreadSimulatingOOM(js::ThreadType threadType) {
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ return js::oom::simulator.targetThread() == threadType;
+#else
+ return false;
+#endif
+}
+
+void GlobalHelperThreadState::addSizeOfIncludingThis(
+ JS::GlobalStats* stats, const AutoLockHelperThreadState& lock) const {
+#ifdef DEBUG
+ assertIsLockedByCurrentThread();
+#endif
+
+ mozilla::MallocSizeOf mallocSizeOf = stats->mallocSizeOf_;
+ JS::HelperThreadStats& htStats = stats->helperThread;
+
+ htStats.stateData += mallocSizeOf(this);
+
+ if (InternalThreadPool::IsInitialized()) {
+ htStats.stateData +=
+ InternalThreadPool::Get().sizeOfIncludingThis(mallocSizeOf, lock);
+ }
+
+ // Report memory used by various containers
+ htStats.stateData +=
+ ionWorklist_.sizeOfExcludingThis(mallocSizeOf) +
+ ionFinishedList_.sizeOfExcludingThis(mallocSizeOf) +
+ ionFreeList_.sizeOfExcludingThis(mallocSizeOf) +
+ wasmWorklist_tier1_.sizeOfExcludingThis(mallocSizeOf) +
+ wasmWorklist_tier2_.sizeOfExcludingThis(mallocSizeOf) +
+ wasmTier2GeneratorWorklist_.sizeOfExcludingThis(mallocSizeOf) +
+ promiseHelperTasks_.sizeOfExcludingThis(mallocSizeOf) +
+ parseWorklist_.sizeOfExcludingThis(mallocSizeOf) +
+ parseFinishedList_.sizeOfExcludingThis(mallocSizeOf) +
+ compressionPendingList_.sizeOfExcludingThis(mallocSizeOf) +
+ compressionWorklist_.sizeOfExcludingThis(mallocSizeOf) +
+ compressionFinishedList_.sizeOfExcludingThis(mallocSizeOf) +
+ gcParallelWorklist_.sizeOfExcludingThis(mallocSizeOf, lock) +
+ helperContexts_.sizeOfExcludingThis(mallocSizeOf) +
+ helperTasks_.sizeOfExcludingThis(mallocSizeOf);
+
+ // Report ParseTasks on wait lists
+ for (const auto& task : parseWorklist_) {
+ htStats.parseTask += task->sizeOfIncludingThis(mallocSizeOf);
+ }
+ for (auto task : parseFinishedList_) {
+ htStats.parseTask += task->sizeOfIncludingThis(mallocSizeOf);
+ }
+
+ // Report IonCompileTasks on wait lists
+ for (auto task : ionWorklist_) {
+ htStats.ionCompileTask += task->sizeOfExcludingThis(mallocSizeOf);
+ }
+ for (auto task : ionFinishedList_) {
+ htStats.ionCompileTask += task->sizeOfExcludingThis(mallocSizeOf);
+ }
+ for (const auto& task : ionFreeList_) {
+ htStats.ionCompileTask +=
+ task->compileTask()->sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ // Report wasm::CompileTasks on wait lists
+ for (auto task : wasmWorklist_tier1_) {
+ htStats.wasmCompile += task->sizeOfExcludingThis(mallocSizeOf);
+ }
+ for (auto task : wasmWorklist_tier2_) {
+ htStats.wasmCompile += task->sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ {
+ // Report memory used by the JSContexts.
+ // We're holding the helper state lock, and the JSContext memory reporter
+ // won't do anything more substantial than traversing data structures and
+ // getting their size, so disable ProtectedData checks.
+ AutoNoteSingleThreadedRegion anstr;
+ for (auto* cx : helperContexts_) {
+ htStats.contexts += cx->sizeOfIncludingThis(mallocSizeOf);
+ }
+ }
+
+ // Report number of helper threads.
+ MOZ_ASSERT(htStats.idleThreadCount == 0);
+ MOZ_ASSERT(threadCount >= totalCountRunningTasks);
+ htStats.activeThreadCount = totalCountRunningTasks;
+ htStats.idleThreadCount = threadCount - totalCountRunningTasks;
+}
+
+size_t GlobalHelperThreadState::maxIonCompilationThreads() const {
+ if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_ION)) {
+ return 1;
+ }
+ return threadCount;
+}
+
+size_t GlobalHelperThreadState::maxWasmCompilationThreads() const {
+ if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_WASM_COMPILE_TIER1) ||
+ IsHelperThreadSimulatingOOM(js::THREAD_TYPE_WASM_COMPILE_TIER2)) {
+ return 1;
+ }
+ return std::min(cpuCount, threadCount);
+}
+
+size_t GlobalHelperThreadState::maxWasmTier2GeneratorThreads() const {
+ return MaxTier2GeneratorTasks;
+}
+
+size_t GlobalHelperThreadState::maxPromiseHelperThreads() const {
+ if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_WASM_COMPILE_TIER1) ||
+ IsHelperThreadSimulatingOOM(js::THREAD_TYPE_WASM_COMPILE_TIER2)) {
+ return 1;
+ }
+ return std::min(cpuCount, threadCount);
+}
+
+size_t GlobalHelperThreadState::maxParseThreads() const {
+ if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_PARSE)) {
+ return 1;
+ }
+ return std::min(cpuCount, threadCount);
+}
+
+size_t GlobalHelperThreadState::maxCompressionThreads() const {
+ if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_COMPRESS)) {
+ return 1;
+ }
+
+ // Compression is triggered on major GCs to compress ScriptSources. It is
+ // considered low priority work.
+ return 1;
+}
+
+size_t GlobalHelperThreadState::maxGCParallelThreads(
+ const AutoLockHelperThreadState& lock) const {
+ if (IsHelperThreadSimulatingOOM(js::THREAD_TYPE_GCPARALLEL)) {
+ return 1;
+ }
+ return gcParallelThreadCount;
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetWasmTier1CompileTask(
+ const AutoLockHelperThreadState& lock) {
+ return maybeGetWasmCompile(lock, wasm::CompileMode::Tier1);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetWasmTier2CompileTask(
+ const AutoLockHelperThreadState& lock) {
+ return maybeGetWasmCompile(lock, wasm::CompileMode::Tier2);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetWasmCompile(
+ const AutoLockHelperThreadState& lock, wasm::CompileMode mode) {
+ if (!canStartWasmCompile(lock, mode)) {
+ return nullptr;
+ }
+
+ return wasmWorklist(lock, mode).popCopyFront();
+}
+
+bool GlobalHelperThreadState::canStartWasmTier1CompileTask(
+ const AutoLockHelperThreadState& lock) {
+ return canStartWasmCompile(lock, wasm::CompileMode::Tier1);
+}
+
+bool GlobalHelperThreadState::canStartWasmTier2CompileTask(
+ const AutoLockHelperThreadState& lock) {
+ return canStartWasmCompile(lock, wasm::CompileMode::Tier2);
+}
+
+bool GlobalHelperThreadState::canStartWasmCompile(
+ const AutoLockHelperThreadState& lock, wasm::CompileMode mode) {
+ if (wasmWorklist(lock, mode).empty()) {
+ return false;
+ }
+
+ // Parallel compilation and background compilation should be disabled on
+ // unicore systems.
+
+ MOZ_RELEASE_ASSERT(cpuCount > 1);
+
+ // If Tier2 is very backlogged we must give priority to it, since the Tier2
+ // queue holds onto Tier1 tasks. Indeed if Tier2 is backlogged we will
+ // devote more resources to Tier2 and not start any Tier1 work at all.
+
+ bool tier2oversubscribed = wasmTier2GeneratorWorklist(lock).length() > 20;
+
+ // For Tier1 and Once compilation, honor the maximum allowed threads to
+ // compile wasm jobs at once, to avoid oversaturating the machine.
+ //
+ // For Tier2 compilation we need to allow other things to happen too, so we
+ // do not allow all logical cores to be used for background work; instead we
+ // wish to use a fraction of the physical cores. We can't directly compute
+ // the physical cores from the logical cores, but 1/3 of the logical cores
+ // is a safe estimate for the number of physical cores available for
+ // background work.
+
+ size_t physCoresAvailable = size_t(ceil(cpuCount / 3.0));
+
+ size_t threads;
+ ThreadType threadType;
+ if (mode == wasm::CompileMode::Tier2) {
+ if (tier2oversubscribed) {
+ threads = maxWasmCompilationThreads();
+ } else {
+ threads = physCoresAvailable;
+ }
+ threadType = THREAD_TYPE_WASM_COMPILE_TIER2;
+ } else {
+ if (tier2oversubscribed) {
+ threads = 0;
+ } else {
+ threads = maxWasmCompilationThreads();
+ }
+ threadType = THREAD_TYPE_WASM_COMPILE_TIER1;
+ }
+
+ return threads != 0 && checkTaskThreadLimit(threadType, threads, lock);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetWasmTier2GeneratorTask(
+ const AutoLockHelperThreadState& lock) {
+ if (!canStartWasmTier2GeneratorTask(lock)) {
+ return nullptr;
+ }
+
+ return wasmTier2GeneratorWorklist(lock).popCopy();
+}
+
+bool GlobalHelperThreadState::canStartWasmTier2GeneratorTask(
+ const AutoLockHelperThreadState& lock) {
+ return !wasmTier2GeneratorWorklist(lock).empty() &&
+ checkTaskThreadLimit(THREAD_TYPE_WASM_GENERATOR_TIER2,
+ maxWasmTier2GeneratorThreads(),
+ /*isMaster=*/true, lock);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetPromiseHelperTask(
+ const AutoLockHelperThreadState& lock) {
+ if (!canStartPromiseHelperTask(lock)) {
+ return nullptr;
+ }
+
+ return promiseHelperTasks(lock).popCopy();
+}
+
+bool GlobalHelperThreadState::canStartPromiseHelperTask(
+ const AutoLockHelperThreadState& lock) {
+ // PromiseHelperTasks can be wasm compilation tasks that in turn block on
+ // wasm compilation so set isMaster = true.
+ return !promiseHelperTasks(lock).empty() &&
+ checkTaskThreadLimit(THREAD_TYPE_PROMISE_TASK,
+ maxPromiseHelperThreads(),
+ /*isMaster=*/true, lock);
+}
+
+static bool IonCompileTaskHasHigherPriority(jit::IonCompileTask* first,
+ jit::IonCompileTask* second) {
+ // Return true if priority(first) > priority(second).
+ //
+ // This method can return whatever it wants, though it really ought to be a
+ // total order. The ordering is allowed to race (change on the fly), however.
+
+ // A higher warm-up counter indicates a higher priority.
+ jit::JitScript* firstJitScript = first->script()->jitScript();
+ jit::JitScript* secondJitScript = second->script()->jitScript();
+ return firstJitScript->warmUpCount() / first->script()->length() >
+ secondJitScript->warmUpCount() / second->script()->length();
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetIonCompileTask(
+ const AutoLockHelperThreadState& lock) {
+ if (!canStartIonCompileTask(lock)) {
+ return nullptr;
+ }
+
+ return highestPriorityPendingIonCompile(lock,
+ /* checkExecutionStatus */ true);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetLowPrioIonCompileTask(
+ const AutoLockHelperThreadState& lock) {
+ if (!canStartIonCompileTask(lock)) {
+ return nullptr;
+ }
+
+ return highestPriorityPendingIonCompile(lock,
+ /* checkExecutionStatus */ false);
+}
+
+bool GlobalHelperThreadState::canStartIonCompileTask(
+ const AutoLockHelperThreadState& lock) {
+ return !ionWorklist(lock).empty() &&
+ checkTaskThreadLimit(THREAD_TYPE_ION, maxIonCompilationThreads(),
+ lock);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetIonFreeTask(
+ const AutoLockHelperThreadState& lock) {
+ if (!canStartIonFreeTask(lock)) {
+ return nullptr;
+ }
+
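+  // Pop the task from the free list, releasing ownership to the caller.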
+ UniquePtr<jit::IonFreeTask> task = std::move(ionFreeList(lock).back());
+ ionFreeList(lock).popBack();
+ return task.release();
+}
+
+bool GlobalHelperThreadState::canStartIonFreeTask(
+ const AutoLockHelperThreadState& lock) {
+ return !ionFreeList(lock).empty();
+}
+
+jit::IonCompileTask* GlobalHelperThreadState::highestPriorityPendingIonCompile(
+ const AutoLockHelperThreadState& lock, bool checkExecutionStatus) {
+ auto& worklist = ionWorklist(lock);
+ MOZ_ASSERT(!worklist.empty());
+
+ // Get the highest priority IonCompileTask which has not started compilation
+ // yet.
+ size_t index = worklist.length();
+ for (size_t i = 0; i < worklist.length(); i++) {
+ if (checkExecutionStatus && !worklist[i]->isMainThreadRunningJS()) {
+ continue;
+ }
+ if (i < index ||
+ IonCompileTaskHasHigherPriority(worklist[i], worklist[index])) {
+ index = i;
+ }
+ }
+
+ if (index == worklist.length()) {
+ return nullptr;
+ }
+ jit::IonCompileTask* task = worklist[index];
+ worklist.erase(&worklist[index]);
+ return task;
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetParseTask(
+ const AutoLockHelperThreadState& lock) {
+ if (!canStartParseTask(lock)) {
+ return nullptr;
+ }
+
+ auto& worklist = parseWorklist(lock);
+ UniquePtr<ParseTask> task = std::move(worklist.back());
+ worklist.popBack();
+ return task.release();
+}
+
+bool GlobalHelperThreadState::canStartParseTask(
+ const AutoLockHelperThreadState& lock) {
+ // Parse tasks that end up compiling asm.js in turn may use Wasm compilation
+ // threads to generate machine code. We have no way (at present) to know
+ // ahead of time whether a parse task is going to parse asm.js content or not,
+ // so we just assume that all parse tasks are master tasks.
+ return !parseWorklist(lock).empty() &&
+ checkTaskThreadLimit(THREAD_TYPE_PARSE, maxParseThreads(),
+ /*isMaster=*/true, lock);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetFreeDelazifyTask(
+ const AutoLockHelperThreadState& lock) {
+ auto& freeList = freeDelazifyTaskVector(lock);
+ if (!freeList.empty()) {
+ UniquePtr<FreeDelazifyTask> task = std::move(freeList.back());
+ freeList.popBack();
+ return task.release();
+ }
+ return nullptr;
+}
+
+bool GlobalHelperThreadState::canStartFreeDelazifyTask(
+ const AutoLockHelperThreadState& lock) {
+ return !freeDelazifyTaskVector(lock).empty() &&
+ checkTaskThreadLimit(THREAD_TYPE_DELAZIFY_FREE, maxParseThreads(),
+ /*isMaster=*/true, lock);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetDelazifyTask(
+ const AutoLockHelperThreadState& lock) {
+  // NOTE: We want to span all available cores with delazification tasks, in
+  // order to parse as many functions as possible ahead of their execution.
+  // Thus, as opposed to parse tasks, which have a higher priority, we do not
+  // exclusively execute these tasks on parse threads.
+ auto& worklist = delazifyWorklist(lock);
+ if (worklist.isEmpty()) {
+ return nullptr;
+ }
+ return worklist.popFirst();
+}
+
+bool GlobalHelperThreadState::canStartDelazifyTask(
+ const AutoLockHelperThreadState& lock) {
+ return !delazifyWorklist(lock).isEmpty() &&
+ checkTaskThreadLimit(THREAD_TYPE_DELAZIFY, maxParseThreads(),
+ /*isMaster=*/true, lock);
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetCompressionTask(
+ const AutoLockHelperThreadState& lock) {
+ if (!canStartCompressionTask(lock)) {
+ return nullptr;
+ }
+
+ auto& worklist = compressionWorklist(lock);
+ UniquePtr<SourceCompressionTask> task = std::move(worklist.back());
+ worklist.popBack();
+ return task.release();
+}
+
+bool GlobalHelperThreadState::canStartCompressionTask(
+ const AutoLockHelperThreadState& lock) {
+ return !compressionWorklist(lock).empty() &&
+ checkTaskThreadLimit(THREAD_TYPE_COMPRESS, maxCompressionThreads(),
+ lock);
+}
+
+void GlobalHelperThreadState::startHandlingCompressionTasks(
+ ScheduleCompressionTask schedule, JSRuntime* maybeRuntime,
+ const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT((schedule == ScheduleCompressionTask::GC) ==
+ (maybeRuntime != nullptr));
+
+ auto& pending = compressionPendingList(lock);
+
+ for (size_t i = 0; i < pending.length(); i++) {
+ UniquePtr<SourceCompressionTask>& task = pending[i];
+ if (schedule == ScheduleCompressionTask::API ||
+ (task->runtimeMatches(maybeRuntime) && task->shouldStart())) {
+      // OOMing during the append results in the task not being scheduled;
+      // it is simply deleted.
+ (void)submitTask(std::move(task), lock);
+ remove(pending, &i);
+ }
+ }
+}
+
+bool GlobalHelperThreadState::submitTask(
+ UniquePtr<SourceCompressionTask> task,
+ const AutoLockHelperThreadState& locked) {
+ if (!compressionWorklist(locked).append(std::move(task))) {
+ return false;
+ }
+
+ dispatch(DispatchReason::NewTask, locked);
+ return true;
+}
+
+bool GlobalHelperThreadState::submitTask(
+ GCParallelTask* task, const AutoLockHelperThreadState& locked) {
+ gcParallelWorklist().insertBack(task, locked);
+ dispatch(DispatchReason::NewTask, locked);
+ return true;
+}
+
+HelperThreadTask* GlobalHelperThreadState::maybeGetGCParallelTask(
+ const AutoLockHelperThreadState& lock) {
+ if (!canStartGCParallelTask(lock)) {
+ return nullptr;
+ }
+
+ return gcParallelWorklist().popFirst(lock);
+}
+
+bool GlobalHelperThreadState::canStartGCParallelTask(
+ const AutoLockHelperThreadState& lock) {
+ return !gcParallelWorklist().isEmpty(lock) &&
+ checkTaskThreadLimit(THREAD_TYPE_GCPARALLEL,
+ maxGCParallelThreads(lock), lock);
+}
+
+ParseTask* GlobalHelperThreadState::removeFinishedParseTask(
+ JSContext* cx, JS::OffThreadToken* token) {
+ // The token is really a ParseTask* which should be in the finished list.
+ auto task = static_cast<ParseTask*>(token);
+
+ // The token was passed in from the browser. Check that the pointer is likely
+ // a valid parse task of the expected kind.
+ MOZ_RELEASE_ASSERT(task->runtime == cx->runtime());
+
+ // Remove the task from the finished list.
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(parseFinishedList(lock).contains(task));
+ task->remove();
+ return task;
+}
+
+UniquePtr<ParseTask> GlobalHelperThreadState::finishParseTaskCommon(
+ JSContext* cx, JS::OffThreadToken* token) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ MOZ_ASSERT(cx->realm());
+
+ Rooted<UniquePtr<ParseTask>> parseTask(cx,
+ removeFinishedParseTask(cx, token));
+
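+  // Forward any errors or warnings accumulated by the off-thread task to the
+  // current context, and fail if this leaves an exception pending.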
+ if (!parseTask->fc_.convertToRuntimeError(cx)) {
+ return nullptr;
+ }
+
+ if (cx->isExceptionPending()) {
+ return nullptr;
+ }
+
+ return std::move(parseTask.get());
+}
+
+already_AddRefed<frontend::CompilationStencil>
+GlobalHelperThreadState::finishStencilTask(JSContext* cx,
+ JS::OffThreadToken* token,
+ JS::InstantiationStorage* storage) {
+ Rooted<UniquePtr<ParseTask>> parseTask(cx, finishParseTaskCommon(cx, token));
+ if (!parseTask) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(parseTask->compileStorage_.hasInput());
+ MOZ_ASSERT(parseTask->stencil_.get());
+
+ if (storage) {
+ MOZ_ASSERT(parseTask->options.allocateInstantiationStorage);
+ parseTask->moveInstantiationStorageInto(*storage);
+ }
+
+ return parseTask->stencil_.forget();
+}
+
+bool GlobalHelperThreadState::finishMultiParseTask(
+ JSContext* cx, ParseTaskKind kind, JS::OffThreadToken* token,
+ mozilla::Vector<RefPtr<JS::Stencil>>* stencils) {
+ MOZ_ASSERT(stencils);
+ Rooted<UniquePtr<ParseTask>> parseTask(cx, finishParseTaskCommon(cx, token));
+ if (!parseTask) {
+ return false;
+ }
+
+ MOZ_ASSERT(parseTask->kind == ParseTaskKind::MultiStencilsDecode);
+ auto task = static_cast<MultiStencilsDecodeTask*>(parseTask.get().get());
+ size_t expectedLength = task->sources->length();
+
+ if (!stencils->reserve(parseTask->stencils.length())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ for (auto& stencil : parseTask->stencils) {
+ stencils->infallibleEmplaceBack(stencil.forget());
+ }
+
+ if (stencils->length() != expectedLength) {
+    // No error was reported, but fewer stencils were produced than expected.
+    // Assume we hit an out-of-memory condition.
+ MOZ_ASSERT(false, "Expected more stencils");
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool GlobalHelperThreadState::finishMultiStencilsDecodeTask(
+ JSContext* cx, JS::OffThreadToken* token,
+ mozilla::Vector<RefPtr<JS::Stencil>>* stencils) {
+ return finishMultiParseTask(cx, ParseTaskKind::MultiStencilsDecode, token,
+ stencils);
+}
+
+void GlobalHelperThreadState::cancelParseTask(JSRuntime* rt,
+ JS::OffThreadToken* token) {
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(token);
+
+ ParseTask* task = static_cast<ParseTask*>(token);
+
+ GlobalHelperThreadState::ParseTaskVector& worklist =
+ HelperThreadState().parseWorklist(lock);
+ for (size_t i = 0; i < worklist.length(); i++) {
+ if (task == worklist[i]) {
+ MOZ_ASSERT(task->runtimeMatches(rt));
+ HelperThreadState().remove(worklist, &i);
+ return;
+ }
+ }
+
+ // If task is currently running, wait for it to complete.
+ while (true) {
+ bool foundTask = false;
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (helper->is<ParseTask>() && helper->as<ParseTask>() == task) {
+ MOZ_ASSERT(helper->as<ParseTask>()->runtimeMatches(rt));
+ foundTask = true;
+ break;
+ }
+ }
+
+ if (!foundTask) {
+ break;
+ }
+
+ HelperThreadState().wait(lock);
+ }
+
+ auto& finished = HelperThreadState().parseFinishedList(lock);
+ for (auto* t : finished) {
+ if (task == t) {
+ MOZ_ASSERT(task->runtimeMatches(rt));
+ task->remove();
+ HelperThreadState().destroyParseTask(rt, task);
+ return;
+ }
+ }
+}
+
+void GlobalHelperThreadState::destroyParseTask(JSRuntime* rt,
+ ParseTask* parseTask) {
+ MOZ_ASSERT(!parseTask->isInList());
+ js_delete(parseTask);
+}
+
+void JSContext::addPendingOverRecursed() {
+ if (errors_) {
+ errors_->overRecursed = true;
+ }
+}
+
+void JSContext::addPendingOutOfMemory() {
+ // Keep in sync with recoverFromOutOfMemory.
+ if (errors_) {
+ errors_->outOfMemory = true;
+ }
+}
+
+bool js::EnqueueOffThreadCompression(JSContext* cx,
+ UniquePtr<SourceCompressionTask> task) {
+ MOZ_ASSERT(cx->isMainThreadContext());
+
+ AutoLockHelperThreadState lock;
+
+ auto& pending = HelperThreadState().compressionPendingList(lock);
+ if (!pending.append(std::move(task))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+void js::StartHandlingCompressionsOnGC(JSRuntime* runtime) {
+ AutoLockHelperThreadState lock;
+ HelperThreadState().startHandlingCompressionTasks(
+ GlobalHelperThreadState::ScheduleCompressionTask::GC, runtime, lock);
+}
+
+template <typename T>
+static void ClearCompressionTaskList(T& list, JSRuntime* runtime) {
+ for (size_t i = 0; i < list.length(); i++) {
+ if (list[i]->runtimeMatches(runtime)) {
+ HelperThreadState().remove(list, &i);
+ }
+ }
+}
+
+void js::CancelOffThreadCompressions(JSRuntime* runtime) {
+ if (!CanUseExtraThreads()) {
+ return;
+ }
+
+ AutoLockHelperThreadState lock;
+
+ // Cancel all pending compression tasks.
+ ClearCompressionTaskList(HelperThreadState().compressionPendingList(lock),
+ runtime);
+ ClearCompressionTaskList(HelperThreadState().compressionWorklist(lock),
+ runtime);
+
+ // Cancel all in-process compression tasks and wait for them to join so we
+ // clean up the finished tasks.
+ while (true) {
+ bool inProgress = false;
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (!helper->is<SourceCompressionTask>()) {
+ continue;
+ }
+
+ if (helper->as<SourceCompressionTask>()->runtimeMatches(runtime)) {
+ inProgress = true;
+ }
+ }
+
+ if (!inProgress) {
+ break;
+ }
+
+ HelperThreadState().wait(lock);
+ }
+
+ // Clean up finished tasks.
+ ClearCompressionTaskList(HelperThreadState().compressionFinishedList(lock),
+ runtime);
+}
+
+void js::AttachFinishedCompressions(JSRuntime* runtime,
+ AutoLockHelperThreadState& lock) {
+ auto& finished = HelperThreadState().compressionFinishedList(lock);
+ for (size_t i = 0; i < finished.length(); i++) {
+ if (finished[i]->runtimeMatches(runtime)) {
+ UniquePtr<SourceCompressionTask> compressionTask(std::move(finished[i]));
+ HelperThreadState().remove(finished, &i);
+ compressionTask->complete();
+ }
+ }
+}
+
+void js::SweepPendingCompressions(AutoLockHelperThreadState& lock) {
+ auto& pending = HelperThreadState().compressionPendingList(lock);
+ for (size_t i = 0; i < pending.length(); i++) {
+ if (pending[i]->shouldCancel()) {
+ HelperThreadState().remove(pending, &i);
+ }
+ }
+}
+
+void js::RunPendingSourceCompressions(JSRuntime* runtime) {
+ if (!CanUseExtraThreads()) {
+ return;
+ }
+
+ AutoLockHelperThreadState lock;
+
+ HelperThreadState().startHandlingCompressionTasks(
+ GlobalHelperThreadState::ScheduleCompressionTask::API, nullptr, lock);
+
+ // Wait until all tasks have started compression.
+ while (!HelperThreadState().compressionWorklist(lock).empty()) {
+ HelperThreadState().wait(lock);
+ }
+
+ // Wait for all in-process compression tasks to complete.
+ HelperThreadState().waitForAllTasksLocked(lock);
+
+ AttachFinishedCompressions(runtime, lock);
+}
+
+void PromiseHelperTask::executeAndResolveAndDestroy(JSContext* cx) {
+ execute();
+ run(cx, JS::Dispatchable::NotShuttingDown);
+}
+
+void PromiseHelperTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
+ {
+ AutoUnlockHelperThreadState unlock(lock);
+ execute();
+ }
+
+ // Don't release the lock between dispatching the resolve and destroy
+ // operation (which may start immediately on another thread) and returning
+ // from this method.
+
+ dispatchResolveAndDestroy(lock);
+}
+
+bool js::StartOffThreadPromiseHelperTask(JSContext* cx,
+ UniquePtr<PromiseHelperTask> task) {
+ // Execute synchronously if there are no helper threads.
+ if (!CanUseExtraThreads()) {
+ task.release()->executeAndResolveAndDestroy(cx);
+ return true;
+ }
+
+ if (!HelperThreadState().submitTask(task.get())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ (void)task.release();
+ return true;
+}
+
+bool js::StartOffThreadPromiseHelperTask(PromiseHelperTask* task) {
+ MOZ_ASSERT(CanUseExtraThreads());
+
+ return HelperThreadState().submitTask(task);
+}
+
+bool GlobalHelperThreadState::submitTask(PromiseHelperTask* task) {
+ AutoLockHelperThreadState lock;
+
+ if (!promiseHelperTasks(lock).append(task)) {
+ return false;
+ }
+
+ dispatch(DispatchReason::NewTask, lock);
+ return true;
+}
+
+void GlobalHelperThreadState::trace(JSTracer* trc) {
+ AutoLockHelperThreadState lock;
+
+#ifdef DEBUG
+ // Since we hold the helper thread lock here we must disable GCMarker's
+ // checking of the atom marking bitmap since that also relies on taking the
+ // lock.
+ GCMarker* marker = nullptr;
+ if (trc->isMarkingTracer()) {
+ marker = GCMarker::fromTracer(trc);
+ marker->setCheckAtomMarking(false);
+ }
+ auto reenableAtomMarkingCheck = mozilla::MakeScopeExit([marker] {
+ if (marker) {
+ marker->setCheckAtomMarking(true);
+ }
+ });
+#endif
+
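+  // Ion tasks in the worklist keep their LifoAlloc read-only while queued;
+  // temporarily make it writable so tracing can update GC pointers.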
+ for (auto task : ionWorklist(lock)) {
+ task->alloc().lifoAlloc()->setReadWrite();
+ task->trace(trc);
+ task->alloc().lifoAlloc()->setReadOnly();
+ }
+ for (auto task : ionFinishedList(lock)) {
+ task->trace(trc);
+ }
+
+ for (auto* helper : HelperThreadState().helperTasks(lock)) {
+ if (helper->is<jit::IonCompileTask>()) {
+ helper->as<jit::IonCompileTask>()->trace(trc);
+ }
+ }
+
+ JSRuntime* rt = trc->runtime();
+ if (auto* jitRuntime = rt->jitRuntime()) {
+ jit::IonCompileTask* task = jitRuntime->ionLazyLinkList(rt).getFirst();
+ while (task) {
+ task->trace(trc);
+ task = task->getNext();
+ }
+ }
+
+ for (auto& parseTask : parseWorklist_) {
+ parseTask->trace(trc);
+ }
+ for (auto parseTask : parseFinishedList_) {
+ parseTask->trace(trc);
+ }
+}
+
+// Definition of helper thread tasks.
+//
+// Priority is determined by the order they're listed here.
+const GlobalHelperThreadState::Selector GlobalHelperThreadState::selectors[] = {
+ &GlobalHelperThreadState::maybeGetGCParallelTask,
+ &GlobalHelperThreadState::maybeGetIonCompileTask,
+ &GlobalHelperThreadState::maybeGetWasmTier1CompileTask,
+ &GlobalHelperThreadState::maybeGetPromiseHelperTask,
+ &GlobalHelperThreadState::maybeGetParseTask,
+ &GlobalHelperThreadState::maybeGetFreeDelazifyTask,
+ &GlobalHelperThreadState::maybeGetDelazifyTask,
+ &GlobalHelperThreadState::maybeGetCompressionTask,
+ &GlobalHelperThreadState::maybeGetLowPrioIonCompileTask,
+ &GlobalHelperThreadState::maybeGetIonFreeTask,
+ &GlobalHelperThreadState::maybeGetWasmTier2CompileTask,
+ &GlobalHelperThreadState::maybeGetWasmTier2GeneratorTask};
+
+bool GlobalHelperThreadState::canStartTasks(
+ const AutoLockHelperThreadState& lock) {
+ return canStartGCParallelTask(lock) || canStartIonCompileTask(lock) ||
+ canStartWasmTier1CompileTask(lock) ||
+ canStartPromiseHelperTask(lock) || canStartParseTask(lock) ||
+ canStartFreeDelazifyTask(lock) || canStartDelazifyTask(lock) ||
+ canStartCompressionTask(lock) || canStartIonFreeTask(lock) ||
+ canStartWasmTier2CompileTask(lock) ||
+ canStartWasmTier2GeneratorTask(lock);
+}
+
+void JS::RunHelperThreadTask() {
+ MOZ_ASSERT(CanUseExtraThreads());
+
+ AutoLockHelperThreadState lock;
+
+ if (!gHelperThreadState || HelperThreadState().isTerminating(lock)) {
+ return;
+ }
+
+ HelperThreadState().runOneTask(lock);
+}
+
+void GlobalHelperThreadState::runOneTask(AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(tasksPending_ > 0);
+ tasksPending_--;
+
+  // The selectors may depend on the HelperThreadState not changing between
+  // task selection and task execution; in particular, on new tasks not being
+  // added (because of the LIFO structure of the work lists). Unlocking the
+  // HelperThreadState between task selection and execution is not
+  // well-defined.
+ HelperThreadTask* task = findHighestPriorityTask(lock);
+ if (task) {
+ runTaskLocked(task, lock);
+ dispatch(DispatchReason::FinishedTask, lock);
+ }
+
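+  // Wake up any threads waiting for tasks to complete.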
+ notifyAll(lock);
+}
+
+HelperThreadTask* GlobalHelperThreadState::findHighestPriorityTask(
+ const AutoLockHelperThreadState& locked) {
+ // Return the highest priority task that is ready to start, or nullptr.
+
+ for (const auto& selector : selectors) {
+ if (auto* task = (this->*(selector))(locked)) {
+ return task;
+ }
+ }
+
+ return nullptr;
+}
+
+void GlobalHelperThreadState::runTaskLocked(HelperThreadTask* task,
+ AutoLockHelperThreadState& locked) {
+ JS::AutoSuppressGCAnalysis nogc;
+
+ HelperThreadState().helperTasks(locked).infallibleEmplaceBack(task);
+
+ ThreadType threadType = task->threadType();
+ js::oom::SetThreadType(threadType);
+ runningTaskCount[threadType]++;
+ totalCountRunningTasks++;
+
+ task->runHelperThreadTask(locked);
+
+  // Remove the task from helperTasks.
+ HelperThreadState().helperTasks(locked).eraseIfEqual(task);
+
+ totalCountRunningTasks--;
+ runningTaskCount[threadType]--;
+
+ js::oom::SetThreadType(js::THREAD_TYPE_NONE);
+}
diff --git a/js/src/vm/HelperThreads.h b/js/src/vm/HelperThreads.h
new file mode 100644
index 0000000000..7b44a102ea
--- /dev/null
+++ b/js/src/vm/HelperThreads.h
@@ -0,0 +1,292 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * API for managing off-thread work.
+ */
+
+#ifndef vm_HelperThreads_h
+#define vm_HelperThreads_h
+
+#include "mozilla/Variant.h"
+
+#include "js/OffThreadScriptCompilation.h"
+#include "js/shadow/Zone.h"
+#include "js/Transcoding.h"
+#include "js/UniquePtr.h"
+#include "threading/LockGuard.h"
+#include "threading/Mutex.h"
+#include "wasm/WasmConstants.h"
+
+namespace mozilla {
+union Utf8Unit;
+}
+
+namespace JS {
+class OffThreadToken {};
+class JS_PUBLIC_API ReadOnlyCompileOptions;
+class Zone;
+
+template <typename UnitT>
+class SourceText;
+} // namespace JS
+
+namespace js {
+
+class AutoLockHelperThreadState;
+struct PromiseHelperTask;
+class SourceCompressionTask;
+
+namespace frontend {
+struct CompilationStencil;
+}
+
+namespace gc {
+class GCRuntime;
+}
+
+namespace jit {
+class IonCompileTask;
+class IonFreeTask;
+} // namespace jit
+
+namespace wasm {
+struct CompileTask;
+struct CompileTaskState;
+struct Tier2GeneratorTask;
+using UniqueTier2GeneratorTask = UniquePtr<Tier2GeneratorTask>;
+} // namespace wasm
+
+/*
+ * Lock protecting all mutable shared state accessed by helper threads, and used
+ * by all condition variables.
+ */
+extern Mutex gHelperThreadLock MOZ_UNANNOTATED;
+
+class MOZ_RAII AutoLockHelperThreadState : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockHelperThreadState() : Base(gHelperThreadLock) {}
+};
+
+class MOZ_RAII AutoUnlockHelperThreadState : public UnlockGuard<Mutex> {
+ using Base = UnlockGuard<Mutex>;
+
+ public:
+ explicit AutoUnlockHelperThreadState(AutoLockHelperThreadState& locked)
+ : Base(locked) {}
+};
+
+// Create data structures used by helper threads.
+bool CreateHelperThreadsState();
+
+// Destroy data structures used by helper threads.
+void DestroyHelperThreadsState();
+
+// Initialize helper threads unless already initialized.
+bool EnsureHelperThreadsInitialized();
+
+size_t GetHelperThreadCount();
+size_t GetHelperThreadCPUCount();
+size_t GetMaxWasmCompilationThreads();
+
+// This allows the JS shell to override GetCPUCount() when passed the
+// --thread-count=N option.
+bool SetFakeCPUCount(size_t count);
+
+// Enqueues a wasm compilation task.
+bool StartOffThreadWasmCompile(wasm::CompileTask* task, wasm::CompileMode mode);
+
+// Remove any pending wasm compilation tasks queued with
+// StartOffThreadWasmCompile that match the arguments. Return the number
+// removed.
+size_t RemovePendingWasmCompileTasks(const wasm::CompileTaskState& taskState,
+ wasm::CompileMode mode,
+ const AutoLockHelperThreadState& lock);
+
+// Enqueues a wasm Tier-2 generator task.
+void StartOffThreadWasmTier2Generator(wasm::UniqueTier2GeneratorTask task);
+
+// Cancel all background Wasm Tier-2 compilations.
+void CancelOffThreadWasmTier2Generator();
+
+/*
+ * If helper threads are available, call execute() then
+ * dispatchResolveAndDestroy() on the given task in a helper thread. If no
+ * helper threads are available, the given task is executed, resolved, and
+ * destroyed synchronously.
+ *
+ * This function takes ownership of task unconditionally; if it fails, task is
+ * deleted.
+ */
+bool StartOffThreadPromiseHelperTask(JSContext* cx,
+ UniquePtr<PromiseHelperTask> task);
+
+/*
+ * Like the JSContext-accepting version, but only safe to use when helper
+ * threads are available, so we can be sure we'll never need to fall back on
+ * synchronous execution.
+ *
+ * This function can be called from any thread, but takes ownership of the task
+ * only on success. On OOM, it is the caller's responsibility to arrange for the
+ * task to be cleaned up properly.
+ */
+bool StartOffThreadPromiseHelperTask(PromiseHelperTask* task);
+
+/*
+ * Schedule an off-thread Ion compilation for a script, given a task.
+ */
+bool StartOffThreadIonCompile(jit::IonCompileTask* task,
+ const AutoLockHelperThreadState& lock);
+
+/*
+ * Schedule deletion of Ion compilation data.
+ */
+bool StartOffThreadIonFree(jit::IonCompileTask* task,
+ const AutoLockHelperThreadState& lock);
+
+void FinishOffThreadIonCompile(jit::IonCompileTask* task,
+ const AutoLockHelperThreadState& lock);
+
+struct ZonesInState {
+ JSRuntime* runtime;
+ JS::shadow::Zone::GCState state;
+};
+
+using CompilationSelector = mozilla::Variant<JSScript*, JS::Realm*, JS::Zone*,
+ ZonesInState, JSRuntime*>;
+
+/*
+ * Cancel scheduled or in progress Ion compilations.
+ */
+void CancelOffThreadIonCompile(const CompilationSelector& selector);
+
+inline void CancelOffThreadIonCompile(JSScript* script) {
+ CancelOffThreadIonCompile(CompilationSelector(script));
+}
+
+inline void CancelOffThreadIonCompile(JS::Realm* realm) {
+ CancelOffThreadIonCompile(CompilationSelector(realm));
+}
+
+inline void CancelOffThreadIonCompile(JS::Zone* zone) {
+ CancelOffThreadIonCompile(CompilationSelector(zone));
+}
+
+inline void CancelOffThreadIonCompile(JSRuntime* runtime,
+ JS::shadow::Zone::GCState state) {
+ CancelOffThreadIonCompile(CompilationSelector(ZonesInState{runtime, state}));
+}
+
+inline void CancelOffThreadIonCompile(JSRuntime* runtime) {
+ CancelOffThreadIonCompile(CompilationSelector(runtime));
+}
+
+#ifdef DEBUG
+bool HasOffThreadIonCompile(JS::Realm* realm);
+#endif
+
+// True iff the current thread is running a ParseTask or a DelazifyTask.
+bool CurrentThreadIsParseThread();
+
+/*
+ * Cancel all scheduled, in progress or finished parses for runtime.
+ *
+ * Parse tasks which have completed but for which JS::FinishOffThreadScript (or
+ * equivalent) has not been called are removed from the system. This is only
+ * safe to do during shutdown, or if you know that the main thread isn't waiting
+ * for tasks to complete.
+ */
+void CancelOffThreadParses(JSRuntime* runtime);
+
+/*
+ * Cancel all scheduled or in progress eager delazification phases for a
+ * runtime.
+ */
+void CancelOffThreadDelazify(JSRuntime* runtime);
+
+/*
+ * Wait for all delazification to complete.
+ */
+void WaitForAllDelazifyTasks(JSRuntime* rt);
+
+/*
+ * Start a parse/emit cycle for a stream of source. The characters must stay
+ * alive until the compilation finishes.
+ */
+
+JS::OffThreadToken* StartOffThreadCompileToStencil(
+ JSContext* cx, const JS::ReadOnlyCompileOptions& options,
+ JS::SourceText<char16_t>& srcBuf, JS::OffThreadCompileCallback callback,
+ void* callbackData);
+JS::OffThreadToken* StartOffThreadCompileToStencil(
+ JSContext* cx, const JS::ReadOnlyCompileOptions& options,
+ JS::SourceText<mozilla::Utf8Unit>& srcBuf,
+ JS::OffThreadCompileCallback callback, void* callbackData);
+
+JS::OffThreadToken* StartOffThreadCompileModuleToStencil(
+ JSContext* cx, const JS::ReadOnlyCompileOptions& options,
+ JS::SourceText<char16_t>& srcBuf, JS::OffThreadCompileCallback callback,
+ void* callbackData);
+JS::OffThreadToken* StartOffThreadCompileModuleToStencil(
+ JSContext* cx, const JS::ReadOnlyCompileOptions& options,
+ JS::SourceText<mozilla::Utf8Unit>& srcBuf,
+ JS::OffThreadCompileCallback callback, void* callbackData);
+
+JS::OffThreadToken* StartOffThreadDecodeStencil(
+ JSContext* cx, const JS::DecodeOptions& options,
+ const JS::TranscodeRange& range, JS::OffThreadCompileCallback callback,
+ void* callbackData);
+
+JS::OffThreadToken* StartOffThreadDecodeMultiStencils(
+ JSContext* cx, const JS::DecodeOptions& options,
+ JS::TranscodeSources& sources, JS::OffThreadCompileCallback callback,
+ void* callbackData);
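+
+// Illustrative embedder-side sketch (assumptions: |chars|/|length| name the
+// source buffer, and |callback|/|data| are the completion callback and its
+// payload; the main-thread finish step lives in the public JSAPI headers):
+//
+//   JS::SourceText<char16_t> srcBuf;
+//   if (!srcBuf.init(cx, chars, length, JS::SourceOwnership::Borrowed)) {
+//     return false;  // |chars| must stay alive until compilation finishes
+//   }
+//   JS::OffThreadToken* token =
+//       StartOffThreadCompileToStencil(cx, options, srcBuf, callback, data);
+//   if (!token) {
+//     return false;
+//   }
+//   // Later, on the main thread, the token passed to |callback| is handed
+//   // to the JSAPI finish function to obtain the compiled stencil.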
+
+// Start off-thread delazification task, to race the delazification of inner
+// functions.
+void StartOffThreadDelazification(JSContext* cx,
+ const JS::ReadOnlyCompileOptions& options,
+ const frontend::CompilationStencil& stencil);
+
+// Drain the task queues and wait for all helper threads to finish running.
+//
+// Note that helper threads are shared between runtimes and it's possible that
+// another runtime could saturate the helper thread system and cause this to
+// never return.
+void WaitForAllHelperThreads();
+void WaitForAllHelperThreads(AutoLockHelperThreadState& lock);
+
+// Enqueue a compression job to be processed later. These are started at the
+// start of the major GC after the next one.
+bool EnqueueOffThreadCompression(JSContext* cx,
+ UniquePtr<SourceCompressionTask> task);
+
+// Start handling any compression tasks for this runtime. Called at the start of
+// major GC.
+void StartHandlingCompressionsOnGC(JSRuntime* rt);
+
+// Cancel all scheduled, in-progress, or finished compression tasks for a
+// runtime.
+void CancelOffThreadCompressions(JSRuntime* runtime);
+
+void AttachFinishedCompressions(JSRuntime* runtime,
+ AutoLockHelperThreadState& lock);
+
+// Sweep pending tasks that are holding onto should-be-dead ScriptSources.
+void SweepPendingCompressions(AutoLockHelperThreadState& lock);
+
+// Run all pending source compression tasks synchronously, for testing
+// purposes.
+void RunPendingSourceCompressions(JSRuntime* runtime);
+
+// Returns false if the off-thread source compression mechanism isn't being
+// used. This happens on low-core-count machines, where we are concerned about
+// blocking main-thread execution.
+bool IsOffThreadSourceCompressionEnabled();
+
+} // namespace js
+
+#endif /* vm_HelperThreads_h */
diff --git a/js/src/vm/Id.cpp b/js/src/vm/Id.cpp
new file mode 100644
index 0000000000..ee713fa9d0
--- /dev/null
+++ b/js/src/vm/Id.cpp
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/Id.h"
+#include "js/RootingAPI.h"
+
+#include "vm/JSContext.h"
+#include "vm/SymbolType.h"
+
+#include "vm/JSAtom-inl.h"
+
+using namespace js;
+
+static const JS::PropertyKey voidKeyValue = JS::PropertyKey::Void();
+
+const JS::HandleId JS::VoidHandlePropertyKey =
+ JS::HandleId::fromMarkedLocation(&voidKeyValue);
+
+bool JS::PropertyKey::isPrivateName() const {
+ return isSymbol() && toSymbol()->isPrivateName();
+}
+
+bool JS::PropertyKey::isWellKnownSymbol(JS::SymbolCode code) const {
+ MOZ_ASSERT(uint32_t(code) < WellKnownSymbolLimit);
+ if (!isSymbol()) {
+ return false;
+ }
+ return toSymbol()->code() == code;
+}
+
+/* static */ JS::PropertyKey JS::PropertyKey::fromPinnedString(JSString* str) {
+ MOZ_ASSERT(AtomIsPinned(TlsContext.get(), &str->asAtom()));
+ return js::AtomToId(&str->asAtom());
+}
+
+/* static */ bool JS::PropertyKey::isNonIntAtom(JSAtom* atom) {
+ uint32_t index;
+ if (!atom->isIndex(&index)) {
+ return true;
+ }
+ static_assert(PropertyKey::IntMin == 0);
+ return index > PropertyKey::IntMax;
+}
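+
+// For example, the atom "0" is an index atom within int range, so it is not a
+// non-int atom; "foo" is not an index at all, and "2147483648" is an index
+// that exceeds PropertyKey::IntMax, so both of the latter are non-int atoms.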
+
+/* static */ bool JS::PropertyKey::isNonIntAtom(JSString* str) {
+ return JS::PropertyKey::isNonIntAtom(&str->asAtom());
+}
diff --git a/js/src/vm/Initialization.cpp b/js/src/vm/Initialization.cpp
new file mode 100644
index 0000000000..a0ff682327
--- /dev/null
+++ b/js/src/vm/Initialization.cpp
@@ -0,0 +1,357 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* SpiderMonkey initialization and shutdown code. */
+
+#include "js/Initialization.h"
+
+#include "mozilla/Assertions.h"
+#if JS_HAS_INTL_API
+# include "mozilla/intl/ICU4CLibrary.h"
+#endif
+#include "mozilla/TextUtils.h"
+
+#include "jstypes.h"
+
+#include "builtin/AtomicsObject.h"
+#include "builtin/TestingFunctions.h"
+#include "gc/Statistics.h"
+#include "jit/Assembler.h"
+#include "jit/Ion.h"
+#include "jit/JitOptions.h"
+#include "jit/Simulator.h"
+#include "js/Utility.h"
+#include "threading/ProtectedData.h" // js::AutoNoteSingleThreadedRegion
+#include "util/Poison.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/DateTime.h"
+#include "vm/HelperThreads.h"
+#include "vm/Runtime.h"
+#include "vm/Time.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+#include "wasm/WasmProcess.h"
+
+using js::FutexThread;
+using JS::detail::InitState;
+using JS::detail::libraryInitState;
+
+InitState JS::detail::libraryInitState;
+
+#ifdef DEBUG
+static unsigned MessageParameterCount(const char* format) {
+ unsigned numfmtspecs = 0;
+ for (const char* fmt = format; *fmt != '\0'; fmt++) {
+ if (*fmt == '{' && mozilla::IsAsciiDigit(fmt[1])) {
+ ++numfmtspecs;
+ }
+ }
+ return numfmtspecs;
+}
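+
+// For example, MessageParameterCount("{0} is not a function, it is {1}")
+// returns 2: each '{' followed by an ASCII digit counts as one parameter.
+// (The format string is illustrative, not taken from ErrorNumbers.msg.)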
+
+static void CheckMessageParameterCounts() {
+ // Assert that each message format has the correct number of braced
+ // parameters.
+# define MSG_DEF(name, count, exception, format) \
+ MOZ_ASSERT(MessageParameterCount(format) == count);
+# include "js/friend/ErrorNumbers.msg"
+# undef MSG_DEF
+}
+#endif /* DEBUG */
+
+#if defined(JS_RUNTIME_CANONICAL_NAN)
+namespace JS::detail {
+uint64_t CanonicalizedNaNBits;
+} // namespace JS::detail
+#endif
+
+static void SetupCanonicalNaN() {
+ // Compute the standard NaN value that the hardware generates.
+ volatile double infinity = mozilla::PositiveInfinity<double>();
+ volatile double hardwareNaN = infinity - infinity;
+ uint64_t hardwareNaNBits = mozilla::BitwiseCast<uint64_t>(hardwareNaN);
+ hardwareNaNBits &= ~mozilla::FloatingPoint<double>::kSignBit;
+
+#if defined(JS_NONCANONICAL_HARDWARE_NAN)
+ // If the NaN generated by hardware operations is not compatible
+ // with our canonical NaN, we must canonicalize every double. This
+ // is implemented for C++ code in Value::bitsFromDouble, but is not
+ // implemented for JIT code.
+# if !defined(JS_CODEGEN_NONE)
+# error "No JIT support for non-canonical hardware NaN"
+# endif
+
+ (void)hardwareNaNBits;
+#elif defined(JS_RUNTIME_CANONICAL_NAN)
+ // Determine canonical NaN at startup. It must still match the ValueIsDouble
+ // requirements.
+ MOZ_RELEASE_ASSERT(JS::detail::ValueIsDouble(hardwareNaNBits));
+ JS::detail::CanonicalizedNaNBits = hardwareNaNBits;
+#else
+ // Assert that the NaN generated by hardware operations is
+ // compatible with the canonical NaN we use for JS::Value. This is
+ // true for all of our supported platforms, but not for SPARC.
+ MOZ_RELEASE_ASSERT(hardwareNaNBits == JS::detail::CanonicalizedNaNBits,
+ "Unexpected default hardware NaN value");
+#endif
+}
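+
+// Illustrative note (assuming IEEE-754 binary64 arithmetic): the
+// infinity - infinity subtraction in SetupCanonicalNaN yields a quiet NaN
+// whose bits, once the sign bit is masked off, are typically
+// 0x7FF8000000000000, and on supported platforms that is also the canonical
+// NaN value used for JS::Value.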
+
+#define RETURN_IF_FAIL(code) \
+ do { \
+ if (!code) return #code " failed"; \
+ } while (0)
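+
+// For example, RETURN_IF_FAIL(js::Mutex::Init()) makes a failed
+// initialization return the diagnostic string "js::Mutex::Init() failed".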
+
+extern "C" void install_rust_hooks();
+
+JS_PUBLIC_API const char* JS::detail::InitWithFailureDiagnostic(
+ bool isDebugBuild, FrontendOnly frontendOnly /* = FrontendOnly::No */) {
+ // Verify that our DEBUG setting matches the caller's.
+#ifdef DEBUG
+ MOZ_RELEASE_ASSERT(isDebugBuild);
+#else
+ MOZ_RELEASE_ASSERT(!isDebugBuild);
+#endif
+
+ MOZ_ASSERT(libraryInitState == InitState::Uninitialized,
+ "must call JS_Init once before any JSAPI operation except "
+ "JS_SetICUMemoryFunctions");
+ MOZ_ASSERT(!JSRuntime::hasLiveRuntimes(),
+ "how do we have live runtimes before JS_Init?");
+
+ libraryInitState = InitState::Initializing;
+
+#ifdef JS_STANDALONE
+  // The Rust hooks are initialized by Gecko on non-standalone builds.
+ install_rust_hooks();
+#endif
+
+ PRMJ_NowInit();
+
+ if (frontendOnly == FrontendOnly::No) {
+ // The first invocation of `ProcessCreation` creates a temporary thread
+    // and crashes if that fails, e.g. because we're out of memory. To prevent
+ // that from happening at some later time, get it out of the way during
+ // startup.
+ mozilla::TimeStamp::ProcessCreation();
+ }
+
+#ifdef DEBUG
+ CheckMessageParameterCounts();
+#endif
+
+ SetupCanonicalNaN();
+
+ if (frontendOnly == FrontendOnly::No) {
+ RETURN_IF_FAIL(js::TlsContext.init());
+ }
+
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ RETURN_IF_FAIL(js::oom::InitThreadType());
+#endif
+
+#if defined(FUZZING)
+ js::oom::InitLargeAllocLimit();
+#endif
+
+#if defined(JS_GC_ALLOW_EXTRA_POISONING)
+ if (getenv("JSGC_EXTRA_POISONING")) {
+ js::gExtraPoisoningEnabled = true;
+ }
+#endif
+
+ js::InitMallocAllocator();
+
+ RETURN_IF_FAIL(js::Mutex::Init());
+
+ js::gc::InitMemorySubsystem(); // Ensure gc::SystemPageSize() works.
+
+ RETURN_IF_FAIL(js::wasm::Init());
+
+ js::coverage::InitLCov();
+
+ if (frontendOnly == FrontendOnly::No) {
+ RETURN_IF_FAIL(js::jit::InitializeJit());
+ }
+
+ RETURN_IF_FAIL(js::InitDateTimeState());
+
+ if (frontendOnly == FrontendOnly::No) {
+#ifdef MOZ_VTUNE
+ RETURN_IF_FAIL(js::vtune::Initialize());
+#endif
+ }
+
+#if JS_HAS_INTL_API
+ if (mozilla::intl::ICU4CLibrary::Initialize().isErr()) {
+ return "ICU4CLibrary::Initialize() failed";
+ }
+#endif // JS_HAS_INTL_API
+
+ if (frontendOnly == FrontendOnly::No) {
+ RETURN_IF_FAIL(js::CreateHelperThreadsState());
+ RETURN_IF_FAIL(FutexThread::initialize());
+ RETURN_IF_FAIL(js::gcstats::Statistics::initialize());
+ RETURN_IF_FAIL(js::InitTestingFunctions());
+ }
+
+ RETURN_IF_FAIL(js::SharedImmutableStringsCache::initSingleton());
+ RETURN_IF_FAIL(js::frontend::WellKnownParserAtoms::initSingleton());
+
+ if (frontendOnly == FrontendOnly::No) {
+#ifdef JS_SIMULATOR
+ RETURN_IF_FAIL(js::jit::SimulatorProcess::initialize());
+#endif
+
+#ifndef JS_CODEGEN_NONE
+ // This is forced by InitializeJit.
+ MOZ_ASSERT(js::jit::CPUFlagsHaveBeenComputed());
+#endif
+ }
+
+ libraryInitState = InitState::Running;
+ return nullptr;
+}
+
+#undef RETURN_IF_FAIL
+
+JS_PUBLIC_API bool JS::InitSelfHostedCode(JSContext* cx, SelfHostedCache cache,
+ SelfHostedWriter writer) {
+ MOZ_RELEASE_ASSERT(!cx->runtime()->hasInitializedSelfHosting(),
+ "JS::InitSelfHostedCode() called more than once");
+
+ js::AutoNoteSingleThreadedRegion anstr;
+
+ JSRuntime* rt = cx->runtime();
+
+ if (!rt->initSelfHostingStencil(cx, cache, writer)) {
+ return false;
+ }
+
+ if (!rt->initializeAtoms(cx)) {
+ return false;
+ }
+
+ if (!rt->initSelfHostingFromStencil(cx)) {
+ return false;
+ }
+
+ if (js::jit::HasJitBackend()) {
+ if (!rt->createJitRuntime(cx)) {
+ return false;
+ }
+ }
+
+ return true;
+}
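+
+// A minimal embedder startup/shutdown sequence, sketched for illustration
+// (error handling abbreviated; the names are public JSAPI entry points):
+//
+//   if (!JS_Init()) return 1;
+//   JSContext* cx = JS_NewContext(JS::DefaultHeapMaxBytes);
+//   if (!cx || !JS::InitSelfHostedCode(cx)) return 1;
+//   // ... create a global, compile and run scripts ...
+//   JS_DestroyContext(cx);
+//   JS_ShutDown();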
+
+static void ShutdownImpl(JS::detail::FrontendOnly frontendOnly) {
+ using FrontendOnly = JS::detail::FrontendOnly;
+
+ MOZ_ASSERT(
+ libraryInitState == InitState::Running,
+ "JS_ShutDown must only be called after JS_Init and can't race with it");
+#ifdef DEBUG
+ if (JSRuntime::hasLiveRuntimes()) {
+ // Gecko is too buggy to assert this just yet.
+ fprintf(stderr,
+ "WARNING: YOU ARE LEAKING THE WORLD (at least one JSRuntime "
+ "and everything alive inside it, that is) AT JS_ShutDown "
+ "TIME. FIX THIS!\n");
+ }
+#endif
+
+ js::frontend::WellKnownParserAtoms::freeSingleton();
+ js::SharedImmutableStringsCache::freeSingleton();
+
+ if (frontendOnly == FrontendOnly::No) {
+ FutexThread::destroy();
+
+ js::DestroyHelperThreadsState();
+
+#ifdef JS_SIMULATOR
+ js::jit::SimulatorProcess::destroy();
+#endif
+ }
+
+ js::wasm::ShutDown();
+
+ // The only difficult-to-address reason for the restriction that you can't
+ // call JS_Init/stuff/JS_ShutDown multiple times is the Windows PRMJ
+ // NowInit initialization code, which uses PR_CallOnce to initialize the
+ // PRMJ_Now subsystem. (For reinitialization to be permitted, we'd need to
+ // "reset" the called-once status -- doable, but more trouble than it's
+ // worth now.) Initializing that subsystem from JS_Init eliminates the
+ // problem, but initialization can take a comparatively long time (15ms or
+ // so), so we really don't want to do it in JS_Init, and we really do want
+ // to do it only when PRMJ_Now is eventually called.
+ PRMJ_NowShutdown();
+
+#if JS_HAS_INTL_API
+ mozilla::intl::ICU4CLibrary::Cleanup();
+#endif // JS_HAS_INTL_API
+
+ if (frontendOnly == FrontendOnly::No) {
+#ifdef MOZ_VTUNE
+ js::vtune::Shutdown();
+#endif // MOZ_VTUNE
+ }
+
+ js::FinishDateTimeState();
+
+ if (frontendOnly == FrontendOnly::No) {
+ js::jit::ShutdownJit();
+ }
+
+ MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), !js::WasmReservedBytes());
+
+ js::ShutDownMallocAllocator();
+
+ libraryInitState = InitState::ShutDown;
+}
+
+JS_PUBLIC_API void JS_ShutDown(void) {
+ ShutdownImpl(JS::detail::FrontendOnly::No);
+}
+
+JS_PUBLIC_API void JS_FrontendOnlyShutDown(void) {
+ ShutdownImpl(JS::detail::FrontendOnly::Yes);
+}
+
+JS_PUBLIC_API bool JS_SetICUMemoryFunctions(JS_ICUAllocFn allocFn,
+ JS_ICUReallocFn reallocFn,
+ JS_ICUFreeFn freeFn) {
+ MOZ_ASSERT(libraryInitState == InitState::Uninitialized,
+ "must call JS_SetICUMemoryFunctions before any other JSAPI "
+ "operation (including JS_Init)");
+
+#if JS_HAS_INTL_API
+ return mozilla::intl::ICU4CLibrary::SetMemoryFunctions(
+ {allocFn, reallocFn, freeFn})
+ .isOk();
+#else
+ return true;
+#endif
+}
+
+#if defined(ENABLE_WASM_SIMD) && \
+ (defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86))
+void JS::SetAVXEnabled(bool enabled) {
+ if (enabled) {
+ js::jit::CPUInfo::SetAVXEnabled();
+ } else {
+ js::jit::CPUInfo::SetAVXDisabled();
+ }
+}
+#endif
+
+JS_PUBLIC_API void JS::DisableJitBackend() {
+ MOZ_ASSERT(libraryInitState == InitState::Uninitialized,
+ "DisableJitBackend must be called before JS_Init");
+ MOZ_ASSERT(!JSRuntime::hasLiveRuntimes(),
+ "DisableJitBackend must be called before creating a JSContext");
+ js::jit::JitOptions.disableJitBackend = true;
+}
diff --git a/js/src/vm/InlineCharBuffer-inl.h b/js/src/vm/InlineCharBuffer-inl.h
new file mode 100644
index 0000000000..ac006a0d98
--- /dev/null
+++ b/js/src/vm/InlineCharBuffer-inl.h
@@ -0,0 +1,158 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_InlineCharBuffer_inl_h
+#define vm_InlineCharBuffer_inl_h
+
+#include "vm/StringType-inl.h"
+
+namespace js {
+
+template <typename CharT>
+struct MaximumInlineLength;
+
+template <>
+struct MaximumInlineLength<Latin1Char> {
+ static constexpr size_t value = JSFatInlineString::MAX_LENGTH_LATIN1;
+};
+
+template <>
+struct MaximumInlineLength<char16_t> {
+ static constexpr size_t value = JSFatInlineString::MAX_LENGTH_TWO_BYTE;
+};
+
+// Character buffer class used for ToLowerCase and ToUpperCase operations, as
+// well as other string operations where the final string length is known in
+// advance.
+//
+// Case conversion operations normally return a string with the same length as
+// the input string. To avoid over-allocation, we optimistically allocate an
+// array with the same size as the input string, and only when we detect
+// special-casing characters, which can change the output string length, do we
+// reallocate the output buffer to the final string length.
+//
+// As a further means to improve runtime performance, the character buffer
+// contains inline storage, so we don't need to heap-allocate an array when
+// a JSInlineString will be used for the output string.
+//
+// Why not use mozilla::Vector instead? mozilla::Vector doesn't provide enough
+// fine-grained control to avoid over-allocation when (re)allocating for exact
+// buffer sizes. This led to visible performance regressions in µ-benchmarks.
+template <typename CharT>
+class MOZ_NON_PARAM InlineCharBuffer {
+ static constexpr size_t InlineCapacity = MaximumInlineLength<CharT>::value;
+
+ CharT inlineStorage[InlineCapacity];
+ UniquePtr<CharT[], JS::FreePolicy> heapStorage;
+
+#ifdef DEBUG
+ // In debug mode, we keep track of the requested string lengths to ensure
+ // all character buffer methods are called in the correct order and with
+ // the expected argument values.
+ size_t lastRequestedLength = 0;
+
+ void assertValidRequest(size_t expectedLastLength, size_t length) {
+ MOZ_ASSERT(length >= expectedLastLength, "cannot shrink requested length");
+ MOZ_ASSERT(lastRequestedLength == expectedLastLength);
+ lastRequestedLength = length;
+ }
+#else
+ void assertValidRequest(size_t expectedLastLength, size_t length) {}
+#endif
+
+ public:
+ CharT* get() { return heapStorage ? heapStorage.get() : inlineStorage; }
+
+ bool maybeAlloc(JSContext* cx, size_t length) {
+ assertValidRequest(0, length);
+
+ if (length <= InlineCapacity) {
+ return true;
+ }
+
+ MOZ_ASSERT(!heapStorage, "heap storage already allocated");
+ heapStorage =
+ cx->make_pod_arena_array<CharT>(js::StringBufferArena, length);
+ return !!heapStorage;
+ }
+
+ bool maybeRealloc(JSContext* cx, size_t oldLength, size_t newLength) {
+ assertValidRequest(oldLength, newLength);
+
+ if (newLength <= InlineCapacity) {
+ return true;
+ }
+
+ if (!heapStorage) {
+ heapStorage =
+ cx->make_pod_arena_array<CharT>(js::StringBufferArena, newLength);
+ if (!heapStorage) {
+ return false;
+ }
+
+ MOZ_ASSERT(oldLength <= InlineCapacity);
+ mozilla::PodCopy(heapStorage.get(), inlineStorage, oldLength);
+ return true;
+ }
+
+ CharT* oldChars = heapStorage.release();
+ CharT* newChars = cx->pod_arena_realloc(js::StringBufferArena, oldChars,
+ oldLength, newLength);
+ if (!newChars) {
+ js_free(oldChars);
+ return false;
+ }
+
+ heapStorage.reset(newChars);
+ return true;
+ }
+
+ JSString* toStringDontDeflate(JSContext* cx, size_t length,
+ js::gc::Heap heap = js::gc::Heap::Default) {
+ MOZ_ASSERT(length == lastRequestedLength);
+
+ if (JSInlineString::lengthFits<CharT>(length)) {
+ MOZ_ASSERT(
+ !heapStorage,
+ "expected only inline storage when length fits in inline string");
+
+ if (JSString* str = TryEmptyOrStaticString(cx, inlineStorage, length)) {
+ return str;
+ }
+
+ mozilla::Range<const CharT> range(inlineStorage, length);
+ return NewInlineString<CanGC>(cx, range, heap);
+ }
+
+ MOZ_ASSERT(heapStorage,
+ "heap storage was not allocated for non-inline string");
+
+ return NewStringDontDeflate<CanGC>(cx, std::move(heapStorage), length,
+ heap);
+ }
+
+ JSString* toString(JSContext* cx, size_t length,
+ js::gc::Heap heap = js::gc::Heap::Default) {
+ MOZ_ASSERT(length == lastRequestedLength);
+
+ if (JSInlineString::lengthFits<CharT>(length)) {
+ MOZ_ASSERT(
+ !heapStorage,
+ "expected only inline storage when length fits in inline string");
+
+ return NewStringCopyN<CanGC>(cx, inlineStorage, length, heap);
+ }
+
+ MOZ_ASSERT(heapStorage,
+ "heap storage was not allocated for non-inline string");
+
+ return NewString<CanGC>(cx, std::move(heapStorage), length, heap);
+ }
+};
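+
+// Illustrative use, as in a case-conversion routine (a sketch; error handling
+// abbreviated, and |ConvertCase| stands in for the actual conversion step):
+//
+//   InlineCharBuffer<Latin1Char> buf;
+//   size_t len = input->length();
+//   if (!buf.maybeAlloc(cx, len)) {
+//     return nullptr;
+//   }
+//   ConvertCase(buf.get(), inputChars, len);
+//   return buf.toString(cx, len);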
+
+} /* namespace js */
+
+#endif /* vm_InlineCharBuffer_inl_h */
diff --git a/js/src/vm/InternalThreadPool.cpp b/js/src/vm/InternalThreadPool.cpp
new file mode 100644
index 0000000000..483e995254
--- /dev/null
+++ b/js/src/vm/InternalThreadPool.cpp
@@ -0,0 +1,289 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/InternalThreadPool.h"
+
+#include "mozilla/TimeStamp.h"
+
+#include "js/ProfilingCategory.h"
+#include "js/ProfilingStack.h"
+#include "threading/Thread.h"
+#include "util/NativeStack.h"
+#include "vm/HelperThreadState.h"
+#include "vm/JSContext.h"
+
+// We want our default stack size limit to be approximately 2MB, to be safe, but
+// expect most threads to use much less. On Linux, however, requesting a stack
+// of 2MB or larger risks the kernel allocating an entire 2MB huge page for it
+// on first access, which we do not want. To avoid this possibility, we subtract
+// 2 standard VM page sizes from our default.
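+// (That subtraction works out to 2 MiB - 8 KiB = 2,088,960 bytes.)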
+static const uint32_t kDefaultHelperStackSize = 2048 * 1024 - 2 * 4096;
+
+// TSan enforces a minimum stack size that's just slightly larger than our
+// default helper stack size. It does this to store blobs of TSan-specific
+// data on each thread's stack. Unfortunately, that means that even though
+// we'll actually receive a larger stack than we requested, the effective
+// usable space of that stack is significantly less than what we expect.
+// To offset TSan stealing our stack space from underneath us, double the
+// default.
+//
+// Note that we don't need this for ASan/MOZ_ASAN because ASan doesn't
+// require all the thread-specific state that TSan does.
+#if defined(MOZ_TSAN)
+static const uint32_t HELPER_STACK_SIZE = 2 * kDefaultHelperStackSize;
+#else
+static const uint32_t HELPER_STACK_SIZE = kDefaultHelperStackSize;
+#endif
+
+// These macros are identical in function to the same-named ones in
+// GeckoProfiler.h, but they are defined separately because SpiderMonkey can't
+// use GeckoProfiler.h.
+#define PROFILER_RAII_PASTE(id, line) id##line
+#define PROFILER_RAII_EXPAND(id, line) PROFILER_RAII_PASTE(id, line)
+#define PROFILER_RAII PROFILER_RAII_EXPAND(raiiObject, __LINE__)
+#define AUTO_PROFILER_LABEL(label, categoryPair) \
+ HelperThread::AutoProfilerLabel PROFILER_RAII( \
+ this, label, JS::ProfilingCategoryPair::categoryPair)
+
+using namespace js;
+
+namespace js {
+
+class HelperThread {
+ Thread thread;
+
+ /*
+   * The profiling stack for this helper thread, which can be used to push
+ * and pop label frames.
+ * This field being non-null indicates that this thread has been registered
+ * and needs to be unregistered at shutdown.
+ */
+ ProfilingStack* profilingStack = nullptr;
+
+ public:
+ HelperThread();
+ [[nodiscard]] bool init(InternalThreadPool* pool);
+
+ ThreadId threadId() { return thread.get_id(); }
+
+ void join();
+
+ static void ThreadMain(InternalThreadPool* pool, HelperThread* helper);
+ void threadLoop(InternalThreadPool* pool);
+
+ void ensureRegisteredWithProfiler();
+ void unregisterWithProfilerIfNeeded();
+
+ private:
+ struct AutoProfilerLabel {
+ AutoProfilerLabel(HelperThread* helperThread, const char* label,
+ JS::ProfilingCategoryPair categoryPair);
+ ~AutoProfilerLabel();
+
+ private:
+ ProfilingStack* profilingStack;
+ };
+};
+
+} // namespace js
+
+InternalThreadPool* InternalThreadPool::Instance = nullptr;
+
+/* static */ InternalThreadPool& InternalThreadPool::Get() {
+ MOZ_ASSERT(IsInitialized());
+ return *Instance;
+}
+
+/* static */
+bool InternalThreadPool::Initialize(size_t threadCount,
+ AutoLockHelperThreadState& lock) {
+ if (IsInitialized()) {
+ return true;
+ }
+
+ auto instance = MakeUnique<InternalThreadPool>();
+ if (!instance) {
+ return false;
+ }
+
+ if (!instance->ensureThreadCount(threadCount, lock)) {
+ instance->shutDown(lock);
+ return false;
+ }
+
+ Instance = instance.release();
+ HelperThreadState().setDispatchTaskCallback(DispatchTask, threadCount,
+ HELPER_STACK_SIZE, lock);
+ return true;
+}
+
+bool InternalThreadPool::ensureThreadCount(size_t threadCount,
+ AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(threads(lock).length() < threadCount);
+
+ if (!threads(lock).reserve(threadCount)) {
+ return false;
+ }
+
+ while (threads(lock).length() < threadCount) {
+ auto thread = js::MakeUnique<HelperThread>();
+ if (!thread || !thread->init(this)) {
+ return false;
+ }
+
+ threads(lock).infallibleEmplaceBack(std::move(thread));
+ }
+
+ return true;
+}
+
+size_t InternalThreadPool::threadCount(const AutoLockHelperThreadState& lock) {
+ return threads(lock).length();
+}
+
+/* static */
+void InternalThreadPool::ShutDown(AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(HelperThreadState().isTerminating(lock));
+
+ Get().shutDown(lock);
+ js_delete(Instance);
+ Instance = nullptr;
+}
+
+void InternalThreadPool::shutDown(AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(!terminating);
+ terminating = true;
+
+ notifyAll(lock);
+
+ for (auto& thread : threads(lock)) {
+ AutoUnlockHelperThreadState unlock(lock);
+ thread->join();
+ }
+}
+
+inline HelperThreadVector& InternalThreadPool::threads(
+ const AutoLockHelperThreadState& lock) {
+ return threads_.ref();
+}
+inline const HelperThreadVector& InternalThreadPool::threads(
+ const AutoLockHelperThreadState& lock) const {
+ return threads_.ref();
+}
+
+size_t InternalThreadPool::sizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf,
+ const AutoLockHelperThreadState& lock) const {
+ return sizeof(InternalThreadPool) +
+ threads(lock).sizeOfExcludingThis(mallocSizeOf);
+}
+
+/* static */
+void InternalThreadPool::DispatchTask(JS::DispatchReason reason) {
+ Get().dispatchTask(reason);
+}
+
+void InternalThreadPool::dispatchTask(JS::DispatchReason reason) {
+ gHelperThreadLock.assertOwnedByCurrentThread();
+ queuedTasks++;
+ if (reason == JS::DispatchReason::NewTask) {
+ wakeup.notify_one();
+ } else {
+ // We're called from a helper thread right before returning to
+ // HelperThread::threadLoop. There we will check queuedTasks so there's no
+ // need to wake up any threads.
+ MOZ_ASSERT(reason == JS::DispatchReason::FinishedTask);
+ MOZ_ASSERT(!TlsContext.get(), "we should be on a helper thread");
+ }
+}
+
+void InternalThreadPool::notifyAll(const AutoLockHelperThreadState& lock) {
+ wakeup.notify_all();
+}
+
+void InternalThreadPool::wait(AutoLockHelperThreadState& lock) {
+ wakeup.wait_for(lock, mozilla::TimeDuration::Forever());
+}
+
+HelperThread::HelperThread()
+ : thread(Thread::Options().setStackSize(HELPER_STACK_SIZE)) {}
+
+bool HelperThread::init(InternalThreadPool* pool) {
+ return thread.init(HelperThread::ThreadMain, pool, this);
+}
+
+void HelperThread::join() { thread.join(); }
+
+/* static */
+void HelperThread::ThreadMain(InternalThreadPool* pool, HelperThread* helper) {
+ ThisThread::SetName("JS Helper");
+
+ helper->ensureRegisteredWithProfiler();
+ helper->threadLoop(pool);
+ helper->unregisterWithProfilerIfNeeded();
+}
+
+void HelperThread::ensureRegisteredWithProfiler() {
+ if (profilingStack) {
+ return;
+ }
+
+  // Note: To avoid deadlocks, we should not hold the helper thread lock while
+  // calling this function. This is safe because the registerThread field is a
+  // WriteOnceData<> type stored on the global helper thread state.
+ JS::RegisterThreadCallback callback = HelperThreadState().registerThread;
+ if (callback) {
+ profilingStack =
+ callback("JS Helper", reinterpret_cast<void*>(GetNativeStackBase()));
+ }
+}
+
+void HelperThread::unregisterWithProfilerIfNeeded() {
+ if (!profilingStack) {
+ return;
+ }
+
+  // Note: To avoid deadlocks, we should not hold the helper thread lock while
+  // calling this function. This is safe because the unregisterThread field is
+  // a WriteOnceData<> type stored on the global helper thread state.
+ JS::UnregisterThreadCallback callback = HelperThreadState().unregisterThread;
+ if (callback) {
+ callback();
+ profilingStack = nullptr;
+ }
+}
+
+HelperThread::AutoProfilerLabel::AutoProfilerLabel(
+ HelperThread* helperThread, const char* label,
+ JS::ProfilingCategoryPair categoryPair)
+ : profilingStack(helperThread->profilingStack) {
+ if (profilingStack) {
+ profilingStack->pushLabelFrame(label, nullptr, this, categoryPair);
+ }
+}
+
+HelperThread::AutoProfilerLabel::~AutoProfilerLabel() {
+ if (profilingStack) {
+ profilingStack->pop();
+ }
+}
+
+void HelperThread::threadLoop(InternalThreadPool* pool) {
+ MOZ_ASSERT(CanUseExtraThreads());
+
+ AutoLockHelperThreadState lock;
+
+ while (!pool->terminating) {
+ if (pool->queuedTasks != 0) {
+ pool->queuedTasks--;
+ HelperThreadState().runOneTask(lock);
+ continue;
+ }
+
+ AUTO_PROFILER_LABEL("HelperThread::threadLoop::wait", IDLE);
+ pool->wait(lock);
+ }
+}
diff --git a/js/src/vm/InternalThreadPool.h b/js/src/vm/InternalThreadPool.h
new file mode 100644
index 0000000000..1d5c6d1a43
--- /dev/null
+++ b/js/src/vm/InternalThreadPool.h
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * An internal thread pool, used by the shell and whenever
+ * JS::SetHelperThreadTaskCallback has not been called.
+ */
+
+#ifndef vm_InternalThreadPool_h
+#define vm_InternalThreadPool_h
+
+#include "js/AllocPolicy.h"
+#include "js/UniquePtr.h"
+#include "js/Vector.h"
+#include "threading/ConditionVariable.h"
+#include "threading/ProtectedData.h"
+
+namespace JS {
+enum class DispatchReason;
+} // namespace JS
+
+namespace js {
+
+class AutoLockHelperThreadState;
+class HelperThread;
+
+using HelperThreadVector =
+ Vector<UniquePtr<HelperThread>, 0, SystemAllocPolicy>;
+
+class InternalThreadPool {
+ public:
+ static bool Initialize(size_t threadCount, AutoLockHelperThreadState& lock);
+ static void ShutDown(AutoLockHelperThreadState& lock);
+
+ static bool IsInitialized() { return Instance; }
+ static InternalThreadPool& Get();
+
+ bool ensureThreadCount(size_t threadCount, AutoLockHelperThreadState& lock);
+ size_t threadCount(const AutoLockHelperThreadState& lock);
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ const AutoLockHelperThreadState& lock) const;
+
+ private:
+ static void DispatchTask(JS::DispatchReason reason);
+
+ void dispatchTask(JS::DispatchReason reason);
+ void shutDown(AutoLockHelperThreadState& lock);
+
+ HelperThreadVector& threads(const AutoLockHelperThreadState& lock);
+ const HelperThreadVector& threads(
+ const AutoLockHelperThreadState& lock) const;
+
+ void notifyAll(const AutoLockHelperThreadState& lock);
+ void wait(AutoLockHelperThreadState& lock);
+ friend class HelperThread;
+
+ static InternalThreadPool* Instance;
+
+ HelperThreadLockData<HelperThreadVector> threads_;
+
+ js::ConditionVariable wakeup;
+
+ HelperThreadLockData<size_t> queuedTasks;
+
+ HelperThreadLockData<bool> terminating;
+};
+
+} // namespace js
+
+#endif /* vm_InternalThreadPool_h */
diff --git a/js/src/vm/Interpreter-inl.h b/js/src/vm/Interpreter-inl.h
new file mode 100644
index 0000000000..62016a3d65
--- /dev/null
+++ b/js/src/vm/Interpreter-inl.h
@@ -0,0 +1,639 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Interpreter_inl_h
+#define vm_Interpreter_inl_h
+
+#include "vm/Interpreter.h"
+
+#include "jsnum.h"
+
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "vm/ArgumentsObject.h"
+#include "vm/BigIntType.h"
+#include "vm/BytecodeUtil.h" // JSDVG_SEARCH_STACK
+#include "vm/Realm.h"
+#include "vm/SharedStencil.h" // GCThingIndex
+#include "vm/StaticStrings.h"
+#include "vm/ThrowMsgKind.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordTupleShared.h"
+#endif
+
+#include "vm/GlobalObject-inl.h"
+#include "vm/JSAtom-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/ObjectOperations-inl.h"
+#include "vm/StringType-inl.h"
+
+namespace js {
+
+/*
+ * Per ES6, lexical declarations may not be accessed in any fashion until they
+ * are initialized (i.e., until the actual declaring statement is
+ * executed). The various LEXICAL opcodes need to check if the slot is an
+ * uninitialized let declaration, represented by the magic value
+ * JS_UNINITIALIZED_LEXICAL.
+ */
+static inline bool IsUninitializedLexical(const Value& val) {
+ // Use whyMagic here because JS_OPTIMIZED_OUT could flow into here.
+ return val.isMagic() && val.whyMagic() == JS_UNINITIALIZED_LEXICAL;
+}
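+
+// For example, in |{ x; let x = 1; }| the read of |x| happens inside the
+// temporal dead zone: the slot still holds JS_UNINITIALIZED_LEXICAL, so the
+// read must throw a ReferenceError rather than produce the magic value.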
+
+static inline bool IsUninitializedLexicalSlot(HandleObject obj,
+ const PropertyResult& prop) {
+ MOZ_ASSERT(prop.isFound());
+ if (obj->is<WithEnvironmentObject>()) {
+ return false;
+ }
+
+ // Proxy hooks may return a non-native property.
+ if (prop.isNonNativeProperty()) {
+ return false;
+ }
+
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (!propInfo.isDataProperty()) {
+ return false;
+ }
+
+ return IsUninitializedLexical(
+ obj->as<NativeObject>().getSlot(propInfo.slot()));
+}
+
+static inline bool CheckUninitializedLexical(JSContext* cx, PropertyName* name_,
+ HandleValue val) {
+ if (IsUninitializedLexical(val)) {
+ Rooted<PropertyName*> name(cx, name_);
+ ReportRuntimeLexicalError(cx, JSMSG_UNINITIALIZED_LEXICAL, name);
+ return false;
+ }
+ return true;
+}
+
+inline bool GetLengthProperty(const Value& lval, MutableHandleValue vp) {
+ /* Optimize length accesses on strings, arrays, and arguments. */
+ if (lval.isString()) {
+ vp.setInt32(lval.toString()->length());
+ return true;
+ }
+ if (lval.isObject()) {
+ JSObject* obj = &lval.toObject();
+ if (obj->is<ArrayObject>()) {
+ vp.setNumber(obj->as<ArrayObject>().length());
+ return true;
+ }
+
+ if (obj->is<ArgumentsObject>()) {
+ ArgumentsObject* argsobj = &obj->as<ArgumentsObject>();
+ if (!argsobj->hasOverriddenLength()) {
+ uint32_t length = argsobj->initialLength();
+ MOZ_ASSERT(length < INT32_MAX);
+ vp.setInt32(int32_t(length));
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+enum class GetNameMode { Normal, TypeOf };
+
+template <GetNameMode mode>
+inline bool FetchName(JSContext* cx, HandleObject receiver, HandleObject holder,
+ Handle<PropertyName*> name, const PropertyResult& prop,
+ MutableHandleValue vp) {
+ if (prop.isNotFound()) {
+ switch (mode) {
+ case GetNameMode::Normal:
+ ReportIsNotDefined(cx, name);
+ return false;
+ case GetNameMode::TypeOf:
+ vp.setUndefined();
+ return true;
+ }
+ }
+
+ /* Take the slow path if shape was not found in a native object. */
+ if (!receiver->is<NativeObject>() || !holder->is<NativeObject>()) {
+ Rooted<jsid> id(cx, NameToId(name));
+ if (!GetProperty(cx, receiver, receiver, id, vp)) {
+ return false;
+ }
+ } else {
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (propInfo.isDataProperty()) {
+ /* Fast path for Object instance properties. */
+ vp.set(holder->as<NativeObject>().getSlot(propInfo.slot()));
+ } else {
+ // Unwrap 'with' environments for reasons given in
+ // GetNameBoundInEnvironment.
+ RootedObject normalized(cx, MaybeUnwrapWithEnvironment(receiver));
+ RootedId id(cx, NameToId(name));
+ if (!NativeGetExistingProperty(cx, normalized, holder.as<NativeObject>(),
+ id, propInfo, vp)) {
+ return false;
+ }
+ }
+ }
+
+  // We do our own explicit checking for |this|.
+ if (name == cx->names().dotThis) {
+ return true;
+ }
+
+ // NAME operations are the slow paths already, so unconditionally check
+ // for uninitialized lets.
+ return CheckUninitializedLexical(cx, name, vp);
+}
+
+inline bool FetchNameNoGC(NativeObject* pobj, PropertyResult prop, Value* vp) {
+ if (prop.isNotFound()) {
+ return false;
+ }
+
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (!propInfo.isDataProperty()) {
+ return false;
+ }
+
+ *vp = pobj->getSlot(propInfo.slot());
+ return !IsUninitializedLexical(*vp);
+}
+
+template <js::GetNameMode mode>
+inline bool GetEnvironmentName(JSContext* cx, HandleObject envChain,
+ Handle<PropertyName*> name,
+ MutableHandleValue vp) {
+ {
+ PropertyResult prop;
+ JSObject* obj = nullptr;
+ NativeObject* pobj = nullptr;
+ if (LookupNameNoGC(cx, name, envChain, &obj, &pobj, &prop)) {
+ if (FetchNameNoGC(pobj, prop, vp.address())) {
+ return true;
+ }
+ }
+ }
+
+ PropertyResult prop;
+ RootedObject obj(cx), pobj(cx);
+ if (!LookupName(cx, name, envChain, &obj, &pobj, &prop)) {
+ return false;
+ }
+
+ return FetchName<mode>(cx, obj, pobj, name, prop, vp);
+}
+
+inline bool HasOwnProperty(JSContext* cx, HandleValue val, HandleValue idValue,
+ bool* result) {
+ // As an optimization, provide a fast path when rooting is not necessary and
+ // we can safely retrieve the object's shape.
+ jsid id;
+ if (val.isObject() && idValue.isPrimitive() &&
+ PrimitiveValueToId<NoGC>(cx, idValue, &id)) {
+ JSObject* obj = &val.toObject();
+ PropertyResult prop;
+ if (obj->is<NativeObject>() &&
+ NativeLookupOwnProperty<NoGC>(cx, &obj->as<NativeObject>(), id,
+ &prop)) {
+ *result = prop.isFound();
+ return true;
+ }
+ }
+
+ // Step 1.
+ RootedId key(cx);
+ if (!ToPropertyKey(cx, idValue, &key)) {
+ return false;
+ }
+
+ // Step 2.
+ RootedObject obj(cx, ToObject(cx, val));
+ if (!obj) {
+ return false;
+ }
+
+ // Step 3.
+ return HasOwnProperty(cx, obj, key, result);
+}
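+
+// For example, looking up a string key on a plain native object receiver
+// takes the NoGC fast path above, while a proxy receiver or an object-valued
+// key falls through to the rooted ToPropertyKey/ToObject slow path.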
+
+inline bool GetIntrinsicOperation(JSContext* cx, HandleScript script,
+ jsbytecode* pc, MutableHandleValue vp) {
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+ return GlobalObject::getIntrinsicValue(cx, cx->global(), name, vp);
+}
+
+inline bool SetIntrinsicOperation(JSContext* cx, JSScript* script,
+ jsbytecode* pc, HandleValue val) {
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+ return GlobalObject::setIntrinsicValue(cx, cx->global(), name, val);
+}
+
+inline bool SetNameOperation(JSContext* cx, JSScript* script, jsbytecode* pc,
+ HandleObject env, HandleValue val) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::SetName || JSOp(*pc) == JSOp::StrictSetName ||
+ JSOp(*pc) == JSOp::SetGName || JSOp(*pc) == JSOp::StrictSetGName);
+ MOZ_ASSERT_IF(
+ JSOp(*pc) == JSOp::SetGName || JSOp(*pc) == JSOp::StrictSetGName,
+ !script->hasNonSyntacticScope());
+ MOZ_ASSERT_IF(
+ JSOp(*pc) == JSOp::SetGName || JSOp(*pc) == JSOp::StrictSetGName,
+ env == cx->global() || env == &cx->global()->lexicalEnvironment() ||
+ env->is<RuntimeLexicalErrorObject>());
+
+ bool strict =
+ JSOp(*pc) == JSOp::StrictSetName || JSOp(*pc) == JSOp::StrictSetGName;
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+
+ // In strict mode, assigning to an undeclared global variable is an
+ // error. To detect this, we call NativeSetProperty directly and pass
+ // Unqualified. It stores the error, if any, in |result|.
+ bool ok;
+ ObjectOpResult result;
+ RootedId id(cx, NameToId(name));
+ RootedValue receiver(cx, ObjectValue(*env));
+ if (env->isUnqualifiedVarObj()) {
+ Rooted<NativeObject*> varobj(cx);
+ if (env->is<DebugEnvironmentProxy>()) {
+ varobj =
+ &env->as<DebugEnvironmentProxy>().environment().as<NativeObject>();
+ } else {
+ varobj = &env->as<NativeObject>();
+ }
+ MOZ_ASSERT(!varobj->getOpsSetProperty());
+ ok = NativeSetProperty<Unqualified>(cx, varobj, id, val, receiver, result);
+ } else {
+ ok = SetProperty(cx, env, id, val, receiver, result);
+ }
+ return ok && result.checkStrictModeError(cx, env, id, strict);
+}
+
+inline void InitGlobalLexicalOperation(
+ JSContext* cx, ExtensibleLexicalEnvironmentObject* lexicalEnv,
+ JSScript* script, jsbytecode* pc, HandleValue value) {
+ MOZ_ASSERT_IF(!script->hasNonSyntacticScope(),
+ lexicalEnv == &cx->global()->lexicalEnvironment());
+ MOZ_ASSERT(JSOp(*pc) == JSOp::InitGLexical);
+
+ mozilla::Maybe<PropertyInfo> prop =
+ lexicalEnv->lookup(cx, script->getName(pc));
+ MOZ_ASSERT(prop.isSome());
+ MOZ_ASSERT(IsUninitializedLexical(lexicalEnv->getSlot(prop->slot())));
+
+ lexicalEnv->setSlot(prop->slot(), value);
+}
+
+inline bool InitPropertyOperation(JSContext* cx, jsbytecode* pc,
+ HandleObject obj, Handle<PropertyName*> name,
+ HandleValue rhs) {
+ unsigned propAttrs = GetInitDataPropAttrs(JSOp(*pc));
+ return DefineDataProperty(cx, obj, name, rhs, propAttrs);
+}
+
+static MOZ_ALWAYS_INLINE bool NegOperation(JSContext* cx,
+ MutableHandleValue val,
+ MutableHandleValue res) {
+  /*
+   * When the operand is an int32 value, -i is also representable as an int32
+   * unless i is 0 or INT32_MIN, in which case the results, -0.0 and
+   * INT32_MAX + 1, are double values and we fall through to ToNumeric.
+   */
+ int32_t i;
+ if (val.isInt32() && (i = val.toInt32()) != 0 && i != INT32_MIN) {
+ res.setInt32(-i);
+ return true;
+ }
+
+ if (!ToNumeric(cx, val)) {
+ return false;
+ }
+
+ if (val.isBigInt()) {
+ return BigInt::negValue(cx, val, res);
+ }
+
+ res.setNumber(-val.toNumber());
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool IncOperation(JSContext* cx, HandleValue val,
+ MutableHandleValue res) {
+ int32_t i;
+ if (val.isInt32() && (i = val.toInt32()) != INT32_MAX) {
+ res.setInt32(i + 1);
+ return true;
+ }
+
+ if (val.isNumber()) {
+ res.setNumber(val.toNumber() + 1);
+ return true;
+ }
+
+ MOZ_ASSERT(val.isBigInt(), "+1 only callable on result of JSOp::ToNumeric");
+ return BigInt::incValue(cx, val, res);
+}
+
+static MOZ_ALWAYS_INLINE bool DecOperation(JSContext* cx, HandleValue val,
+ MutableHandleValue res) {
+ int32_t i;
+ if (val.isInt32() && (i = val.toInt32()) != INT32_MIN) {
+ res.setInt32(i - 1);
+ return true;
+ }
+
+ if (val.isNumber()) {
+ res.setNumber(val.toNumber() - 1);
+ return true;
+ }
+
+ MOZ_ASSERT(val.isBigInt(), "-1 only callable on result of JSOp::ToNumeric");
+ return BigInt::decValue(cx, val, res);
+}
+
+static MOZ_ALWAYS_INLINE bool ToPropertyKeyOperation(JSContext* cx,
+ HandleValue idval,
+ MutableHandleValue res) {
+ if (idval.isInt32()) {
+ res.set(idval);
+ return true;
+ }
+
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, idval, &id)) {
+ return false;
+ }
+
+ res.set(IdToValue(id));
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool GetObjectElementOperation(
+ JSContext* cx, JSOp op, JS::HandleObject obj, JS::HandleValue receiver,
+ HandleValue key, MutableHandleValue res) {
+ MOZ_ASSERT(op == JSOp::GetElem || op == JSOp::GetElemSuper);
+ MOZ_ASSERT_IF(op == JSOp::GetElem, obj == &receiver.toObject());
+
+ do {
+ uint32_t index;
+ if (IsDefinitelyIndex(key, &index)) {
+ if (GetElementNoGC(cx, obj, receiver, index, res.address())) {
+ break;
+ }
+
+ if (!GetElement(cx, obj, receiver, index, res)) {
+ return false;
+ }
+ break;
+ }
+
+ if (key.isString()) {
+ JSString* str = key.toString();
+ JSAtom* name = str->isAtom() ? &str->asAtom() : AtomizeString(cx, str);
+ if (!name) {
+ return false;
+ }
+ if (name->isIndex(&index)) {
+ if (GetElementNoGC(cx, obj, receiver, index, res.address())) {
+ break;
+ }
+ } else {
+ if (GetPropertyNoGC(cx, obj, receiver, name->asPropertyName(),
+ res.address())) {
+ break;
+ }
+ }
+ }
+
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, key, &id)) {
+ return false;
+ }
+ if (!GetProperty(cx, obj, receiver, id, res)) {
+ return false;
+ }
+ } while (false);
+
+ cx->debugOnlyCheck(res);
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool GetPrimitiveElementOperation(
+ JSContext* cx, JS::HandleValue receiver, int receiverIndex, HandleValue key,
+ MutableHandleValue res) {
+#ifdef ENABLE_RECORD_TUPLE
+ if (receiver.isExtendedPrimitive()) {
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, key, &id)) {
+ return false;
+ }
+ RootedObject obj(cx, &receiver.toExtendedPrimitive());
+ if (!ExtendedPrimitiveGetProperty(cx, obj, receiver, id, res)) {
+ return false;
+ }
+ }
+#endif
+
+ // FIXME: Bug 1234324 We shouldn't be boxing here.
+ RootedObject boxed(
+ cx, ToObjectFromStackForPropertyAccess(cx, receiver, receiverIndex, key));
+ if (!boxed) {
+ return false;
+ }
+
+ do {
+ uint32_t index;
+ if (IsDefinitelyIndex(key, &index)) {
+ if (GetElementNoGC(cx, boxed, receiver, index, res.address())) {
+ break;
+ }
+
+ if (!GetElement(cx, boxed, receiver, index, res)) {
+ return false;
+ }
+ break;
+ }
+
+ if (key.isString()) {
+ JSString* str = key.toString();
+ JSAtom* name = str->isAtom() ? &str->asAtom() : AtomizeString(cx, str);
+ if (!name) {
+ return false;
+ }
+ if (name->isIndex(&index)) {
+ if (GetElementNoGC(cx, boxed, receiver, index, res.address())) {
+ break;
+ }
+ } else {
+ if (GetPropertyNoGC(cx, boxed, receiver, name->asPropertyName(),
+ res.address())) {
+ break;
+ }
+ }
+ }
+
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, key, &id)) {
+ return false;
+ }
+ if (!GetProperty(cx, boxed, receiver, id, res)) {
+ return false;
+ }
+ } while (false);
+
+ cx->debugOnlyCheck(res);
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool GetElementOperationWithStackIndex(
+ JSContext* cx, HandleValue lref, int lrefIndex, HandleValue rref,
+ MutableHandleValue res) {
+ uint32_t index;
+ if (lref.isString() && IsDefinitelyIndex(rref, &index)) {
+ JSString* str = lref.toString();
+ if (index < str->length()) {
+ str = cx->staticStrings().getUnitStringForElement(cx, str, index);
+ if (!str) {
+ return false;
+ }
+ res.setString(str);
+ return true;
+ }
+ }
+
+ if (lref.isPrimitive()) {
+ return GetPrimitiveElementOperation(cx, lref, lrefIndex, rref, res);
+ }
+
+ RootedObject obj(cx, &lref.toObject());
+ return GetObjectElementOperation(cx, JSOp::GetElem, obj, lref, rref, res);
+}
+
+// Wrapper for callVM from JIT.
+static MOZ_ALWAYS_INLINE bool GetElementOperation(JSContext* cx,
+ HandleValue lref,
+ HandleValue rref,
+ MutableHandleValue res) {
+ return GetElementOperationWithStackIndex(cx, lref, JSDVG_SEARCH_STACK, rref,
+ res);
+}
+
+static MOZ_ALWAYS_INLINE JSString* TypeOfOperation(const Value& v,
+ JSRuntime* rt) {
+ JSType type = js::TypeOfValue(v);
+ return TypeName(type, *rt->commonNames);
+}
+
+static MOZ_ALWAYS_INLINE bool InitElemOperation(JSContext* cx, jsbytecode* pc,
+ HandleObject obj,
+ HandleValue idval,
+ HandleValue val) {
+ MOZ_ASSERT(!val.isMagic(JS_ELEMENTS_HOLE));
+
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, idval, &id)) {
+ return false;
+ }
+
+ unsigned flags = GetInitDataPropAttrs(JSOp(*pc));
+ if (id.isPrivateName()) {
+ // Clear enumerate flag off of private names.
+ flags &= ~JSPROP_ENUMERATE;
+ }
+ return DefineDataProperty(cx, obj, id, val, flags);
+}
+
+static MOZ_ALWAYS_INLINE bool CheckPrivateFieldOperation(JSContext* cx,
+ jsbytecode* pc,
+ HandleValue val,
+ HandleValue idval,
+ bool* result) {
+ MOZ_ASSERT(idval.isSymbol());
+ MOZ_ASSERT(idval.toSymbol()->isPrivateName());
+
+ // Result had better not be a nullptr.
+ MOZ_ASSERT(result);
+
+ ThrowCondition condition;
+ ThrowMsgKind msgKind;
+ GetCheckPrivateFieldOperands(pc, &condition, &msgKind);
+
+  // When we are using OnlyCheckRhs, we are implementing PrivateInExpr.
+  // This requires that we throw if the rhs is not an object.
+ //
+ // The InlineCache for CheckPrivateField already checks for a
+ // non-object rhs and refuses to attach in that circumstance.
+ if (condition == ThrowCondition::OnlyCheckRhs) {
+ if (!val.isObject()) {
+ ReportInNotObjectError(cx, idval, val);
+ return false;
+ }
+ }
+
+  // Invoke the HostEnsureCanAddPrivateElement(O) host hook here if the code
+  // is attempting to attach a new private element (which corresponds to the
+  // ThrowHas throw condition).
+ if (condition == ThrowCondition::ThrowHas) {
+ if (JS::EnsureCanAddPrivateElementOp op =
+ cx->runtime()->canAddPrivateElement) {
+ if (!op(cx, val)) {
+ return false;
+ }
+ }
+ }
+
+ if (!HasOwnProperty(cx, val, idval, result)) {
+ return false;
+ }
+
+ if (!CheckPrivateFieldWillThrow(condition, *result)) {
+ return true;
+ }
+
+ // Throw!
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ ThrowMsgKindToErrNum(msgKind));
+ return false;
+}
+
+static inline JS::Symbol* NewPrivateName(JSContext* cx, Handle<JSAtom*> name) {
+ return JS::Symbol::new_(cx, JS::SymbolCode::PrivateNameSymbol, name);
+}
+
+inline bool InitElemIncOperation(JSContext* cx, Handle<ArrayObject*> arr,
+ uint32_t index, HandleValue val) {
+ if (index == INT32_MAX) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SPREAD_TOO_LARGE);
+ return false;
+ }
+
+ // If val is a hole, do not call DefineDataElement.
+ if (val.isMagic(JS_ELEMENTS_HOLE)) {
+ // Always call SetLengthProperty even if this is not the last element
+ // initialiser, because this may be followed by a SpreadElement loop,
+ // which will not set the array length if nothing is spread.
+ return SetLengthProperty(cx, arr, index + 1);
+ }
+
+ return DefineDataElement(cx, arr, index, val, JSPROP_ENUMERATE);
+}
+
+inline JSFunction* ReportIfNotFunction(
+ JSContext* cx, HandleValue v, MaybeConstruct construct = NO_CONSTRUCT) {
+ if (v.isObject() && v.toObject().is<JSFunction>()) {
+ return &v.toObject().as<JSFunction>();
+ }
+
+ ReportIsNotFunction(cx, v, -1, construct);
+ return nullptr;
+}
+
+} /* namespace js */
+
+#endif /* vm_Interpreter_inl_h */
diff --git a/js/src/vm/Interpreter.cpp b/js/src/vm/Interpreter.cpp
new file mode 100644
index 0000000000..dce63bb110
--- /dev/null
+++ b/js/src/vm/Interpreter.cpp
@@ -0,0 +1,5605 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JavaScript bytecode interpreter.
+ */
+
+#include "vm/Interpreter-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/WrappingOperations.h"
+
+#include <string.h>
+
+#include "jsapi.h"
+#include "jslibmath.h"
+#include "jsmath.h"
+#include "jsnum.h"
+
+#include "builtin/Array.h"
+#include "builtin/Eval.h"
+#include "builtin/ModuleObject.h"
+#include "builtin/Object.h"
+#include "builtin/Promise.h"
+#include "gc/GC.h"
+#include "jit/AtomicOperations.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Jit.h"
+#include "jit/JitRuntime.h"
+#include "js/experimental/JitInfo.h" // JSJitInfo
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/friend/WindowProxy.h" // js::IsWindowProxy
+#include "js/Printer.h"
+#include "util/CheckedArithmetic.h"
+#include "util/StringBuffer.h"
+#include "vm/AsyncFunction.h"
+#include "vm/AsyncIteration.h"
+#include "vm/BigIntType.h"
+#include "vm/BytecodeUtil.h" // JSDVG_SEARCH_STACK
+#include "vm/EqualityOperations.h" // js::StrictlyEqual
+#include "vm/GeneratorObject.h"
+#include "vm/Iteration.h"
+#include "vm/JSAtom.h"
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "vm/Opcodes.h"
+#include "vm/PIC.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/Scope.h"
+#include "vm/Shape.h"
+#include "vm/SharedStencil.h" // GCThingIndex
+#include "vm/StringType.h"
+#include "vm/ThrowMsgKind.h" // ThrowMsgKind
+#include "vm/Time.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordType.h"
+# include "vm/TupleType.h"
+#endif
+
+#include "builtin/Boolean-inl.h"
+#include "debugger/DebugAPI-inl.h"
+#include "vm/ArgumentsObject-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSScript-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/ObjectOperations-inl.h"
+#include "vm/PlainObject-inl.h" // js::CopyInitializerObject, js::CreateThis
+#include "vm/Probes-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+
+using mozilla::DebugOnly;
+using mozilla::NumberEqualsInt32;
+
+using js::jit::JitScript;
+
+template <bool Eq>
+static MOZ_ALWAYS_INLINE bool LooseEqualityOp(JSContext* cx,
+ InterpreterRegs& regs) {
+ HandleValue rval = regs.stackHandleAt(-1);
+ HandleValue lval = regs.stackHandleAt(-2);
+ bool cond;
+ if (!LooselyEqual(cx, lval, rval, &cond)) {
+ return false;
+ }
+ cond = (cond == Eq);
+ regs.sp--;
+ regs.sp[-1].setBoolean(cond);
+ return true;
+}
+
+JSObject* js::BoxNonStrictThis(JSContext* cx, HandleValue thisv) {
+ MOZ_ASSERT(!thisv.isMagic());
+
+ if (thisv.isNullOrUndefined()) {
+ return cx->global()->lexicalEnvironment().thisObject();
+ }
+
+ if (thisv.isObject()) {
+ return &thisv.toObject();
+ }
+
+ return PrimitiveToObject(cx, thisv);
+}
+
+bool js::GetFunctionThis(JSContext* cx, AbstractFramePtr frame,
+ MutableHandleValue res) {
+ MOZ_ASSERT(frame.isFunctionFrame());
+ MOZ_ASSERT(!frame.callee()->isArrow());
+
+ if (frame.thisArgument().isObject() || frame.callee()->strict()) {
+ res.set(frame.thisArgument());
+ return true;
+ }
+
+ MOZ_ASSERT(!frame.callee()->isSelfHostedBuiltin(),
+ "Self-hosted builtins must be strict");
+
+ RootedValue thisv(cx, frame.thisArgument());
+
+ // If there is a NSVO on environment chain, use it as basis for fallback
+ // global |this|. This gives a consistent definition of global lexical
+ // |this| between function and global contexts.
+ //
+ // NOTE: If only non-syntactic WithEnvironments are on the chain, we use the
+ // global lexical |this| value. This is for compatibility with the Subscript
+ // Loader.
+ if (frame.script()->hasNonSyntacticScope() && thisv.isNullOrUndefined()) {
+ RootedObject env(cx, frame.environmentChain());
+ while (true) {
+ if (IsNSVOLexicalEnvironment(env) || IsGlobalLexicalEnvironment(env)) {
+ res.setObject(*GetThisObjectOfLexical(env));
+ return true;
+ }
+ if (!env->enclosingEnvironment()) {
+ // This can only happen in Debugger eval frames: in that case we
+ // don't always have a global lexical env, see EvaluateInEnv.
+ MOZ_ASSERT(env->is<GlobalObject>());
+ res.setObject(*GetThisObject(env));
+ return true;
+ }
+ env = env->enclosingEnvironment();
+ }
+ }
+
+ JSObject* obj = BoxNonStrictThis(cx, thisv);
+ if (!obj) {
+ return false;
+ }
+
+ res.setObject(*obj);
+ return true;
+}
+
+void js::GetNonSyntacticGlobalThis(JSContext* cx, HandleObject envChain,
+ MutableHandleValue res) {
+ RootedObject env(cx, envChain);
+ while (true) {
+ if (IsExtensibleLexicalEnvironment(env)) {
+ res.setObject(*GetThisObjectOfLexical(env));
+ return;
+ }
+ if (!env->enclosingEnvironment()) {
+ // This can only happen in Debugger eval frames: in that case we
+ // don't always have a global lexical env, see EvaluateInEnv.
+ MOZ_ASSERT(env->is<GlobalObject>());
+ res.setObject(*GetThisObject(env));
+ return;
+ }
+ env = env->enclosingEnvironment();
+ }
+}
+
+#ifdef DEBUG
+static bool IsSelfHostedOrKnownBuiltinCtor(JSFunction* fun, JSContext* cx) {
+ if (fun->isSelfHostedOrIntrinsic()) {
+ return true;
+ }
+
+ // GetBuiltinConstructor in ArrayGroupToMap
+ if (fun == cx->global()->maybeGetConstructor(JSProto_Map)) {
+ return true;
+ }
+
+ // GetBuiltinConstructor in intlFallbackSymbol
+ if (fun == cx->global()->maybeGetConstructor(JSProto_Symbol)) {
+ return true;
+ }
+
+ // ConstructorForTypedArray in MergeSortTypedArray
+ if (fun == cx->global()->maybeGetConstructor(JSProto_Int8Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_Uint8Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_Int16Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_Uint16Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_Int32Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_Uint32Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_Float32Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_Float64Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_Uint8ClampedArray) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_BigInt64Array) ||
+ fun == cx->global()->maybeGetConstructor(JSProto_BigUint64Array)) {
+ return true;
+ }
+
+ return false;
+}
+#endif // DEBUG
+
+bool js::Debug_CheckSelfHosted(JSContext* cx, HandleValue funVal) {
+#ifdef DEBUG
+ JSFunction* fun = &UncheckedUnwrap(&funVal.toObject())->as<JSFunction>();
+ MOZ_ASSERT(IsSelfHostedOrKnownBuiltinCtor(fun, cx),
+ "functions directly called inside self-hosted JS must be one of "
+ "selfhosted function, self-hosted intrinsic, or known built-in "
+ "constructor");
+#else
+ MOZ_CRASH("self-hosted checks should only be done in Debug builds");
+#endif
+
+ // This is purely to police self-hosted code. There is no actual operation.
+ return true;
+}
+
+static inline bool GetPropertyOperation(JSContext* cx,
+ Handle<PropertyName*> name,
+ HandleValue lval,
+ MutableHandleValue vp) {
+ if (name == cx->names().length && GetLengthProperty(lval, vp)) {
+ return true;
+ }
+
+ return GetProperty(cx, lval, name, vp);
+}
+
+static inline bool GetNameOperation(JSContext* cx, HandleObject envChain,
+ Handle<PropertyName*> name, JSOp nextOp,
+ MutableHandleValue vp) {
+ /* Kludge to allow (typeof foo == "undefined") tests. */
+ if (nextOp == JSOp::Typeof) {
+ return GetEnvironmentName<GetNameMode::TypeOf>(cx, envChain, name, vp);
+ }
+ return GetEnvironmentName<GetNameMode::Normal>(cx, envChain, name, vp);
+}
+
+bool js::GetImportOperation(JSContext* cx, HandleObject envChain,
+ HandleScript script, jsbytecode* pc,
+ MutableHandleValue vp) {
+ RootedObject env(cx), pobj(cx);
+ Rooted<PropertyName*> name(cx, script->getName(pc));
+ PropertyResult prop;
+
+ MOZ_ALWAYS_TRUE(LookupName(cx, name, envChain, &env, &pobj, &prop));
+ MOZ_ASSERT(env && env->is<ModuleEnvironmentObject>());
+ MOZ_ASSERT(env->as<ModuleEnvironmentObject>().hasImportBinding(name));
+ return FetchName<GetNameMode::Normal>(cx, env, pobj, name, prop, vp);
+}
+
+static JSObject* SuperFunOperation(JSObject* callee) {
+ MOZ_ASSERT(callee->as<JSFunction>().isClassConstructor());
+ MOZ_ASSERT(
+ callee->as<JSFunction>().baseScript()->isDerivedClassConstructor());
+
+ return callee->as<JSFunction>().staticPrototype();
+}
+
+static JSObject* HomeObjectSuperBase(JSObject* homeObj) {
+ MOZ_ASSERT(homeObj->is<PlainObject>() || homeObj->is<JSFunction>());
+
+ return homeObj->staticPrototype();
+}
+
+bool js::ReportIsNotFunction(JSContext* cx, HandleValue v, int numToSkip,
+ MaybeConstruct construct) {
+ unsigned error = construct ? JSMSG_NOT_CONSTRUCTOR : JSMSG_NOT_FUNCTION;
+ int spIndex = numToSkip >= 0 ? -(numToSkip + 1) : JSDVG_SEARCH_STACK;
+
+ ReportValueError(cx, error, spIndex, v, nullptr);
+ return false;
+}
+
+JSObject* js::ValueToCallable(JSContext* cx, HandleValue v, int numToSkip,
+ MaybeConstruct construct) {
+ if (v.isObject() && v.toObject().isCallable()) {
+ return &v.toObject();
+ }
+
+ ReportIsNotFunction(cx, v, numToSkip, construct);
+ return nullptr;
+}
+
+static bool MaybeCreateThisForConstructor(JSContext* cx, const CallArgs& args) {
+ if (args.thisv().isObject()) {
+ return true;
+ }
+
+ RootedFunction callee(cx, &args.callee().as<JSFunction>());
+ RootedObject newTarget(cx, &args.newTarget().toObject());
+
+ MOZ_ASSERT(callee->hasBytecode());
+
+ if (!CreateThis(cx, callee, newTarget, GenericObject, args.mutableThisv())) {
+ return false;
+ }
+
+ // Ensure the callee still has a non-lazy script. We normally don't relazify
+ // in active compartments, but the .prototype lookup might have called the
+ // relazifyFunctions testing function that doesn't have this restriction.
+ return JSFunction::getOrCreateScript(cx, callee);
+}
+
+#ifdef ENABLE_RECORD_TUPLE
+static bool AddRecordSpreadOperation(JSContext* cx, HandleValue recHandle,
+ HandleValue spreadeeHandle) {
+ MOZ_ASSERT(recHandle.toExtendedPrimitive().is<RecordType>());
+ RecordType* rec = &recHandle.toExtendedPrimitive().as<RecordType>();
+
+ RootedObject obj(cx, ToObjectOrGetObjectPayload(cx, spreadeeHandle));
+
+ RootedIdVector keys(cx);
+ if (!GetPropertyKeys(cx, obj, JSITER_OWNONLY | JSITER_SYMBOLS, &keys)) {
+ return false;
+ }
+
+ size_t len = keys.length();
+ RootedId propKey(cx);
+ RootedValue propValue(cx);
+ for (size_t i = 0; i < len; i++) {
+ propKey.set(keys[i]);
+
+ // Step 4.c.ii.1.
+ if (MOZ_UNLIKELY(!GetProperty(cx, obj, obj, propKey, &propValue))) {
+ return false;
+ }
+
+ if (MOZ_UNLIKELY(!rec->initializeNextProperty(cx, propKey, propValue))) {
+ return false;
+ }
+ }
+
+ return true;
+}
+#endif
+
+InterpreterFrame* InvokeState::pushInterpreterFrame(JSContext* cx) {
+ return cx->interpreterStack().pushInvokeFrame(cx, args_, construct_);
+}
+
+InterpreterFrame* ExecuteState::pushInterpreterFrame(JSContext* cx) {
+ return cx->interpreterStack().pushExecuteFrame(cx, script_, envChain_,
+ evalInFrame_);
+}
+
+InterpreterFrame* RunState::pushInterpreterFrame(JSContext* cx) {
+ if (isInvoke()) {
+ return asInvoke()->pushInterpreterFrame(cx);
+ }
+ return asExecute()->pushInterpreterFrame(cx);
+}
+
+static MOZ_ALWAYS_INLINE bool MaybeEnterInterpreterTrampoline(JSContext* cx,
+ RunState& state) {
+#ifdef NIGHTLY_BUILD
+ if (jit::JitOptions.emitInterpreterEntryTrampoline &&
+ cx->runtime()->hasJitRuntime()) {
+ js::jit::JitRuntime* jitRuntime = cx->runtime()->jitRuntime();
+ JSScript* script = state.script();
+
+ uint8_t* codeRaw = nullptr;
+ auto p = jitRuntime->getInterpreterEntryMap()->lookup(script);
+ if (p) {
+ codeRaw = p->value().raw();
+ } else if (js::jit::JitCode* code =
+ jitRuntime->generateEntryTrampolineForScript(cx, script)) {
+ js::jit::EntryTrampoline entry(cx, code);
+ if (!jitRuntime->getInterpreterEntryMap()->put(script, entry)) {
+ return false;
+ }
+ codeRaw = code->raw();
+ }
+
+ MOZ_ASSERT(codeRaw, "Should have a valid trampoline here.");
+ // The C++ entry thunk is located at the vmInterpreterEntryOffset offset.
+ codeRaw += jitRuntime->vmInterpreterEntryOffset();
+ return js::jit::EnterInterpreterEntryTrampoline(codeRaw, cx, &state);
+ }
+#endif
+ return Interpret(cx, state);
+}
+
+// MSVC with PGO inlines a lot of functions in RunScript, resulting in large
+// stack frames and stack overflow issues, see bug 1167883. Turn off PGO to
+// avoid this.
+#ifdef _MSC_VER
+# pragma optimize("g", off)
+#endif
+bool js::RunScript(JSContext* cx, RunState& state) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+
+ MOZ_ASSERT_IF(cx->runtime()->hasJitRuntime(),
+ !cx->runtime()->jitRuntime()->disallowArbitraryCode());
+
+ // Since any script can conceivably GC, make sure it's safe to do so.
+ cx->verifyIsSafeToGC();
+
+ MOZ_ASSERT(cx->realm() == state.script()->realm());
+
+ MOZ_DIAGNOSTIC_ASSERT(cx->realm()->isSystem() ||
+ cx->runtime()->allowContentJS());
+
+ if (!DebugAPI::checkNoExecute(cx, state.script())) {
+ return false;
+ }
+
+ GeckoProfilerEntryMarker marker(cx, state.script());
+
+ bool measuringTime = !cx->isMeasuringExecutionTime();
+ mozilla::TimeStamp startTime;
+ if (measuringTime) {
+ cx->setIsMeasuringExecutionTime(true);
+ cx->setIsExecuting(true);
+ startTime = mozilla::TimeStamp::Now();
+ }
+ auto timerEnd = mozilla::MakeScopeExit([&]() {
+ if (measuringTime) {
+ mozilla::TimeDuration delta = mozilla::TimeStamp::Now() - startTime;
+ cx->realm()->timers.executionTime += delta;
+ cx->setIsMeasuringExecutionTime(false);
+ cx->setIsExecuting(false);
+ }
+ });
+
+ jit::EnterJitStatus status = jit::MaybeEnterJit(cx, state);
+ switch (status) {
+ case jit::EnterJitStatus::Error:
+ return false;
+ case jit::EnterJitStatus::Ok:
+ return true;
+ case jit::EnterJitStatus::NotEntered:
+ break;
+ }
+
+ bool ok = MaybeEnterInterpreterTrampoline(cx, state);
+
+ return ok;
+}
+#ifdef _MSC_VER
+# pragma optimize("", on)
+#endif
+
+STATIC_PRECONDITION_ASSUME(ubound(args.argv_) >= argc)
+MOZ_ALWAYS_INLINE bool CallJSNative(JSContext* cx, Native native,
+ CallReason reason, const CallArgs& args) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+
+ NativeResumeMode resumeMode = DebugAPI::onNativeCall(cx, args, reason);
+ if (resumeMode != NativeResumeMode::Continue) {
+ return resumeMode == NativeResumeMode::Override;
+ }
+
+#ifdef DEBUG
+ bool alreadyThrowing = cx->isExceptionPending();
+#endif
+ cx->check(args);
+ MOZ_ASSERT(!args.callee().is<ProxyObject>());
+
+ AutoRealm ar(cx, &args.callee());
+ bool ok = native(cx, args.length(), args.base());
+ if (ok) {
+ cx->check(args.rval());
+ MOZ_ASSERT_IF(!alreadyThrowing, !cx->isExceptionPending());
+ }
+ return ok;
+}
+
+STATIC_PRECONDITION(ubound(args.argv_) >= argc)
+MOZ_ALWAYS_INLINE bool CallJSNativeConstructor(JSContext* cx, Native native,
+ const CallArgs& args) {
+#ifdef DEBUG
+ RootedObject callee(cx, &args.callee());
+#endif
+
+ MOZ_ASSERT(args.thisv().isMagic());
+ if (!CallJSNative(cx, native, CallReason::Call, args)) {
+ return false;
+ }
+
+ /*
+ * Native constructors must return non-primitive values on success.
+ * Although it is legal, if a constructor returns the callee, there is a
+ * 99.9999% chance it is a bug. If any valid code actually wants the
+ * constructor to return the callee, the assertion can be removed or
+ * (another) conjunct can be added to the antecedent.
+ *
+ * Exceptions:
+ * - (new Object(Object)) returns the callee.
+ * - The bound function construct hook can return an arbitrary object,
+ * including the callee.
+ *
+   * Also allow this when a debugger onNativeCall hook is active, since
+   * fuzzing may make it happen.
+ */
+ MOZ_ASSERT(args.rval().isObject());
+ MOZ_ASSERT_IF(!JS_IsNativeFunction(callee, obj_construct) &&
+ !callee->is<BoundFunctionObject>() &&
+ !cx->insideDebuggerEvaluationWithOnNativeCallHook,
+ args.rval() != ObjectValue(*callee));
+
+ return true;
+}
+
+/*
+ * Find a function reference and its 'this' value implicit first parameter
+ * under argc arguments on cx's stack, and call the function. Push missing
+ * required arguments, allocate declared local variables, and pop everything
+ * when done. Then push the return value.
+ *
+ * Note: This function DOES NOT call GetThisValue to munge |args.thisv()| if
+ * necessary. The caller (usually the interpreter) must have performed
+ * this step already!
+ */
+bool js::InternalCallOrConstruct(JSContext* cx, const CallArgs& args,
+ MaybeConstruct construct,
+ CallReason reason /* = CallReason::Call */) {
+ MOZ_ASSERT(args.length() <= ARGS_LENGTH_MAX);
+
+ unsigned skipForCallee = args.length() + 1 + (construct == CONSTRUCT);
+ if (args.calleev().isPrimitive()) {
+ return ReportIsNotFunction(cx, args.calleev(), skipForCallee, construct);
+ }
+
+ /* Invoke non-functions. */
+ if (MOZ_UNLIKELY(!args.callee().is<JSFunction>())) {
+ MOZ_ASSERT_IF(construct, !args.callee().isConstructor());
+
+ if (!args.callee().isCallable()) {
+ return ReportIsNotFunction(cx, args.calleev(), skipForCallee, construct);
+ }
+
+ if (args.callee().is<ProxyObject>()) {
+ RootedObject proxy(cx, &args.callee());
+ return Proxy::call(cx, proxy, args);
+ }
+
+ JSNative call = args.callee().callHook();
+ MOZ_ASSERT(call, "isCallable without a callHook?");
+
+ return CallJSNative(cx, call, reason, args);
+ }
+
+ /* Invoke native functions. */
+ RootedFunction fun(cx, &args.callee().as<JSFunction>());
+ if (fun->isNativeFun()) {
+ MOZ_ASSERT_IF(construct, !fun->isConstructor());
+ JSNative native = fun->native();
+ if (!construct && args.ignoresReturnValue() && fun->hasJitInfo()) {
+ const JSJitInfo* jitInfo = fun->jitInfo();
+ if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
+ native = jitInfo->ignoresReturnValueMethod;
+ }
+ }
+ return CallJSNative(cx, native, reason, args);
+ }
+
+ // Self-hosted builtins are considered native by the onNativeCall hook.
+ if (fun->isSelfHostedBuiltin()) {
+ NativeResumeMode resumeMode = DebugAPI::onNativeCall(cx, args, reason);
+ if (resumeMode != NativeResumeMode::Continue) {
+ return resumeMode == NativeResumeMode::Override;
+ }
+ }
+
+ if (!JSFunction::getOrCreateScript(cx, fun)) {
+ return false;
+ }
+
+ /* Run function until JSOp::RetRval, JSOp::Return or error. */
+ InvokeState state(cx, args, construct);
+
+ // Create |this| if we're constructing. Switch to the callee's realm to
+ // ensure this object has the correct realm.
+ AutoRealm ar(cx, state.script());
+ if (construct && !MaybeCreateThisForConstructor(cx, args)) {
+ return false;
+ }
+
+ // Calling class constructors throws an error from the callee's realm.
+ if (construct != CONSTRUCT && fun->isClassConstructor()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_CALL_CLASS_CONSTRUCTOR);
+ return false;
+ }
+
+ bool ok = RunScript(cx, state);
+
+ MOZ_ASSERT_IF(ok && construct, args.rval().isObject());
+ return ok;
+}
+
+// Returns true if the callee needs an outerized |this| object. Outerization
+// means passing the WindowProxy instead of the Window (a GlobalObject) because
+// we must never expose the Window to script. This returns false only for DOM
+// getters or setters.
+static bool CalleeNeedsOuterizedThisObject(const Value& callee) {
+ if (!callee.isObject() || !callee.toObject().is<JSFunction>()) {
+ return true;
+ }
+ JSFunction& fun = callee.toObject().as<JSFunction>();
+ if (!fun.isNativeFun() || !fun.hasJitInfo()) {
+ return true;
+ }
+ return fun.jitInfo()->needsOuterizedThisObject();
+}
+
+static bool InternalCall(JSContext* cx, const AnyInvokeArgs& args,
+ CallReason reason) {
+ MOZ_ASSERT(args.array() + args.length() == args.end(),
+ "must pass calling arguments to a calling attempt");
+
+#ifdef DEBUG
+ // The caller is responsible for calling GetThisObject if needed.
+ if (args.thisv().isObject()) {
+ JSObject* thisObj = &args.thisv().toObject();
+ MOZ_ASSERT_IF(CalleeNeedsOuterizedThisObject(args.calleev()),
+ GetThisObject(thisObj) == thisObj);
+ }
+#endif
+
+ return InternalCallOrConstruct(cx, args, NO_CONSTRUCT, reason);
+}
+
+bool js::CallFromStack(JSContext* cx, const CallArgs& args,
+ CallReason reason /* = CallReason::Call */) {
+ return InternalCall(cx, static_cast<const AnyInvokeArgs&>(args), reason);
+}
+
+// ES7 rev 0c1bd3004329336774cbc90de727cd0cf5f11e93
+// 7.3.12 Call.
+bool js::Call(JSContext* cx, HandleValue fval, HandleValue thisv,
+ const AnyInvokeArgs& args, MutableHandleValue rval,
+ CallReason reason) {
+ // Explicitly qualify these methods to bypass AnyInvokeArgs's deliberate
+ // shadowing.
+ args.CallArgs::setCallee(fval);
+ args.CallArgs::setThis(thisv);
+
+ if (thisv.isObject()) {
+ // If |this| is a global object, it might be a Window and in that case we
+ // usually have to pass the WindowProxy instead.
+ JSObject* thisObj = &thisv.toObject();
+ if (thisObj->is<GlobalObject>()) {
+ if (CalleeNeedsOuterizedThisObject(fval)) {
+ args.mutableThisv().setObject(*GetThisObject(thisObj));
+ }
+ } else {
+ // Fast path: we don't have to do anything if the object isn't a global.
+ MOZ_ASSERT(GetThisObject(thisObj) == thisObj);
+ }
+ }
+
+ if (!InternalCall(cx, args, reason)) {
+ return false;
+ }
+
+ rval.set(args.rval());
+ return true;
+}
+
+static bool InternalConstruct(JSContext* cx, const AnyConstructArgs& args,
+ CallReason reason = CallReason::Call) {
+ MOZ_ASSERT(args.array() + args.length() + 1 == args.end(),
+ "must pass constructing arguments to a construction attempt");
+ MOZ_ASSERT(!FunctionClass.getConstruct());
+ MOZ_ASSERT(!ExtendedFunctionClass.getConstruct());
+
+ // Callers are responsible for enforcing these preconditions.
+ MOZ_ASSERT(IsConstructor(args.calleev()),
+ "trying to construct a value that isn't a constructor");
+ MOZ_ASSERT(IsConstructor(args.CallArgs::newTarget()),
+ "provided new.target value must be a constructor");
+
+ MOZ_ASSERT(args.thisv().isMagic(JS_IS_CONSTRUCTING) ||
+ args.thisv().isObject());
+
+ JSObject& callee = args.callee();
+ if (callee.is<JSFunction>()) {
+ RootedFunction fun(cx, &callee.as<JSFunction>());
+
+ if (fun->isNativeFun()) {
+ return CallJSNativeConstructor(cx, fun->native(), args);
+ }
+
+ if (!InternalCallOrConstruct(cx, args, CONSTRUCT, reason)) {
+ return false;
+ }
+
+ MOZ_ASSERT(args.CallArgs::rval().isObject());
+ return true;
+ }
+
+ if (callee.is<ProxyObject>()) {
+ RootedObject proxy(cx, &callee);
+ return Proxy::construct(cx, proxy, args);
+ }
+
+ JSNative construct = callee.constructHook();
+ MOZ_ASSERT(construct != nullptr, "IsConstructor without a construct hook?");
+
+ return CallJSNativeConstructor(cx, construct, args);
+}
+
+// Check that |callee|, the callee in a |new| expression, is a constructor.
+static bool StackCheckIsConstructorCalleeNewTarget(JSContext* cx,
+ HandleValue callee,
+ HandleValue newTarget) {
+ // Calls from the stack could have any old non-constructor callee.
+ if (!IsConstructor(callee)) {
+ ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_SEARCH_STACK, callee,
+ nullptr);
+ return false;
+ }
+
+ // The new.target has already been vetted by previous calls, or is the callee.
+ // We can just assert that it's a constructor.
+ MOZ_ASSERT(IsConstructor(newTarget));
+
+ return true;
+}
+
+bool js::ConstructFromStack(JSContext* cx, const CallArgs& args,
+ CallReason reason /* CallReason::Call */) {
+ if (!StackCheckIsConstructorCalleeNewTarget(cx, args.calleev(),
+ args.newTarget())) {
+ return false;
+ }
+
+ return InternalConstruct(cx, static_cast<const AnyConstructArgs&>(args),
+ reason);
+}
+
+bool js::Construct(JSContext* cx, HandleValue fval,
+ const AnyConstructArgs& args, HandleValue newTarget,
+ MutableHandleObject objp) {
+ MOZ_ASSERT(args.thisv().isMagic(JS_IS_CONSTRUCTING));
+
+ // Explicitly qualify to bypass AnyConstructArgs's deliberate shadowing.
+ args.CallArgs::setCallee(fval);
+ args.CallArgs::newTarget().set(newTarget);
+
+ if (!InternalConstruct(cx, args)) {
+ return false;
+ }
+
+ MOZ_ASSERT(args.CallArgs::rval().isObject());
+ objp.set(&args.CallArgs::rval().toObject());
+ return true;
+}
+
+bool js::InternalConstructWithProvidedThis(JSContext* cx, HandleValue fval,
+ HandleValue thisv,
+ const AnyConstructArgs& args,
+ HandleValue newTarget,
+ MutableHandleValue rval) {
+ args.CallArgs::setCallee(fval);
+
+ MOZ_ASSERT(thisv.isObject());
+ args.CallArgs::setThis(thisv);
+
+ args.CallArgs::newTarget().set(newTarget);
+
+ if (!InternalConstruct(cx, args)) {
+ return false;
+ }
+
+ rval.set(args.CallArgs::rval());
+ return true;
+}
+
+bool js::CallGetter(JSContext* cx, HandleValue thisv, HandleValue getter,
+ MutableHandleValue rval) {
+ FixedInvokeArgs<0> args(cx);
+
+ return Call(cx, getter, thisv, args, rval, CallReason::Getter);
+}
+
+bool js::CallSetter(JSContext* cx, HandleValue thisv, HandleValue setter,
+ HandleValue v) {
+ FixedInvokeArgs<1> args(cx);
+ args[0].set(v);
+
+ RootedValue ignored(cx);
+ return Call(cx, setter, thisv, args, &ignored, CallReason::Setter);
+}
+
+bool js::ExecuteKernel(JSContext* cx, HandleScript script,
+ HandleObject envChainArg, AbstractFramePtr evalInFrame,
+ MutableHandleValue result) {
+ MOZ_ASSERT_IF(script->isGlobalCode(),
+ IsGlobalLexicalEnvironment(envChainArg) ||
+ !IsSyntacticEnvironment(envChainArg));
+#ifdef DEBUG
+ RootedObject terminatingEnv(cx, envChainArg);
+ while (IsSyntacticEnvironment(terminatingEnv)) {
+ terminatingEnv = terminatingEnv->enclosingEnvironment();
+ }
+ MOZ_ASSERT(terminatingEnv->is<GlobalObject>() ||
+ script->hasNonSyntacticScope());
+#endif
+
+ if (script->treatAsRunOnce()) {
+ if (script->hasRunOnce()) {
+ JS_ReportErrorASCII(cx,
+ "Trying to execute a run-once script multiple times");
+ return false;
+ }
+
+ script->setHasRunOnce();
+ }
+
+ if (script->isEmpty()) {
+ result.setUndefined();
+ return true;
+ }
+
+ probes::StartExecution(script);
+ ExecuteState state(cx, script, envChainArg, evalInFrame, result);
+ bool ok = RunScript(cx, state);
+ probes::StopExecution(script);
+
+ return ok;
+}
+
+bool js::Execute(JSContext* cx, HandleScript script, HandleObject envChain,
+ MutableHandleValue rval) {
+ /* The env chain is something we control, so we know it can't
+ have any outer objects on it. */
+ MOZ_ASSERT(!IsWindowProxy(envChain));
+
+ if (script->isModule()) {
+ MOZ_RELEASE_ASSERT(
+ envChain == script->module()->environment(),
+ "Module scripts can only be executed in the module's environment");
+ } else {
+ MOZ_RELEASE_ASSERT(
+ IsGlobalLexicalEnvironment(envChain) || script->hasNonSyntacticScope(),
+ "Only global scripts with non-syntactic envs can be executed with "
+ "interesting envchains");
+ }
+
+ /* Ensure the env chain is all same-compartment and terminates in a global. */
+#ifdef DEBUG
+ JSObject* s = envChain;
+ do {
+ cx->check(s);
+ MOZ_ASSERT_IF(!s->enclosingEnvironment(), s->is<GlobalObject>());
+ } while ((s = s->enclosingEnvironment()));
+#endif
+
+ return ExecuteKernel(cx, script, envChain, NullFramePtr() /* evalInFrame */,
+ rval);
+}
+
+/*
+ * ES6 (4-25-16) 12.10.4 InstanceofOperator
+ */
+bool js::InstanceofOperator(JSContext* cx, HandleObject obj, HandleValue v,
+ bool* bp) {
+ /* Step 1. is handled by caller. */
+
+ /* Step 2. */
+ RootedValue hasInstance(cx);
+ RootedId id(cx, PropertyKey::Symbol(cx->wellKnownSymbols().hasInstance));
+ if (!GetProperty(cx, obj, obj, id, &hasInstance)) {
+ return false;
+ }
+
+ if (!hasInstance.isNullOrUndefined()) {
+ if (!IsCallable(hasInstance)) {
+ return ReportIsNotFunction(cx, hasInstance);
+ }
+
+ /* Step 3. */
+ RootedValue rval(cx);
+ if (!Call(cx, hasInstance, obj, v, &rval)) {
+ return false;
+ }
+ *bp = ToBoolean(rval);
+ return true;
+ }
+
+ /* Step 4. */
+ if (!obj->isCallable()) {
+ RootedValue val(cx, ObjectValue(*obj));
+ return ReportIsNotFunction(cx, val);
+ }
+
+ /* Step 5. */
+ return OrdinaryHasInstance(cx, obj, v, bp);
+}
+
+JSType js::TypeOfObject(JSObject* obj) {
+#ifdef ENABLE_RECORD_TUPLE
+ MOZ_ASSERT(!js::IsExtendedPrimitive(*obj));
+#endif
+
+ AutoUnsafeCallWithABI unsafe;
+ if (EmulatesUndefined(obj)) {
+ return JSTYPE_UNDEFINED;
+ }
+ if (obj->isCallable()) {
+ return JSTYPE_FUNCTION;
+ }
+ return JSTYPE_OBJECT;
+}
+
+#ifdef ENABLE_RECORD_TUPLE
+JSType TypeOfExtendedPrimitive(JSObject* obj) {
+ MOZ_ASSERT(js::IsExtendedPrimitive(*obj));
+
+ if (obj->is<RecordType>()) {
+ return JSTYPE_RECORD;
+ }
+ if (obj->is<TupleType>()) {
+ return JSTYPE_TUPLE;
+ }
+ MOZ_CRASH("Unknown ExtendedPrimitive");
+}
+#endif
+
+JSType js::TypeOfValue(const Value& v) {
+ switch (v.type()) {
+ case ValueType::Double:
+ case ValueType::Int32:
+ return JSTYPE_NUMBER;
+ case ValueType::String:
+ return JSTYPE_STRING;
+ case ValueType::Null:
+ return JSTYPE_OBJECT;
+ case ValueType::Undefined:
+ return JSTYPE_UNDEFINED;
+ case ValueType::Object:
+ return TypeOfObject(&v.toObject());
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+ return TypeOfExtendedPrimitive(&v.toExtendedPrimitive());
+#endif
+ case ValueType::Boolean:
+ return JSTYPE_BOOLEAN;
+ case ValueType::BigInt:
+ return JSTYPE_BIGINT;
+ case ValueType::Symbol:
+ return JSTYPE_SYMBOL;
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ break;
+ }
+
+ ReportBadValueTypeAndCrash(v);
+}
+
+bool js::CheckClassHeritageOperation(JSContext* cx, HandleValue heritage) {
+ if (IsConstructor(heritage)) {
+ return true;
+ }
+
+ if (heritage.isNull()) {
+ return true;
+ }
+
+ if (heritage.isObject()) {
+ ReportIsNotFunction(cx, heritage, 0, CONSTRUCT);
+ return false;
+ }
+
+ ReportValueError(cx, JSMSG_BAD_HERITAGE, -1, heritage, nullptr,
+ "not an object or null");
+ return false;
+}
+
+PlainObject* js::ObjectWithProtoOperation(JSContext* cx, HandleValue val) {
+ if (!val.isObjectOrNull()) {
+ ReportValueError(cx, JSMSG_NOT_OBJORNULL, -1, val, nullptr);
+ return nullptr;
+ }
+
+ RootedObject proto(cx, val.toObjectOrNull());
+ return NewPlainObjectWithProto(cx, proto);
+}
+
+JSObject* js::FunWithProtoOperation(JSContext* cx, HandleFunction fun,
+ HandleObject parent, HandleObject proto) {
+ return CloneFunctionReuseScript(cx, fun, parent, proto);
+}
+
+/*
+ * Enter the new with environment using an object at sp[-1] and associate the
+ * depth of the with block with sp + stackIndex.
+ */
+bool js::EnterWithOperation(JSContext* cx, AbstractFramePtr frame,
+ HandleValue val, Handle<WithScope*> scope) {
+ RootedObject obj(cx);
+ if (val.isObject()) {
+ obj = &val.toObject();
+ } else {
+ obj = ToObject(cx, val);
+ if (!obj) {
+ return false;
+ }
+ }
+
+ RootedObject envChain(cx, frame.environmentChain());
+ WithEnvironmentObject* withobj =
+ WithEnvironmentObject::create(cx, obj, envChain, scope);
+ if (!withobj) {
+ return false;
+ }
+
+ frame.pushOnEnvironmentChain(*withobj);
+ return true;
+}
+
+static void PopEnvironment(JSContext* cx, EnvironmentIter& ei) {
+ switch (ei.scope().kind()) {
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ case ScopeKind::FunctionLexical:
+ case ScopeKind::ClassBody:
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopLexical(cx, ei);
+ }
+ if (ei.scope().hasEnvironment()) {
+ ei.initialFrame()
+ .popOffEnvironmentChain<ScopedLexicalEnvironmentObject>();
+ }
+ break;
+ case ScopeKind::With:
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopWith(ei.initialFrame());
+ }
+ ei.initialFrame().popOffEnvironmentChain<WithEnvironmentObject>();
+ break;
+ case ScopeKind::Function:
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopCall(cx, ei.initialFrame());
+ }
+ if (ei.scope().hasEnvironment()) {
+ ei.initialFrame().popOffEnvironmentChain<CallObject>();
+ }
+ break;
+ case ScopeKind::FunctionBodyVar:
+ case ScopeKind::StrictEval:
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopVar(cx, ei);
+ }
+ if (ei.scope().hasEnvironment()) {
+ ei.initialFrame().popOffEnvironmentChain<VarEnvironmentObject>();
+ }
+ break;
+ case ScopeKind::Module:
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopModule(cx, ei);
+ }
+ break;
+ case ScopeKind::Eval:
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic:
+ break;
+ case ScopeKind::WasmInstance:
+ case ScopeKind::WasmFunction:
+ MOZ_CRASH("wasm is not interpreted");
+ break;
+ }
+}
+
+// Unwind environment chain and iterator to match the env corresponding to
+// the given bytecode position.
+void js::UnwindEnvironment(JSContext* cx, EnvironmentIter& ei, jsbytecode* pc) {
+ if (!ei.withinInitialFrame()) {
+ return;
+ }
+
+ Rooted<Scope*> scope(cx, ei.initialFrame().script()->innermostScope(pc));
+
+#ifdef DEBUG
+ // A frame's environment chain cannot be unwound to anything enclosing the
+ // body scope of a script. This includes the parameter defaults
+ // environment and the decl env object. These environments, once pushed
+ // onto the environment chain, are expected to be there for the duration
+ // of the frame.
+ //
+ // Attempting to unwind to the parameter defaults code in a script is a
+ // bug; that section of code has no try-catch blocks.
+ JSScript* script = ei.initialFrame().script();
+ for (uint32_t i = 0; i < script->bodyScopeIndex(); i++) {
+ MOZ_ASSERT(scope != script->getScope(GCThingIndex(i)));
+ }
+#endif
+
+ for (; ei.maybeScope() != scope; ei++) {
+ PopEnvironment(cx, ei);
+ }
+}
+
+// Unwind all environments. This is needed because block scopes may cover the
+// first bytecode at a script's main(). e.g.,
+//
+// function f() { { let i = 0; } }
+//
+// will have no pc location distinguishing the first block scope from the
+// outermost function scope.
+void js::UnwindAllEnvironmentsInFrame(JSContext* cx, EnvironmentIter& ei) {
+ for (; ei.withinInitialFrame(); ei++) {
+ PopEnvironment(cx, ei);
+ }
+}
+
+// Compute the pc needed to unwind the environment to the beginning of a try
+// block. We cannot unwind to *after* the JSOp::Try, because that might be the
+// first opcode of an inner scope, with the same problem as above. e.g.,
+//
+// try { { let x; } }
+//
+// will have no pc location distinguishing the try block scope from the inner
+// let block scope.
+jsbytecode* js::UnwindEnvironmentToTryPc(JSScript* script, const TryNote* tn) {
+ jsbytecode* pc = script->offsetToPC(tn->start);
+ if (tn->kind() == TryNoteKind::Catch || tn->kind() == TryNoteKind::Finally) {
+ pc -= JSOpLength_Try;
+ MOZ_ASSERT(JSOp(*pc) == JSOp::Try);
+ } else if (tn->kind() == TryNoteKind::Destructuring) {
+ pc -= JSOpLength_TryDestructuring;
+ MOZ_ASSERT(JSOp(*pc) == JSOp::TryDestructuring);
+ }
+ return pc;
+}
+
+static void SettleOnTryNote(JSContext* cx, const TryNote* tn,
+ EnvironmentIter& ei, InterpreterRegs& regs) {
+ // Unwind the environment to the beginning of the JSOp::Try.
+ UnwindEnvironment(cx, ei, UnwindEnvironmentToTryPc(regs.fp()->script(), tn));
+
+  // Set pc to the first bytecode after the try note, which points to the
+  // beginning of the catch or finally block.
+ regs.pc = regs.fp()->script()->offsetToPC(tn->start + tn->length);
+ regs.sp = regs.spForStackDepth(tn->stackDepth);
+}
+
+class InterpreterTryNoteFilter {
+ const InterpreterRegs& regs_;
+
+ public:
+ explicit InterpreterTryNoteFilter(const InterpreterRegs& regs)
+ : regs_(regs) {}
+ bool operator()(const TryNote* note) {
+ return note->stackDepth <= regs_.stackDepth();
+ }
+};
+
+class TryNoteIterInterpreter : public TryNoteIter<InterpreterTryNoteFilter> {
+ public:
+ TryNoteIterInterpreter(JSContext* cx, const InterpreterRegs& regs)
+ : TryNoteIter(cx, regs.fp()->script(), regs.pc,
+ InterpreterTryNoteFilter(regs)) {}
+};
+
+static void UnwindIteratorsForUncatchableException(
+ JSContext* cx, const InterpreterRegs& regs) {
+  // Cf. the regular (catchable) TryNoteIterInterpreter loop in
+ // ProcessTryNotes.
+ for (TryNoteIterInterpreter tni(cx, regs); !tni.done(); ++tni) {
+ const TryNote* tn = *tni;
+ switch (tn->kind()) {
+ case TryNoteKind::ForIn: {
+ Value* sp = regs.spForStackDepth(tn->stackDepth);
+ UnwindIteratorForUncatchableException(&sp[-1].toObject());
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+enum HandleErrorContinuation {
+ SuccessfulReturnContinuation,
+ ErrorReturnContinuation,
+ CatchContinuation,
+ FinallyContinuation
+};
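+// Reading of these continuations (a descriptive summary, inferred from
+// HandleError and ProcessTryNotes below): SuccessfulReturnContinuation
+// means unwinding found no handler and the frame exits normally (e.g. a
+// closing generator); ErrorReturnContinuation means a new error occurred
+// while unwinding; CatchContinuation and FinallyContinuation mean execution
+// resumes in this frame at the catch or finally block.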
+
+static HandleErrorContinuation ProcessTryNotes(JSContext* cx,
+ EnvironmentIter& ei,
+ InterpreterRegs& regs) {
+ for (TryNoteIterInterpreter tni(cx, regs); !tni.done(); ++tni) {
+ const TryNote* tn = *tni;
+
+ switch (tn->kind()) {
+ case TryNoteKind::Catch:
+ /* Catch cannot intercept the closing of a generator. */
+ if (cx->isClosingGenerator()) {
+ break;
+ }
+
+ SettleOnTryNote(cx, tn, ei, regs);
+ return CatchContinuation;
+
+ case TryNoteKind::Finally:
+ SettleOnTryNote(cx, tn, ei, regs);
+ return FinallyContinuation;
+
+ case TryNoteKind::ForIn: {
+ /* This is similar to JSOp::EndIter in the interpreter loop. */
+ MOZ_ASSERT(tn->stackDepth <= regs.stackDepth());
+ Value* sp = regs.spForStackDepth(tn->stackDepth);
+ JSObject* obj = &sp[-1].toObject();
+ CloseIterator(obj);
+ break;
+ }
+
+ case TryNoteKind::Destructuring: {
+ // Whether the destructuring iterator is done is at the top of the
+ // stack. The iterator object is second from the top.
+ MOZ_ASSERT(tn->stackDepth > 1);
+ Value* sp = regs.spForStackDepth(tn->stackDepth);
+ RootedValue doneValue(cx, sp[-1]);
+ MOZ_RELEASE_ASSERT(!doneValue.isMagic());
+ bool done = ToBoolean(doneValue);
+ if (!done) {
+ RootedObject iterObject(cx, &sp[-2].toObject());
+ if (!IteratorCloseForException(cx, iterObject)) {
+ SettleOnTryNote(cx, tn, ei, regs);
+ return ErrorReturnContinuation;
+ }
+ }
+ break;
+ }
+
+ case TryNoteKind::ForOf:
+ case TryNoteKind::Loop:
+ break;
+
+ // TryNoteKind::ForOfIterClose is handled internally by the try note
+ // iterator.
+ default:
+ MOZ_CRASH("Invalid try note");
+ }
+ }
+
+ return SuccessfulReturnContinuation;
+}
+
+bool js::HandleClosingGeneratorReturn(JSContext* cx, AbstractFramePtr frame,
+ bool ok) {
+ /*
+ * Propagate the exception or error to the caller unless the exception
+ * is an asynchronous return from a generator.
+ */
+ if (cx->isClosingGenerator()) {
+ cx->clearPendingException();
+ ok = true;
+ auto* genObj = GetGeneratorObjectForFrame(cx, frame);
+ genObj->setClosed();
+ }
+ return ok;
+}
+
+static HandleErrorContinuation HandleError(JSContext* cx,
+ InterpreterRegs& regs) {
+ MOZ_ASSERT(regs.fp()->script()->containsPC(regs.pc));
+ MOZ_ASSERT(cx->realm() == regs.fp()->script()->realm());
+
+ if (regs.fp()->script()->hasScriptCounts()) {
+ PCCounts* counts = regs.fp()->script()->getThrowCounts(regs.pc);
+ // If we failed to allocate, then skip the increment and continue to
+ // handle the exception.
+ if (counts) {
+ counts->numExec()++;
+ }
+ }
+
+ EnvironmentIter ei(cx, regs.fp(), regs.pc);
+ bool ok = false;
+
+again:
+ if (cx->isExceptionPending()) {
+ /* Call debugger throw hooks. */
+ if (!cx->isClosingGenerator()) {
+ if (!DebugAPI::onExceptionUnwind(cx, regs.fp())) {
+ if (!cx->isExceptionPending()) {
+ goto again;
+ }
+ }
+ // Ensure that the debugger hasn't returned 'true' while clearing the
+ // exception state.
+ MOZ_ASSERT(cx->isExceptionPending());
+ }
+
+ HandleErrorContinuation res = ProcessTryNotes(cx, ei, regs);
+ switch (res) {
+ case SuccessfulReturnContinuation:
+ break;
+ case ErrorReturnContinuation:
+ goto again;
+ case CatchContinuation:
+ case FinallyContinuation:
+        // No need to increment the PCCounts execution count here, as the
+        // interpreter increments any PCCounts if present.
+ MOZ_ASSERT_IF(regs.fp()->script()->hasScriptCounts(),
+ regs.fp()->script()->maybeGetPCCounts(regs.pc));
+ return res;
+ }
+
+ ok = HandleClosingGeneratorReturn(cx, regs.fp(), ok);
+ } else {
+ UnwindIteratorsForUncatchableException(cx, regs);
+
+ // We may be propagating a forced return from a debugger hook function.
+ if (MOZ_UNLIKELY(cx->isPropagatingForcedReturn())) {
+ cx->clearPropagatingForcedReturn();
+ ok = true;
+ }
+ }
+
+ ok = DebugAPI::onLeaveFrame(cx, regs.fp(), regs.pc, ok);
+
+ // After this point, we will pop the frame regardless. Settle the frame on
+ // the end of the script.
+ regs.setToEndOfScript();
+
+ return ok ? SuccessfulReturnContinuation : ErrorReturnContinuation;
+}
+
+#define REGS (activation.regs())
+#define PUSH_COPY(v) \
+ do { \
+ *REGS.sp++ = (v); \
+ cx->debugOnlyCheck(REGS.sp[-1]); \
+ } while (0)
+#define PUSH_COPY_SKIP_CHECK(v) *REGS.sp++ = (v)
+#define PUSH_NULL() REGS.sp++->setNull()
+#define PUSH_UNDEFINED() REGS.sp++->setUndefined()
+#define PUSH_BOOLEAN(b) REGS.sp++->setBoolean(b)
+#define PUSH_DOUBLE(d) REGS.sp++->setDouble(d)
+#define PUSH_INT32(i) REGS.sp++->setInt32(i)
+#define PUSH_SYMBOL(s) REGS.sp++->setSymbol(s)
+#define PUSH_BIGINT(b) REGS.sp++->setBigInt(b)
+#define PUSH_STRING(s) \
+ do { \
+ REGS.sp++->setString(s); \
+ cx->debugOnlyCheck(REGS.sp[-1]); \
+ } while (0)
+#define PUSH_OBJECT(obj) \
+ do { \
+ REGS.sp++->setObject(obj); \
+ cx->debugOnlyCheck(REGS.sp[-1]); \
+ } while (0)
+#define PUSH_OBJECT_OR_NULL(obj) \
+ do { \
+ REGS.sp++->setObjectOrNull(obj); \
+ cx->debugOnlyCheck(REGS.sp[-1]); \
+ } while (0)
+#ifdef ENABLE_RECORD_TUPLE
+# define PUSH_EXTENDED_PRIMITIVE(obj) \
+ do { \
+ REGS.sp++->setExtendedPrimitive(obj); \
+ cx->debugOnlyCheck(REGS.sp[-1]); \
+ } while (0)
+#endif
+#define PUSH_MAGIC(magic) REGS.sp++->setMagic(magic)
+#define POP_COPY_TO(v) (v) = *--REGS.sp
+#define POP_RETURN_VALUE() REGS.fp()->setReturnValue(*--REGS.sp)
+
+/*
+ * JSOp::SetName and JSOp::SetProp have the same length; they differ only
+ * slightly but remain distinct for the decompiler.
+ */
+static_assert(JSOpLength_SetName == JSOpLength_SetProp);
+
+/* See TRY_BRANCH_AFTER_COND. */
+static_assert(JSOpLength_JumpIfTrue == JSOpLength_JumpIfFalse);
+static_assert(uint8_t(JSOp::JumpIfTrue) == uint8_t(JSOp::JumpIfFalse) + 1);
+
+/*
+ * Compute the implicit |this| value used by a call expression with an
+ * unqualified name reference. The environment on which the binding was
+ * found is passed as the argument |env|.
+ *
+ * The implicit |this| is |undefined| for all environment types except
+ * WithEnvironmentObject. This is the case for |with(...) {...}| expressions or
+ * if the embedding uses a non-syntactic WithEnvironmentObject.
+ *
+ * NOTE: A non-syntactic WithEnvironmentObject may have a corresponding
+ * extensible LexicalEnvironmentObject, but it will not be considered as an
+ * implicit |this|. This is for compatibility with the Gecko subscript loader.
+ */
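+// Illustrative JS example: in |with (o) { f(); }|, if |f| resolves on |o|,
+// the unqualified call uses |o| as the implicit |this|; a plain global call
+// |g()| uses |undefined|.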
+static inline Value ComputeImplicitThis(JSObject* env) {
+ // Fast-path for GlobalObject
+ if (env->is<GlobalObject>()) {
+ return UndefinedValue();
+ }
+
+ // WithEnvironmentObjects have an actual implicit |this|
+ if (env->is<WithEnvironmentObject>()) {
+ return ObjectValue(*GetThisObjectOfWith(env));
+ }
+
+ // Debugger environments need special casing, as despite being
+ // non-syntactic, they wrap syntactic environments and should not be
+ // treated like other embedding-specific non-syntactic environments.
+ if (env->is<DebugEnvironmentProxy>()) {
+ return ComputeImplicitThis(&env->as<DebugEnvironmentProxy>().environment());
+ }
+
+ MOZ_ASSERT(env->is<EnvironmentObject>());
+ return UndefinedValue();
+}
+
+static MOZ_ALWAYS_INLINE bool AddOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue res) {
+ if (lhs.isInt32() && rhs.isInt32()) {
+ int32_t l = lhs.toInt32(), r = rhs.toInt32();
+ int32_t t;
+ if (MOZ_LIKELY(SafeAdd(l, r, &t))) {
+ res.setInt32(t);
+ return true;
+ }
+ }
+
+ if (!ToPrimitive(cx, lhs)) {
+ return false;
+ }
+ if (!ToPrimitive(cx, rhs)) {
+ return false;
+ }
+
+ bool lIsString = lhs.isString();
+ bool rIsString = rhs.isString();
+ if (lIsString || rIsString) {
+ JSString* lstr;
+ if (lIsString) {
+ lstr = lhs.toString();
+ } else {
+ lstr = ToString<CanGC>(cx, lhs);
+ if (!lstr) {
+ return false;
+ }
+ }
+
+ JSString* rstr;
+ if (rIsString) {
+ rstr = rhs.toString();
+ } else {
+ // Save/restore lstr in case of GC activity under ToString.
+ lhs.setString(lstr);
+ rstr = ToString<CanGC>(cx, rhs);
+ if (!rstr) {
+ return false;
+ }
+ lstr = lhs.toString();
+ }
+ JSString* str = ConcatStrings<NoGC>(cx, lstr, rstr);
+ if (!str) {
+ RootedString nlstr(cx, lstr), nrstr(cx, rstr);
+ str = ConcatStrings<CanGC>(cx, nlstr, nrstr);
+ if (!str) {
+ return false;
+ }
+ }
+ res.setString(str);
+ return true;
+ }
+
+ if (!ToNumeric(cx, lhs) || !ToNumeric(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::addValue(cx, lhs, rhs, res);
+ }
+
+ res.setNumber(lhs.toNumber() + rhs.toNumber());
+ return true;
+}
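+
+// Illustrative |+| semantics implemented by AddOperation above: 1 + "2"
+// yields "12" (string concatenation after ToPrimitive), 1n + 2n yields 3n,
+// and the mixed 1n + 1 throws a TypeError (reported by BigInt::addValue).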
+
+static MOZ_ALWAYS_INLINE bool SubOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue res) {
+ if (!ToNumeric(cx, lhs) || !ToNumeric(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::subValue(cx, lhs, rhs, res);
+ }
+
+ res.setNumber(lhs.toNumber() - rhs.toNumber());
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool MulOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue res) {
+ if (!ToNumeric(cx, lhs) || !ToNumeric(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::mulValue(cx, lhs, rhs, res);
+ }
+
+ res.setNumber(lhs.toNumber() * rhs.toNumber());
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool DivOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue res) {
+ if (!ToNumeric(cx, lhs) || !ToNumeric(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::divValue(cx, lhs, rhs, res);
+ }
+
+ res.setNumber(NumberDiv(lhs.toNumber(), rhs.toNumber()));
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool ModOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue res) {
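+  // The int32 fast path below is limited to l >= 0 and r > 0: a negative
+  // dividend can require a -0 result (e.g. -1 % 1 is -0 in JS), which is
+  // not representable as an int32, and r > 0 also rules out the
+  // division-by-zero (NaN) case.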
+ int32_t l, r;
+ if (lhs.isInt32() && rhs.isInt32() && (l = lhs.toInt32()) >= 0 &&
+ (r = rhs.toInt32()) > 0) {
+ int32_t mod = l % r;
+ res.setInt32(mod);
+ return true;
+ }
+
+ if (!ToNumeric(cx, lhs) || !ToNumeric(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::modValue(cx, lhs, rhs, res);
+ }
+
+ res.setNumber(NumberMod(lhs.toNumber(), rhs.toNumber()));
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool PowOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue res) {
+ if (!ToNumeric(cx, lhs) || !ToNumeric(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::powValue(cx, lhs, rhs, res);
+ }
+
+ res.setNumber(ecmaPow(lhs.toNumber(), rhs.toNumber()));
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool BitNotOperation(JSContext* cx,
+ MutableHandleValue in,
+ MutableHandleValue out) {
+ if (!ToInt32OrBigInt(cx, in)) {
+ return false;
+ }
+
+ if (in.isBigInt()) {
+ return BigInt::bitNotValue(cx, in, out);
+ }
+
+ out.setInt32(~in.toInt32());
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool BitXorOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue out) {
+ if (!ToInt32OrBigInt(cx, lhs) || !ToInt32OrBigInt(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::bitXorValue(cx, lhs, rhs, out);
+ }
+
+ out.setInt32(lhs.toInt32() ^ rhs.toInt32());
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool BitOrOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue out) {
+ if (!ToInt32OrBigInt(cx, lhs) || !ToInt32OrBigInt(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::bitOrValue(cx, lhs, rhs, out);
+ }
+
+ out.setInt32(lhs.toInt32() | rhs.toInt32());
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool BitAndOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue out) {
+ if (!ToInt32OrBigInt(cx, lhs) || !ToInt32OrBigInt(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::bitAndValue(cx, lhs, rhs, out);
+ }
+
+ out.setInt32(lhs.toInt32() & rhs.toInt32());
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool BitLshOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue out) {
+ if (!ToInt32OrBigInt(cx, lhs) || !ToInt32OrBigInt(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::lshValue(cx, lhs, rhs, out);
+ }
+
+ // Signed left-shift is undefined on overflow, so |lhs << (rhs & 31)| won't
+ // work. Instead, convert to unsigned space (where overflow is treated
+ // modularly), perform the operation there, then convert back.
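+  // E.g. |1 << 31| must produce INT32_MIN (-2147483648) rather than
+  // triggering signed-overflow UB in C++.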
+ uint32_t left = static_cast<uint32_t>(lhs.toInt32());
+ uint8_t right = rhs.toInt32() & 31;
+ out.setInt32(mozilla::WrapToSigned(left << right));
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool BitRshOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue out) {
+ if (!ToInt32OrBigInt(cx, lhs) || !ToInt32OrBigInt(cx, rhs)) {
+ return false;
+ }
+
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::rshValue(cx, lhs, rhs, out);
+ }
+
+ out.setInt32(lhs.toInt32() >> (rhs.toInt32() & 31));
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool UrshOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ MutableHandleValue out) {
+ if (!ToNumeric(cx, lhs) || !ToNumeric(cx, rhs)) {
+ return false;
+ }
+
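+  // BigInt has no unsigned right shift; e.g. |1n >>> 0n| throws a
+  // TypeError.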
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BIGINT_TO_NUMBER);
+ return false;
+ }
+
+ uint32_t left;
+ int32_t right;
+ if (!ToUint32(cx, lhs, &left) || !ToInt32(cx, rhs, &right)) {
+ return false;
+ }
+ left >>= right & 31;
+ out.setNumber(uint32_t(left));
+ return true;
+}
+
+// BigInt proposal 3.2.4 Abstract Relational Comparison
+// Returns Nothing when at least one operand is a NaN, or when
+// ToNumeric or StringToBigInt can't interpret a string as a numeric
+// value. (These cases correspond to a NaN result in the spec.)
+// Otherwise, return a boolean to indicate whether lhs is less than
+// rhs. The operands must be primitives; the caller is responsible for
+// evaluating them in the correct order.
+static MOZ_ALWAYS_INLINE bool LessThanImpl(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ mozilla::Maybe<bool>& res) {
+ // Steps 1 and 2 are performed by the caller.
+
+ // Step 3.
+ if (lhs.isString() && rhs.isString()) {
+ JSString* l = lhs.toString();
+ JSString* r = rhs.toString();
+ int32_t result;
+ if (!CompareStrings(cx, l, r, &result)) {
+ return false;
+ }
+ res = mozilla::Some(result < 0);
+ return true;
+ }
+
+ // Step 4a.
+ if (lhs.isBigInt() && rhs.isString()) {
+ return BigInt::lessThan(cx, lhs, rhs, res);
+ }
+
+ // Step 4b.
+ if (lhs.isString() && rhs.isBigInt()) {
+ return BigInt::lessThan(cx, lhs, rhs, res);
+ }
+
+ // Steps 4c and 4d.
+ if (!ToNumeric(cx, lhs) || !ToNumeric(cx, rhs)) {
+ return false;
+ }
+
+ // Steps 4e-j.
+ if (lhs.isBigInt() || rhs.isBigInt()) {
+ return BigInt::lessThan(cx, lhs, rhs, res);
+ }
+
+ // Step 4e for Number operands.
+ MOZ_ASSERT(lhs.isNumber() && rhs.isNumber());
+ double lhsNum = lhs.toNumber();
+ double rhsNum = rhs.toNumber();
+
+ if (std::isnan(lhsNum) || std::isnan(rhsNum)) {
+    res = mozilla::Nothing();
+ return true;
+ }
+
+ res = mozilla::Some(lhsNum < rhsNum);
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool LessThanOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ bool* res) {
+ if (lhs.isInt32() && rhs.isInt32()) {
+ *res = lhs.toInt32() < rhs.toInt32();
+ return true;
+ }
+
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, lhs)) {
+ return false;
+ }
+
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, rhs)) {
+ return false;
+ }
+
+ mozilla::Maybe<bool> tmpResult;
+ if (!LessThanImpl(cx, lhs, rhs, tmpResult)) {
+ return false;
+ }
+ *res = tmpResult.valueOr(false);
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool LessThanOrEqualOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ bool* res) {
+ if (lhs.isInt32() && rhs.isInt32()) {
+ *res = lhs.toInt32() <= rhs.toInt32();
+ return true;
+ }
+
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, lhs)) {
+ return false;
+ }
+
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, rhs)) {
+ return false;
+ }
+
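+  // Per spec, |lhs <= rhs| is computed as |!(rhs < lhs)|; a NaN comparison
+  // (Nothing) therefore maps to false.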
+ mozilla::Maybe<bool> tmpResult;
+ if (!LessThanImpl(cx, rhs, lhs, tmpResult)) {
+ return false;
+ }
+ *res = !tmpResult.valueOr(true);
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool GreaterThanOperation(JSContext* cx,
+ MutableHandleValue lhs,
+ MutableHandleValue rhs,
+ bool* res) {
+ if (lhs.isInt32() && rhs.isInt32()) {
+ *res = lhs.toInt32() > rhs.toInt32();
+ return true;
+ }
+
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, lhs)) {
+ return false;
+ }
+
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, rhs)) {
+ return false;
+ }
+
+ mozilla::Maybe<bool> tmpResult;
+ if (!LessThanImpl(cx, rhs, lhs, tmpResult)) {
+ return false;
+ }
+ *res = tmpResult.valueOr(false);
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool GreaterThanOrEqualOperation(
+ JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs, bool* res) {
+ if (lhs.isInt32() && rhs.isInt32()) {
+ *res = lhs.toInt32() >= rhs.toInt32();
+ return true;
+ }
+
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, lhs)) {
+ return false;
+ }
+
+ if (!ToPrimitive(cx, JSTYPE_NUMBER, rhs)) {
+ return false;
+ }
+
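+  // Likewise, |lhs >= rhs| is computed as |!(lhs < rhs)|, with NaN operands
+  // yielding false.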
+ mozilla::Maybe<bool> tmpResult;
+ if (!LessThanImpl(cx, lhs, rhs, tmpResult)) {
+ return false;
+ }
+ *res = !tmpResult.valueOr(true);
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool SetObjectElementOperation(
+ JSContext* cx, HandleObject obj, HandleId id, HandleValue value,
+ HandleValue receiver, bool strict) {
+ ObjectOpResult result;
+ return SetProperty(cx, obj, id, value, receiver, result) &&
+ result.checkStrictModeError(cx, obj, id, strict);
+}
+
+static MOZ_ALWAYS_INLINE void InitElemArrayOperation(JSContext* cx,
+ jsbytecode* pc,
+ Handle<ArrayObject*> arr,
+ HandleValue val) {
+ MOZ_ASSERT(JSOp(*pc) == JSOp::InitElemArray);
+
+ // The dense elements must have been initialized up to this index. The JIT
+ // implementation also depends on this.
+ uint32_t index = GET_UINT32(pc);
+ MOZ_ASSERT(index < arr->getDenseCapacity());
+ MOZ_ASSERT(index == arr->getDenseInitializedLength());
+
+ // Bump the initialized length even for hole values to ensure the
+ // index == initLength invariant holds for later InitElemArray ops.
+ arr->setDenseInitializedLength(index + 1);
+
+ if (val.isMagic(JS_ELEMENTS_HOLE)) {
+ arr->initDenseElementHole(index);
+ } else {
+ arr->initDenseElement(index, val);
+ }
+}
+
+/*
+ * As an optimization, the interpreter creates a handful of reserved Rooted<T>
+ * variables at the beginning, thus inserting them into the Rooted list once
+ * upon entry. ReservedRooted "borrows" a reserved Rooted variable and uses it
+ * within a local scope, resetting the value to nullptr (or the appropriate
+ * equivalent for T) at scope end. This avoids inserting/removing the Rooted
+ * from the rooter list, while preventing stale values from being kept alive
+ * unnecessarily.
+ */
+
+template <typename T>
+class ReservedRooted : public RootedOperations<T, ReservedRooted<T>> {
+ Rooted<T>* savedRoot;
+
+ public:
+ ReservedRooted(Rooted<T>* root, const T& ptr) : savedRoot(root) {
+ *root = ptr;
+ }
+
+ explicit ReservedRooted(Rooted<T>* root) : savedRoot(root) {
+ *root = JS::SafelyInitialized<T>::create();
+ }
+
+ ~ReservedRooted() { *savedRoot = JS::SafelyInitialized<T>::create(); }
+
+ void set(const T& p) const { *savedRoot = p; }
+ operator Handle<T>() { return *savedRoot; }
+ operator Rooted<T>&() { return *savedRoot; }
+ MutableHandle<T> operator&() { return &*savedRoot; }
+
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(savedRoot->get())
+ DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(savedRoot->get())
+ DECLARE_POINTER_CONSTREF_OPS(T)
+ DECLARE_POINTER_ASSIGN_OPS(ReservedRooted, T)
+};
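+// Typical use inside an opcode case (illustrative; see CASE(EnterWith)
+// below):
+//
+//   ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
+//
+// This borrows the pre-declared |rootValue0| for the duration of the case
+// and resets it on scope exit.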
+
+void js::ReportInNotObjectError(JSContext* cx, HandleValue lref,
+ HandleValue rref) {
+ auto uniqueCharsFromString = [](JSContext* cx,
+ HandleValue ref) -> UniqueChars {
+ static const size_t MaxStringLength = 16;
+ RootedString str(cx, ref.toString());
+ if (str->length() > MaxStringLength) {
+ JSStringBuilder buf(cx);
+ if (!buf.appendSubstring(str, 0, MaxStringLength)) {
+ return nullptr;
+ }
+ if (!buf.append("...")) {
+ return nullptr;
+ }
+ str = buf.finishString();
+ if (!str) {
+ return nullptr;
+ }
+ }
+ return QuoteString(cx, str, '"');
+ };
+
+ if (lref.isString() && rref.isString()) {
+ UniqueChars lbytes = uniqueCharsFromString(cx, lref);
+ if (!lbytes) {
+ return;
+ }
+ UniqueChars rbytes = uniqueCharsFromString(cx, rref);
+ if (!rbytes) {
+ return;
+ }
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_IN_STRING,
+ lbytes.get(), rbytes.get());
+ return;
+ }
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_IN_NOT_OBJECT,
+ InformalValueTypeName(rref));
+}
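+// Illustrative: |"x" in "abc"| throws a TypeError, since the right-hand
+// side of |in| must be an object; both strings appear quoted (and truncated
+// to 16 characters) in the message.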
+
+bool MOZ_NEVER_INLINE JS_HAZ_JSNATIVE_CALLER js::Interpret(JSContext* cx,
+ RunState& state) {
+/*
+ * Define macros for an interpreter loop. Opcode dispatch is done by
+ * indirect goto (aka a threaded interpreter), which is technically
+ * non-standard but is supported by all of our supported compilers.
+ */
+#define INTERPRETER_LOOP()
+#define CASE(OP) label_##OP:
+#define DEFAULT() \
+ label_default:
+#define DISPATCH_TO(OP) goto* addresses[(OP)]
+
+#define LABEL(X) (&&label_##X)
+
+ // Use addresses instead of offsets to optimize for runtime speed over
+ // load-time relocation overhead.
+ static const void* const addresses[EnableInterruptsPseudoOpcode + 1] = {
+#define OPCODE_LABEL(op, ...) LABEL(op),
+ FOR_EACH_OPCODE(OPCODE_LABEL)
+#undef OPCODE_LABEL
+#define TRAILING_LABEL(v) \
+ ((v) == EnableInterruptsPseudoOpcode ? LABEL(EnableInterruptsPseudoOpcode) \
+ : LABEL(default)),
+ FOR_EACH_TRAILING_UNUSED_OPCODE(TRAILING_LABEL)
+#undef TRAILING_LABEL
+ };
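+  // Note: activation.opMask() is either 0 or EnableInterruptsPseudoOpcode,
+  // so the |*REGS.pc | activation.opMask()| dispatch below either goes
+  // straight to the opcode's label or is routed through the
+  // EnableInterruptsPseudoOpcode case, which handles stepping and
+  // breakpoints before re-dispatching to the real opcode.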
+
+ /*
+ * Increment REGS.pc by N, load the opcode at that position,
+ * and jump to the code to execute it.
+ *
+ * When Debugger puts a script in single-step mode, all js::Interpret
+ * invocations that might be presently running that script must have
+ * interrupts enabled. It's not practical to simply check
+ * script->stepModeEnabled() at each point some callee could have changed
+ * it, because there are so many places js::Interpret could possibly cause
+ * JavaScript to run: each place an object might be coerced to a primitive
+ * or a number, for example. So instead, we expose a simple mechanism to
+ * let Debugger tweak the affected js::Interpret frames when an onStep
+ * handler is added: calling activation.enableInterruptsUnconditionally()
+ * will enable interrupts, and activation.opMask() is or'd with the opcode
+ * to implement a simple alternate dispatch.
+ */
+#define ADVANCE_AND_DISPATCH(N) \
+ JS_BEGIN_MACRO \
+ REGS.pc += (N); \
+ SANITY_CHECKS(); \
+ DISPATCH_TO(*REGS.pc | activation.opMask()); \
+ JS_END_MACRO
+
+ /*
+ * Shorthand for the common sequence at the end of a fixed-size opcode.
+ */
+#define END_CASE(OP) ADVANCE_AND_DISPATCH(JSOpLength_##OP);
+
+ /*
+ * Prepare to call a user-supplied branch handler, and abort the script
+ * if it returns false.
+ */
+#define CHECK_BRANCH() \
+ JS_BEGIN_MACRO \
+ if (!CheckForInterrupt(cx)) goto error; \
+ JS_END_MACRO
+
+ /*
+ * This is a simple wrapper around ADVANCE_AND_DISPATCH which also does
+   * a CHECK_BRANCH() if n is not positive, since that may indicate the
+   * backedge of a loop.
+ */
+#define BRANCH(n) \
+ JS_BEGIN_MACRO \
+ int32_t nlen = (n); \
+ if (nlen <= 0) CHECK_BRANCH(); \
+ ADVANCE_AND_DISPATCH(nlen); \
+ JS_END_MACRO
+
+ /*
+ * Initialize code coverage vectors.
+ */
+#define INIT_COVERAGE() \
+ JS_BEGIN_MACRO \
+ if (!script->hasScriptCounts()) { \
+ if (cx->realm()->collectCoverageForDebug()) { \
+ if (!script->initScriptCounts(cx)) goto error; \
+ } \
+ } \
+ JS_END_MACRO
+
+ /*
+ * Increment the code coverage counter associated with the given pc.
+ */
+#define COUNT_COVERAGE_PC(PC) \
+ JS_BEGIN_MACRO \
+ if (script->hasScriptCounts()) { \
+ PCCounts* counts = script->maybeGetPCCounts(PC); \
+ MOZ_ASSERT(counts); \
+ counts->numExec()++; \
+ } \
+ JS_END_MACRO
+
+#define COUNT_COVERAGE_MAIN() \
+ JS_BEGIN_MACRO \
+ jsbytecode* main = script->main(); \
+ if (!BytecodeIsJumpTarget(JSOp(*main))) COUNT_COVERAGE_PC(main); \
+ JS_END_MACRO
+
+#define COUNT_COVERAGE() \
+ JS_BEGIN_MACRO \
+ MOZ_ASSERT(BytecodeIsJumpTarget(JSOp(*REGS.pc))); \
+ COUNT_COVERAGE_PC(REGS.pc); \
+ JS_END_MACRO
+
+#define SET_SCRIPT(s) \
+ JS_BEGIN_MACRO \
+ script = (s); \
+ MOZ_ASSERT(cx->realm() == script->realm()); \
+ if (DebugAPI::hasAnyBreakpointsOrStepMode(script) || \
+ script->hasScriptCounts()) \
+ activation.enableInterruptsUnconditionally(); \
+ JS_END_MACRO
+
+#define SANITY_CHECKS() \
+ JS_BEGIN_MACRO \
+ js::gc::MaybeVerifyBarriers(cx); \
+ JS_END_MACRO
+
+// Verify that an uninitialized lexical is followed by a correct check op.
+#ifdef DEBUG
+# define ASSERT_UNINITIALIZED_ALIASED_LEXICAL(val) \
+ JS_BEGIN_MACRO \
+ if (IsUninitializedLexical(val)) { \
+ JSOp next = JSOp(*GetNextPc(REGS.pc)); \
+ MOZ_ASSERT(next == JSOp::CheckThis || next == JSOp::CheckReturn || \
+ next == JSOp::CheckThisReinit || \
+ next == JSOp::CheckAliasedLexical); \
+ } \
+ JS_END_MACRO
+#else
+# define ASSERT_UNINITIALIZED_ALIASED_LEXICAL(val) \
+ JS_BEGIN_MACRO \
+ /* nothing */ \
+ JS_END_MACRO
+#endif
+
+ gc::MaybeVerifyBarriers(cx, true);
+
+ InterpreterFrame* entryFrame = state.pushInterpreterFrame(cx);
+ if (!entryFrame) {
+ return false;
+ }
+
+ ActivationEntryMonitor entryMonitor(cx, entryFrame);
+ InterpreterActivation activation(state, cx, entryFrame);
+
+ /* The script is used frequently, so keep a local copy. */
+ RootedScript script(cx);
+ SET_SCRIPT(REGS.fp()->script());
+
+ /*
+ * Pool of rooters for use in this interpreter frame. References to these
+ * are used for local variables within interpreter cases. This avoids
+ * creating new rooters each time an interpreter case is entered, and also
+ * correctness pitfalls due to incorrect compilation of destructor calls
+ * around computed gotos.
+ */
+ RootedValue rootValue0(cx), rootValue1(cx);
+ RootedObject rootObject0(cx), rootObject1(cx);
+ RootedFunction rootFunction0(cx);
+ Rooted<JSAtom*> rootAtom0(cx);
+ Rooted<PropertyName*> rootName0(cx);
+ RootedId rootId0(cx);
+ RootedScript rootScript0(cx);
+ Rooted<Scope*> rootScope0(cx);
+ DebugOnly<uint32_t> blockDepth;
+
+ /* State communicated between non-local jumps: */
+ bool interpReturnOK;
+ bool frameHalfInitialized;
+
+ if (!activation.entryFrame()->prologue(cx)) {
+ goto prologue_error;
+ }
+
+ if (!DebugAPI::onEnterFrame(cx, activation.entryFrame())) {
+ goto error;
+ }
+
+ // Increment the coverage for the main entry point.
+ INIT_COVERAGE();
+ COUNT_COVERAGE_MAIN();
+
+ // Enter the interpreter loop starting at the current pc.
+ ADVANCE_AND_DISPATCH(0);
+
+ INTERPRETER_LOOP() {
+ CASE(EnableInterruptsPseudoOpcode) {
+ bool moreInterrupts = false;
+ jsbytecode op = *REGS.pc;
+
+ if (!script->hasScriptCounts() &&
+ cx->realm()->collectCoverageForDebug()) {
+ if (!script->initScriptCounts(cx)) {
+ goto error;
+ }
+ }
+
+ if (script->isDebuggee()) {
+ if (DebugAPI::stepModeEnabled(script)) {
+ if (!DebugAPI::onSingleStep(cx)) {
+ goto error;
+ }
+ moreInterrupts = true;
+ }
+
+ if (DebugAPI::hasAnyBreakpointsOrStepMode(script)) {
+ moreInterrupts = true;
+ }
+
+ if (DebugAPI::hasBreakpointsAt(script, REGS.pc)) {
+ if (!DebugAPI::onTrap(cx)) {
+ goto error;
+ }
+ }
+ }
+
+ MOZ_ASSERT(activation.opMask() == EnableInterruptsPseudoOpcode);
+ if (!moreInterrupts) {
+ activation.clearInterruptsMask();
+ }
+
+ /* Commence executing the actual opcode. */
+ SANITY_CHECKS();
+ DISPATCH_TO(op);
+ }
+
+ /* Various 1-byte no-ops. */
+ CASE(Nop)
+ CASE(Try)
+ CASE(NopDestructuring)
+ CASE(TryDestructuring) {
+ MOZ_ASSERT(GetBytecodeLength(REGS.pc) == 1);
+ ADVANCE_AND_DISPATCH(1);
+ }
+
+ CASE(JumpTarget)
+ COUNT_COVERAGE();
+ END_CASE(JumpTarget)
+
+ CASE(LoopHead) {
+ COUNT_COVERAGE();
+
+ // Attempt on-stack replacement into the Baseline Interpreter.
+ if (jit::IsBaselineInterpreterEnabled()) {
+ script->incWarmUpCounter();
+
+ jit::MethodStatus status =
+ jit::CanEnterBaselineInterpreterAtBranch(cx, REGS.fp());
+ if (status == jit::Method_Error) {
+ goto error;
+ }
+ if (status == jit::Method_Compiled) {
+ bool wasProfiler = REGS.fp()->hasPushedGeckoProfilerFrame();
+
+ jit::JitExecStatus maybeOsr;
+ {
+ GeckoProfilerBaselineOSRMarker osr(cx, wasProfiler);
+ maybeOsr =
+ jit::EnterBaselineInterpreterAtBranch(cx, REGS.fp(), REGS.pc);
+ }
+
+ // We failed to call into baseline at all, so treat as an error.
+ if (maybeOsr == jit::JitExec_Aborted) {
+ goto error;
+ }
+
+ interpReturnOK = (maybeOsr == jit::JitExec_Ok);
+
+ // Pop the profiler frame pushed by the interpreter. (The compiled
+ // version of the function popped a copy of the frame pushed by the
+ // OSR trampoline.)
+ if (wasProfiler) {
+ cx->geckoProfiler().exit(cx, script);
+ }
+
+ if (activation.entryFrame() != REGS.fp()) {
+ goto jit_return_pop_frame;
+ }
+ goto leave_on_safe_point;
+ }
+ }
+ }
+ END_CASE(LoopHead)
+
+ CASE(Lineno)
+ END_CASE(Lineno)
+
+ CASE(ForceInterpreter) {
+ // Assert the script's force-interpreter flag matches its bytecode.
+ MOZ_ASSERT(script->hasForceInterpreterOp());
+ }
+ END_CASE(ForceInterpreter)
+
+ CASE(Undefined) { PUSH_UNDEFINED(); }
+ END_CASE(Undefined)
+
+ CASE(Pop) { REGS.sp--; }
+ END_CASE(Pop)
+
+ CASE(PopN) {
+ MOZ_ASSERT(GET_UINT16(REGS.pc) <= REGS.stackDepth());
+ REGS.sp -= GET_UINT16(REGS.pc);
+ }
+ END_CASE(PopN)
+
+ CASE(DupAt) {
+ MOZ_ASSERT(GET_UINT24(REGS.pc) < REGS.stackDepth());
+ unsigned i = GET_UINT24(REGS.pc);
+ const Value& rref = REGS.sp[-int(i + 1)];
+ PUSH_COPY(rref);
+ }
+ END_CASE(DupAt)
+
+ CASE(SetRval) { POP_RETURN_VALUE(); }
+ END_CASE(SetRval)
+
+ CASE(GetRval) { PUSH_COPY(REGS.fp()->returnValue()); }
+ END_CASE(GetRval)
+
+ CASE(EnterWith) {
+ ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
+ REGS.sp--;
+ ReservedRooted<Scope*> scope(&rootScope0, script->getScope(REGS.pc));
+
+ if (!EnterWithOperation(cx, REGS.fp(), val, scope.as<WithScope>())) {
+ goto error;
+ }
+ }
+ END_CASE(EnterWith)
+
+ CASE(LeaveWith) {
+ REGS.fp()->popOffEnvironmentChain<WithEnvironmentObject>();
+ }
+ END_CASE(LeaveWith)
+
+ CASE(Return) {
+ POP_RETURN_VALUE();
+ /* FALL THROUGH */
+ }
+ CASE(RetRval) {
+ /*
+ * When an inlined frame exits with an exception or an error,
+ * interpReturnOK will be false when control reaches the return
+ * continuation labels below.
+ */
+ CHECK_BRANCH();
+
+ successful_return_continuation:
+ interpReturnOK = true;
+
+ return_continuation:
+ frameHalfInitialized = false;
+
+ prologue_return_continuation:
+
+ if (activation.entryFrame() != REGS.fp()) {
+ // Returning from an inline frame; the frame being popped may have been
+ // running in the interpreter, in Baseline, or in Ion.
+ if (MOZ_LIKELY(!frameHalfInitialized)) {
+ interpReturnOK =
+ DebugAPI::onLeaveFrame(cx, REGS.fp(), REGS.pc, interpReturnOK);
+
+ REGS.fp()->epilogue(cx, REGS.pc);
+ }
+
+ jit_return_pop_frame:
+
+ activation.popInlineFrame(REGS.fp());
+ {
+ JSScript* callerScript = REGS.fp()->script();
+ if (cx->realm() != callerScript->realm()) {
+ cx->leaveRealm(callerScript->realm());
+ }
+ SET_SCRIPT(callerScript);
+ }
+
+ jit_return:
+
+ MOZ_ASSERT(IsInvokePC(REGS.pc));
+ MOZ_ASSERT(cx->realm() == script->realm());
+
+ /* Resume execution in the calling frame. */
+ if (MOZ_LIKELY(interpReturnOK)) {
+ if (JSOp(*REGS.pc) == JSOp::Resume) {
+ ADVANCE_AND_DISPATCH(JSOpLength_Resume);
+ }
+
+ MOZ_ASSERT(GetBytecodeLength(REGS.pc) == JSOpLength_Call);
+ ADVANCE_AND_DISPATCH(JSOpLength_Call);
+ }
+
+ goto error;
+ } else {
+ // Stack should be empty for the outer frame, unless we executed the
+ // first |await| expression in an async function.
+ MOZ_ASSERT(REGS.stackDepth() == 0 ||
+ (JSOp(*REGS.pc) == JSOp::Await &&
+ !REGS.fp()->isResumedGenerator()));
+ }
+ goto exit;
+ }
+
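+ // Case and Default implement |switch| dispatch when TableSwitch does not
+ // apply. Case pops the comparison result and, on a match, also pops the
+ // discriminant before jumping; Default just pops the discriminant.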
+ CASE(Default) {
+ REGS.sp--;
+ /* FALL THROUGH */
+ }
+ CASE(Goto) { BRANCH(GET_JUMP_OFFSET(REGS.pc)); }
+
+ CASE(JumpIfFalse) {
+ bool cond = ToBoolean(REGS.stackHandleAt(-1));
+ REGS.sp--;
+ if (!cond) {
+ BRANCH(GET_JUMP_OFFSET(REGS.pc));
+ }
+ }
+ END_CASE(JumpIfFalse)
+
+ CASE(JumpIfTrue) {
+ bool cond = ToBoolean(REGS.stackHandleAt(-1));
+ REGS.sp--;
+ if (cond) {
+ BRANCH(GET_JUMP_OFFSET(REGS.pc));
+ }
+ }
+ END_CASE(JumpIfTrue)
+
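+ // Short-circuiting operators (||, ??, &&). Unlike JumpIfTrue/JumpIfFalse,
+ // these leave the operand on the stack when the jump is taken: e.g. for
+ // |a ?? b|, Coalesce jumps past the right-hand side with |a| still on the
+ // stack when |a| is neither null nor undefined; otherwise fall-through
+ // code pops |a| and evaluates |b|.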
+ CASE(Or) {
+ bool cond = ToBoolean(REGS.stackHandleAt(-1));
+ if (cond) {
+ ADVANCE_AND_DISPATCH(GET_JUMP_OFFSET(REGS.pc));
+ }
+ }
+ END_CASE(Or)
+
+ CASE(Coalesce) {
+ MutableHandleValue res = REGS.stackHandleAt(-1);
+ bool cond = !res.isNullOrUndefined();
+ if (cond) {
+ ADVANCE_AND_DISPATCH(GET_JUMP_OFFSET(REGS.pc));
+ }
+ }
+ END_CASE(Coalesce)
+
+ CASE(And) {
+ bool cond = ToBoolean(REGS.stackHandleAt(-1));
+ if (!cond) {
+ ADVANCE_AND_DISPATCH(GET_JUMP_OFFSET(REGS.pc));
+ }
+ }
+ END_CASE(And)
+
+#define FETCH_ELEMENT_ID(n, id) \
+ JS_BEGIN_MACRO \
+ if (!ToPropertyKey(cx, REGS.stackHandleAt(n), &(id))) goto error; \
+ JS_END_MACRO
+
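+// Fuses a comparison op with an immediately following JumpIfFalse or
+// JumpIfTrue. The unsigned subtraction trick relies on JSOp::JumpIfTrue
+// directly following JSOp::JumpIfFalse in the opcode enumeration: diff_ is
+// 0 for JumpIfFalse and 1 for JumpIfTrue, so |(cond) == (diff_ != 0)|
+// decides whether to take the branch.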
+#define TRY_BRANCH_AFTER_COND(cond, spdec) \
+ JS_BEGIN_MACRO \
+ MOZ_ASSERT(GetBytecodeLength(REGS.pc) == 1); \
+ unsigned diff_ = \
+ (unsigned)GET_UINT8(REGS.pc) - (unsigned)JSOp::JumpIfFalse; \
+ if (diff_ <= 1) { \
+ REGS.sp -= (spdec); \
+ if ((cond) == (diff_ != 0)) { \
+ ++REGS.pc; \
+ BRANCH(GET_JUMP_OFFSET(REGS.pc)); \
+ } \
+ ADVANCE_AND_DISPATCH(1 + JSOpLength_JumpIfFalse); \
+ } \
+ JS_END_MACRO
+
+ CASE(In) {
+ HandleValue rref = REGS.stackHandleAt(-1);
+ if (!rref.isObject()) {
+ HandleValue lref = REGS.stackHandleAt(-2);
+ ReportInNotObjectError(cx, lref, rref);
+ goto error;
+ }
+ bool found;
+ {
+ ReservedRooted<JSObject*> obj(&rootObject0, &rref.toObject());
+ ReservedRooted<jsid> id(&rootId0);
+ FETCH_ELEMENT_ID(-2, id);
+ if (!HasProperty(cx, obj, id, &found)) {
+ goto error;
+ }
+ }
+ TRY_BRANCH_AFTER_COND(found, 2);
+ REGS.sp--;
+ REGS.sp[-1].setBoolean(found);
+ }
+ END_CASE(In)
+
+ CASE(HasOwn) {
+ HandleValue val = REGS.stackHandleAt(-1);
+ HandleValue idval = REGS.stackHandleAt(-2);
+
+ bool found;
+ if (!HasOwnProperty(cx, val, idval, &found)) {
+ goto error;
+ }
+
+ REGS.sp--;
+ REGS.sp[-1].setBoolean(found);
+ }
+ END_CASE(HasOwn)
+
+ CASE(CheckPrivateField) {
+ /* Load the object (val) and the private name (idval) being checked. */
+ HandleValue val = REGS.stackHandleAt(-2);
+ HandleValue idval = REGS.stackHandleAt(-1);
+
+ bool result = false;
+ if (!CheckPrivateFieldOperation(cx, REGS.pc, val, idval, &result)) {
+ goto error;
+ }
+
+ PUSH_BOOLEAN(result);
+ }
+ END_CASE(CheckPrivateField)
+
+ CASE(NewPrivateName) {
+ ReservedRooted<JSAtom*> name(&rootAtom0, script->getAtom(REGS.pc));
+
+ auto* symbol = NewPrivateName(cx, name);
+ if (!symbol) {
+ goto error;
+ }
+
+ PUSH_SYMBOL(symbol);
+ }
+ END_CASE(NewPrivateName)
+
+ CASE(IsNullOrUndefined) {
+ bool b = REGS.sp[-1].isNullOrUndefined();
+ PUSH_BOOLEAN(b);
+ }
+ END_CASE(IsNullOrUndefined)
+
+ CASE(Iter) {
+ MOZ_ASSERT(REGS.stackDepth() >= 1);
+ HandleValue val = REGS.stackHandleAt(-1);
+ JSObject* iter = ValueToIterator(cx, val);
+ if (!iter) {
+ goto error;
+ }
+ REGS.sp[-1].setObject(*iter);
+ }
+ END_CASE(Iter)
+
+ CASE(MoreIter) {
+ MOZ_ASSERT(REGS.stackDepth() >= 1);
+ MOZ_ASSERT(REGS.sp[-1].isObject());
+ Value v = IteratorMore(&REGS.sp[-1].toObject());
+ PUSH_COPY(v);
+ }
+ END_CASE(MoreIter)
+
+ CASE(IsNoIter) {
+ bool b = REGS.sp[-1].isMagic(JS_NO_ITER_VALUE);
+ PUSH_BOOLEAN(b);
+ }
+ END_CASE(IsNoIter)
+
+ CASE(EndIter) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+ CloseIterator(&REGS.sp[-2].toObject());
+ REGS.sp -= 2;
+ }
+ END_CASE(EndIter)
+
+ CASE(CloseIter) {
+ ReservedRooted<JSObject*> iter(&rootObject0, &REGS.sp[-1].toObject());
+ CompletionKind kind = CompletionKind(GET_UINT8(REGS.pc));
+ if (!CloseIterOperation(cx, iter, kind)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(CloseIter)
+
+ CASE(IsGenClosing) {
+ bool b = REGS.sp[-1].isMagic(JS_GENERATOR_CLOSING);
+ PUSH_BOOLEAN(b);
+ }
+ END_CASE(IsGenClosing)
+
+ CASE(Dup) {
+ MOZ_ASSERT(REGS.stackDepth() >= 1);
+ const Value& rref = REGS.sp[-1];
+ PUSH_COPY(rref);
+ }
+ END_CASE(Dup)
+
+ CASE(Dup2) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+ const Value& lref = REGS.sp[-2];
+ const Value& rref = REGS.sp[-1];
+ PUSH_COPY(lref);
+ PUSH_COPY(rref);
+ }
+ END_CASE(Dup2)
+
+ CASE(Swap) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+ Value& lref = REGS.sp[-2];
+ Value& rref = REGS.sp[-1];
+ lref.swap(rref);
+ }
+ END_CASE(Swap)
+
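+ // Pick moves the value |i| slots below the top up to the top, shifting
+ // the values in between down; e.g. with i == 2: [a, b, c] => [b, c, a].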
+ CASE(Pick) {
+ unsigned i = GET_UINT8(REGS.pc);
+ MOZ_ASSERT(REGS.stackDepth() >= i + 1);
+ Value lval = REGS.sp[-int(i + 1)];
+ memmove(REGS.sp - (i + 1), REGS.sp - i, sizeof(Value) * i);
+ REGS.sp[-1] = lval;
+ }
+ END_CASE(Pick)
+
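+ // Unpick is the inverse of Pick: it moves the top value down |i| slots;
+ // e.g. with i == 2: [b, c, a] => [a, b, c].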
+ CASE(Unpick) {
+ int i = GET_UINT8(REGS.pc);
+ MOZ_ASSERT(REGS.stackDepth() >= unsigned(i) + 1);
+ Value lval = REGS.sp[-1];
+ memmove(REGS.sp - i, REGS.sp - (i + 1), sizeof(Value) * i);
+ REGS.sp[-(i + 1)] = lval;
+ }
+ END_CASE(Unpick)
+
+ CASE(BindGName)
+ CASE(BindName) {
+ JSOp op = JSOp(*REGS.pc);
+ ReservedRooted<JSObject*> envChain(&rootObject0);
+ if (op == JSOp::BindName) {
+ envChain.set(REGS.fp()->environmentChain());
+ } else {
+ MOZ_ASSERT(!script->hasNonSyntacticScope());
+ envChain.set(&REGS.fp()->global().lexicalEnvironment());
+ }
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+
+ // Assigning to an undeclared name adds a property to the global object.
+ ReservedRooted<JSObject*> env(&rootObject1);
+ if (!LookupNameUnqualified(cx, name, envChain, &env)) {
+ goto error;
+ }
+
+ PUSH_OBJECT(*env);
+
+ static_assert(JSOpLength_BindName == JSOpLength_BindGName,
+ "We're sharing the END_CASE so the lengths better match");
+ }
+ END_CASE(BindName)
+
+ CASE(BindVar) {
+ JSObject* varObj = BindVarOperation(cx, REGS.fp()->environmentChain());
+ PUSH_OBJECT(*varObj);
+ }
+ END_CASE(BindVar)
+
+ CASE(BitOr) {
+ MutableHandleValue lhs = REGS.stackHandleAt(-2);
+ MutableHandleValue rhs = REGS.stackHandleAt(-1);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!BitOrOperation(cx, lhs, rhs, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(BitOr)
+
+ CASE(BitXor) {
+ MutableHandleValue lhs = REGS.stackHandleAt(-2);
+ MutableHandleValue rhs = REGS.stackHandleAt(-1);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!BitXorOperation(cx, lhs, rhs, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(BitXor)
+
+ CASE(BitAnd) {
+ MutableHandleValue lhs = REGS.stackHandleAt(-2);
+ MutableHandleValue rhs = REGS.stackHandleAt(-1);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!BitAndOperation(cx, lhs, rhs, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(BitAnd)
+
+ CASE(Eq) {
+ if (!LooseEqualityOp<true>(cx, REGS)) {
+ goto error;
+ }
+ }
+ END_CASE(Eq)
+
+ CASE(Ne) {
+ if (!LooseEqualityOp<false>(cx, REGS)) {
+ goto error;
+ }
+ }
+ END_CASE(Ne)
+
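+// Shared body for StrictEq/StrictNe: |equal OP true| evaluates to |equal|
+// when OP is == and to |!equal| when OP is !=.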
+#define STRICT_EQUALITY_OP(OP, COND) \
+ JS_BEGIN_MACRO \
+ HandleValue lval = REGS.stackHandleAt(-2); \
+ HandleValue rval = REGS.stackHandleAt(-1); \
+ bool equal; \
+ if (!js::StrictlyEqual(cx, lval, rval, &equal)) { \
+ goto error; \
+ } \
+ (COND) = equal OP true; \
+ REGS.sp--; \
+ JS_END_MACRO
+
+ CASE(StrictEq) {
+ bool cond;
+ STRICT_EQUALITY_OP(==, cond);
+ REGS.sp[-1].setBoolean(cond);
+ }
+ END_CASE(StrictEq)
+
+ CASE(StrictNe) {
+ bool cond;
+ STRICT_EQUALITY_OP(!=, cond);
+ REGS.sp[-1].setBoolean(cond);
+ }
+ END_CASE(StrictNe)
+
+#undef STRICT_EQUALITY_OP
+
+ CASE(Case) {
+ bool cond = REGS.sp[-1].toBoolean();
+ REGS.sp--;
+ if (cond) {
+ REGS.sp--;
+ BRANCH(GET_JUMP_OFFSET(REGS.pc));
+ }
+ }
+ END_CASE(Case)
+
+ CASE(Lt) {
+ bool cond;
+ MutableHandleValue lval = REGS.stackHandleAt(-2);
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+ if (!LessThanOperation(cx, lval, rval, &cond)) {
+ goto error;
+ }
+ TRY_BRANCH_AFTER_COND(cond, 2);
+ REGS.sp[-2].setBoolean(cond);
+ REGS.sp--;
+ }
+ END_CASE(Lt)
+
+ CASE(Le) {
+ bool cond;
+ MutableHandleValue lval = REGS.stackHandleAt(-2);
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+ if (!LessThanOrEqualOperation(cx, lval, rval, &cond)) {
+ goto error;
+ }
+ TRY_BRANCH_AFTER_COND(cond, 2);
+ REGS.sp[-2].setBoolean(cond);
+ REGS.sp--;
+ }
+ END_CASE(Le)
+
+ CASE(Gt) {
+ bool cond;
+ MutableHandleValue lval = REGS.stackHandleAt(-2);
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+ if (!GreaterThanOperation(cx, lval, rval, &cond)) {
+ goto error;
+ }
+ TRY_BRANCH_AFTER_COND(cond, 2);
+ REGS.sp[-2].setBoolean(cond);
+ REGS.sp--;
+ }
+ END_CASE(Gt)
+
+ CASE(Ge) {
+ bool cond;
+ MutableHandleValue lval = REGS.stackHandleAt(-2);
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+ if (!GreaterThanOrEqualOperation(cx, lval, rval, &cond)) {
+ goto error;
+ }
+ TRY_BRANCH_AFTER_COND(cond, 2);
+ REGS.sp[-2].setBoolean(cond);
+ REGS.sp--;
+ }
+ END_CASE(Ge)
+
+ CASE(Lsh) {
+ MutableHandleValue lhs = REGS.stackHandleAt(-2);
+ MutableHandleValue rhs = REGS.stackHandleAt(-1);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!BitLshOperation(cx, lhs, rhs, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Lsh)
+
+ CASE(Rsh) {
+ MutableHandleValue lhs = REGS.stackHandleAt(-2);
+ MutableHandleValue rhs = REGS.stackHandleAt(-1);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!BitRshOperation(cx, lhs, rhs, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Rsh)
+
+ CASE(Ursh) {
+ MutableHandleValue lhs = REGS.stackHandleAt(-2);
+ MutableHandleValue rhs = REGS.stackHandleAt(-1);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!UrshOperation(cx, lhs, rhs, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Ursh)
+
+ CASE(Add) {
+ MutableHandleValue lval = REGS.stackHandleAt(-2);
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!AddOperation(cx, lval, rval, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Add)
+
+ CASE(Sub) {
+ ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
+ ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!SubOperation(cx, &lval, &rval, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Sub)
+
+ CASE(Mul) {
+ ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
+ ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!MulOperation(cx, &lval, &rval, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Mul)
+
+ CASE(Div) {
+ ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
+ ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!DivOperation(cx, &lval, &rval, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Div)
+
+ CASE(Mod) {
+ ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
+ ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!ModOperation(cx, &lval, &rval, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Mod)
+
+ CASE(Pow) {
+ ReservedRooted<Value> lval(&rootValue0, REGS.sp[-2]);
+ ReservedRooted<Value> rval(&rootValue1, REGS.sp[-1]);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+ if (!PowOperation(cx, &lval, &rval, res)) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(Pow)
+
+ CASE(Not) {
+ bool cond = ToBoolean(REGS.stackHandleAt(-1));
+ REGS.sp--;
+ PUSH_BOOLEAN(!cond);
+ }
+ END_CASE(Not)
+
+ CASE(BitNot) {
+ MutableHandleValue val = REGS.stackHandleAt(-1);
+ if (!BitNotOperation(cx, val, val)) {
+ goto error;
+ }
+ }
+ END_CASE(BitNot)
+
+ CASE(Neg) {
+ MutableHandleValue val = REGS.stackHandleAt(-1);
+ if (!NegOperation(cx, val, val)) {
+ goto error;
+ }
+ }
+ END_CASE(Neg)
+
+ CASE(Pos) {
+ if (!ToNumber(cx, REGS.stackHandleAt(-1))) {
+ goto error;
+ }
+ }
+ END_CASE(Pos)
+
+ CASE(DelName) {
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+ ReservedRooted<JSObject*> envObj(&rootObject0,
+ REGS.fp()->environmentChain());
+
+ PUSH_BOOLEAN(true);
+ MutableHandleValue res = REGS.stackHandleAt(-1);
+ if (!DeleteNameOperation(cx, name, envObj, res)) {
+ goto error;
+ }
+ }
+ END_CASE(DelName)
+
+ CASE(DelProp)
+ CASE(StrictDelProp) {
+ static_assert(JSOpLength_DelProp == JSOpLength_StrictDelProp,
+ "delprop and strictdelprop must be the same size");
+ HandleValue val = REGS.stackHandleAt(-1);
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+ bool res = false;
+ if (JSOp(*REGS.pc) == JSOp::StrictDelProp) {
+ if (!DelPropOperation<true>(cx, val, name, &res)) {
+ goto error;
+ }
+ } else {
+ if (!DelPropOperation<false>(cx, val, name, &res)) {
+ goto error;
+ }
+ }
+ REGS.sp[-1].setBoolean(res);
+ }
+ END_CASE(DelProp)
+
+ CASE(DelElem)
+ CASE(StrictDelElem) {
+ static_assert(JSOpLength_DelElem == JSOpLength_StrictDelElem,
+ "delelem and strictdelelem must be the same size");
+ HandleValue val = REGS.stackHandleAt(-2);
+ HandleValue propval = REGS.stackHandleAt(-1);
+ bool res = false;
+ if (JSOp(*REGS.pc) == JSOp::StrictDelElem) {
+ if (!DelElemOperation<true>(cx, val, propval, &res)) {
+ goto error;
+ }
+ } else {
+ if (!DelElemOperation<false>(cx, val, propval, &res)) {
+ goto error;
+ }
+ }
+ REGS.sp[-2].setBoolean(res);
+ REGS.sp--;
+ }
+ END_CASE(DelElem)
+
+ CASE(ToPropertyKey) {
+ ReservedRooted<Value> idval(&rootValue1, REGS.sp[-1]);
+ MutableHandleValue res = REGS.stackHandleAt(-1);
+ if (!ToPropertyKeyOperation(cx, idval, res)) {
+ goto error;
+ }
+ }
+ END_CASE(ToPropertyKey)
+
+ CASE(TypeofExpr)
+ CASE(Typeof) {
+ REGS.sp[-1].setString(TypeOfOperation(REGS.sp[-1], cx->runtime()));
+ }
+ END_CASE(Typeof)
+
+ CASE(Void) { REGS.sp[-1].setUndefined(); }
+ END_CASE(Void)
+
+ CASE(FunctionThis) {
+ PUSH_NULL();
+ if (!GetFunctionThis(cx, REGS.fp(), REGS.stackHandleAt(-1))) {
+ goto error;
+ }
+ }
+ END_CASE(FunctionThis)
+
+ CASE(GlobalThis) {
+ MOZ_ASSERT(!script->hasNonSyntacticScope());
+ PUSH_OBJECT(*cx->global()->lexicalEnvironment().thisObject());
+ }
+ END_CASE(GlobalThis)
+
+ CASE(NonSyntacticGlobalThis) {
+ PUSH_NULL();
+ GetNonSyntacticGlobalThis(cx, REGS.fp()->environmentChain(),
+ REGS.stackHandleAt(-1));
+ }
+ END_CASE(NonSyntacticGlobalThis)
+
+ CASE(CheckIsObj) {
+ if (!REGS.sp[-1].isObject()) {
+ MOZ_ALWAYS_FALSE(
+ ThrowCheckIsObject(cx, CheckIsObjectKind(GET_UINT8(REGS.pc))));
+ goto error;
+ }
+ }
+ END_CASE(CheckIsObj)
+
+ CASE(CheckThis) {
+ if (REGS.sp[-1].isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ MOZ_ALWAYS_FALSE(ThrowUninitializedThis(cx));
+ goto error;
+ }
+ }
+ END_CASE(CheckThis)
+
+ CASE(CheckThisReinit) {
+ if (!REGS.sp[-1].isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ MOZ_ALWAYS_FALSE(ThrowInitializedThis(cx));
+ goto error;
+ }
+ }
+ END_CASE(CheckThisReinit)
+
+ CASE(CheckReturn) {
+ ReservedRooted<Value> thisv(&rootValue0, REGS.sp[-1]);
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+ if (!REGS.fp()->checkReturn(cx, thisv, rval)) {
+ goto error;
+ }
+ }
+ END_CASE(CheckReturn)
+
+ CASE(GetProp) {
+ ReservedRooted<Value> lval(&rootValue0, REGS.sp[-1]);
+ MutableHandleValue res = REGS.stackHandleAt(-1);
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+ if (!GetPropertyOperation(cx, name, lval, res)) {
+ goto error;
+ }
+ cx->debugOnlyCheck(res);
+ }
+ END_CASE(GetProp)
+
+ CASE(GetPropSuper) {
+ ReservedRooted<Value> receiver(&rootValue0, REGS.sp[-2]);
+ HandleValue lval = REGS.stackHandleAt(-1);
+ MOZ_ASSERT(lval.isObjectOrNull());
+ MutableHandleValue rref = REGS.stackHandleAt(-2);
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+
+ ReservedRooted<JSObject*> obj(&rootObject0);
+ obj = ToObjectFromStackForPropertyAccess(cx, lval, -1, name);
+ if (!obj) {
+ goto error;
+ }
+
+ if (!GetProperty(cx, obj, receiver, name, rref)) {
+ goto error;
+ }
+
+ cx->debugOnlyCheck(rref);
+
+ REGS.sp--;
+ }
+ END_CASE(GetPropSuper)
+
+ CASE(GetBoundName) {
+ ReservedRooted<JSObject*> env(&rootObject0, &REGS.sp[-1].toObject());
+ ReservedRooted<jsid> id(&rootId0, NameToId(script->getName(REGS.pc)));
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+ if (!GetNameBoundInEnvironment(cx, env, id, rval)) {
+ goto error;
+ }
+ cx->debugOnlyCheck(rval);
+ }
+ END_CASE(GetBoundName)
+
+ CASE(SetIntrinsic) {
+ HandleValue value = REGS.stackHandleAt(-1);
+
+ if (!SetIntrinsicOperation(cx, script, REGS.pc, value)) {
+ goto error;
+ }
+ }
+ END_CASE(SetIntrinsic)
+
+ CASE(SetGName)
+ CASE(StrictSetGName)
+ CASE(SetName)
+ CASE(StrictSetName) {
+ static_assert(JSOpLength_SetName == JSOpLength_StrictSetName,
+ "setname and strictsetname must be the same size");
+ static_assert(JSOpLength_SetGName == JSOpLength_StrictSetGName,
+ "setgname and strictsetgname must be the same size");
+ static_assert(JSOpLength_SetName == JSOpLength_SetGName,
+ "We're sharing the END_CASE so the lengths better match");
+
+ ReservedRooted<JSObject*> env(&rootObject0, &REGS.sp[-2].toObject());
+ HandleValue value = REGS.stackHandleAt(-1);
+
+ if (!SetNameOperation(cx, script, REGS.pc, env, value)) {
+ goto error;
+ }
+
+ REGS.sp[-2] = REGS.sp[-1];
+ REGS.sp--;
+ }
+ END_CASE(SetName)
+
+ CASE(SetProp)
+ CASE(StrictSetProp) {
+ static_assert(JSOpLength_SetProp == JSOpLength_StrictSetProp,
+ "setprop and strictsetprop must be the same size");
+ int lvalIndex = -2;
+ HandleValue lval = REGS.stackHandleAt(lvalIndex);
+ HandleValue rval = REGS.stackHandleAt(-1);
+
+ ReservedRooted<jsid> id(&rootId0, NameToId(script->getName(REGS.pc)));
+
+ bool strict = JSOp(*REGS.pc) == JSOp::StrictSetProp;
+
+ ReservedRooted<JSObject*> obj(&rootObject0);
+ obj = ToObjectFromStackForPropertyAccess(cx, lval, lvalIndex, id);
+ if (!obj) {
+ goto error;
+ }
+
+ if (!SetObjectElementOperation(cx, obj, id, rval, lval, strict)) {
+ goto error;
+ }
+
+ REGS.sp[-2] = REGS.sp[-1];
+ REGS.sp--;
+ }
+ END_CASE(SetProp)
+
+ CASE(SetPropSuper)
+ CASE(StrictSetPropSuper) {
+ static_assert(
+ JSOpLength_SetPropSuper == JSOpLength_StrictSetPropSuper,
+ "setprop-super and strictsetprop-super must be the same size");
+
+ HandleValue receiver = REGS.stackHandleAt(-3);
+ HandleValue lval = REGS.stackHandleAt(-2);
+ MOZ_ASSERT(lval.isObjectOrNull());
+ HandleValue rval = REGS.stackHandleAt(-1);
+ ReservedRooted<jsid> id(&rootId0, NameToId(script->getName(REGS.pc)));
+
+ bool strict = JSOp(*REGS.pc) == JSOp::StrictSetPropSuper;
+
+ ReservedRooted<JSObject*> obj(&rootObject0);
+ obj = ToObjectFromStackForPropertyAccess(cx, lval, -2, id);
+ if (!obj) {
+ goto error;
+ }
+
+ if (!SetObjectElementOperation(cx, obj, id, rval, receiver, strict)) {
+ goto error;
+ }
+
+ REGS.sp[-3] = REGS.sp[-1];
+ REGS.sp -= 2;
+ }
+ END_CASE(SetPropSuper)
+
+ CASE(GetElem) {
+ int lvalIndex = -2;
+ ReservedRooted<Value> lval(&rootValue0, REGS.sp[lvalIndex]);
+ HandleValue rval = REGS.stackHandleAt(-1);
+ MutableHandleValue res = REGS.stackHandleAt(-2);
+
+ if (!GetElementOperationWithStackIndex(cx, lval, lvalIndex, rval, res)) {
+ goto error;
+ }
+
+ REGS.sp--;
+ }
+ END_CASE(GetElem)
+
+ CASE(GetElemSuper) {
+ ReservedRooted<Value> receiver(&rootValue0, REGS.sp[-3]);
+ HandleValue index = REGS.stackHandleAt(-2);
+ HandleValue lval = REGS.stackHandleAt(-1);
+ MOZ_ASSERT(lval.isObjectOrNull());
+
+ MutableHandleValue res = REGS.stackHandleAt(-3);
+
+ ReservedRooted<JSObject*> obj(&rootObject0);
+ obj = ToObjectFromStackForPropertyAccess(cx, lval, -1, index);
+ if (!obj) {
+ goto error;
+ }
+
+ if (!GetObjectElementOperation(cx, JSOp(*REGS.pc), obj, receiver, index,
+ res)) {
+ goto error;
+ }
+
+ REGS.sp -= 2;
+ }
+ END_CASE(GetElemSuper)
+
+ CASE(SetElem)
+ CASE(StrictSetElem) {
+ static_assert(JSOpLength_SetElem == JSOpLength_StrictSetElem,
+ "setelem and strictsetelem must be the same size");
+ int receiverIndex = -3;
+ HandleValue receiver = REGS.stackHandleAt(receiverIndex);
+ HandleValue value = REGS.stackHandleAt(-1);
+
+ ReservedRooted<JSObject*> obj(&rootObject0);
+ obj = ToObjectFromStackForPropertyAccess(cx, receiver, receiverIndex,
+ REGS.stackHandleAt(-2));
+ if (!obj) {
+ goto error;
+ }
+
+ ReservedRooted<jsid> id(&rootId0);
+ FETCH_ELEMENT_ID(-2, id);
+
+ if (!SetObjectElementOperation(cx, obj, id, value, receiver,
+ JSOp(*REGS.pc) == JSOp::StrictSetElem)) {
+ goto error;
+ }
+ REGS.sp[-3] = value;
+ REGS.sp -= 2;
+ }
+ END_CASE(SetElem)
+
+ CASE(SetElemSuper)
+ CASE(StrictSetElemSuper) {
+ static_assert(
+ JSOpLength_SetElemSuper == JSOpLength_StrictSetElemSuper,
+ "setelem-super and strictsetelem-super must be the same size");
+
+ HandleValue receiver = REGS.stackHandleAt(-4);
+ HandleValue lval = REGS.stackHandleAt(-2);
+ MOZ_ASSERT(lval.isObjectOrNull());
+ HandleValue value = REGS.stackHandleAt(-1);
+
+ ReservedRooted<JSObject*> obj(&rootObject0);
+ obj = ToObjectFromStackForPropertyAccess(cx, lval, -2,
+ REGS.stackHandleAt(-3));
+ if (!obj) {
+ goto error;
+ }
+
+ ReservedRooted<jsid> id(&rootId0);
+ FETCH_ELEMENT_ID(-3, id);
+
+ bool strict = JSOp(*REGS.pc) == JSOp::StrictSetElemSuper;
+ if (!SetObjectElementOperation(cx, obj, id, value, receiver, strict)) {
+ goto error;
+ }
+ REGS.sp[-4] = value;
+ REGS.sp -= 3;
+ }
+ END_CASE(SetElemSuper)
+
+ CASE(Eval)
+ CASE(StrictEval) {
+ static_assert(JSOpLength_Eval == JSOpLength_StrictEval,
+ "eval and stricteval must be the same size");
+
+ CallArgs args = CallArgsFromSp(GET_ARGC(REGS.pc), REGS.sp);
+ if (cx->global()->valueIsEval(args.calleev())) {
+ if (!DirectEval(cx, args.get(0), args.rval())) {
+ goto error;
+ }
+ } else {
+ if (!CallFromStack(cx, args, CallReason::Call)) {
+ goto error;
+ }
+ }
+
+ REGS.sp = args.spAfterCall();
+ }
+ END_CASE(Eval)
+
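+ // Spread call/construct. Stack layout, bottom to top: callee, this, the
+ // packed arguments array, plus an extra newTarget slot when constructing.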
+ CASE(SpreadNew)
+ CASE(SpreadCall)
+ CASE(SpreadSuperCall) {
+ if (REGS.fp()->hasPushedGeckoProfilerFrame()) {
+ cx->geckoProfiler().updatePC(cx, script, REGS.pc);
+ }
+ /* FALL THROUGH */
+ }
+
+ CASE(SpreadEval)
+ CASE(StrictSpreadEval) {
+ static_assert(JSOpLength_SpreadEval == JSOpLength_StrictSpreadEval,
+ "spreadeval and strictspreadeval must be the same size");
+ bool construct = JSOp(*REGS.pc) == JSOp::SpreadNew ||
+ JSOp(*REGS.pc) == JSOp::SpreadSuperCall;
+
+ MOZ_ASSERT(REGS.stackDepth() >= 3u + construct);
+
+ HandleValue callee = REGS.stackHandleAt(-3 - construct);
+ HandleValue thisv = REGS.stackHandleAt(-2 - construct);
+ HandleValue arr = REGS.stackHandleAt(-1 - construct);
+ MutableHandleValue ret = REGS.stackHandleAt(-3 - construct);
+
+ RootedValue& newTarget = rootValue0;
+ if (construct) {
+ newTarget = REGS.sp[-1];
+ } else {
+ newTarget = NullValue();
+ }
+
+ if (!SpreadCallOperation(cx, script, REGS.pc, thisv, callee, arr,
+ newTarget, ret)) {
+ goto error;
+ }
+
+ REGS.sp -= 2 + construct;
+ }
+ END_CASE(SpreadCall)
+
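+ // Calls and construction. Stack layout, bottom to top: callee, this,
+ // args[0..argc-1], plus an extra newTarget slot when constructing.
+ // Interpreted callees are entered by pushing an inline frame below;
+ // everything else goes through CallFromStack/ConstructFromStack.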
+ CASE(New)
+ CASE(NewContent)
+ CASE(Call)
+ CASE(CallContent)
+ CASE(CallIgnoresRv)
+ CASE(CallIter)
+ CASE(CallContentIter)
+ CASE(SuperCall) {
+ static_assert(JSOpLength_Call == JSOpLength_New,
+ "call and new must be the same size");
+ static_assert(JSOpLength_Call == JSOpLength_CallContent,
+ "call and call-content must be the same size");
+ static_assert(JSOpLength_Call == JSOpLength_CallIgnoresRv,
+ "call and call-ignores-rv must be the same size");
+ static_assert(JSOpLength_Call == JSOpLength_CallIter,
+ "call and calliter must be the same size");
+ static_assert(JSOpLength_Call == JSOpLength_CallContentIter,
+ "call and call-content-iter must be the same size");
+ static_assert(JSOpLength_Call == JSOpLength_SuperCall,
+ "call and supercall must be the same size");
+
+ if (REGS.fp()->hasPushedGeckoProfilerFrame()) {
+ cx->geckoProfiler().updatePC(cx, script, REGS.pc);
+ }
+
+ JSOp op = JSOp(*REGS.pc);
+ MaybeConstruct construct = MaybeConstruct(
+ op == JSOp::New || op == JSOp::NewContent || op == JSOp::SuperCall);
+ bool ignoresReturnValue = op == JSOp::CallIgnoresRv;
+ unsigned argStackSlots = GET_ARGC(REGS.pc) + construct;
+
+ MOZ_ASSERT(REGS.stackDepth() >= 2u + GET_ARGC(REGS.pc));
+ CallArgs args =
+ CallArgsFromSp(argStackSlots, REGS.sp, construct, ignoresReturnValue);
+
+ JSFunction* maybeFun;
+ bool isFunction = IsFunctionObject(args.calleev(), &maybeFun);
+
+ // Use the slow path if the callee is not an interpreted function, if we
+ // have to throw an exception, or if we might have to invoke the
+ // OnNativeCall hook for a self-hosted builtin.
+ if (!isFunction || !maybeFun->isInterpreted() ||
+ (construct && !maybeFun->isConstructor()) ||
+ (!construct && maybeFun->isClassConstructor()) ||
+ cx->insideDebuggerEvaluationWithOnNativeCallHook) {
+ if (construct) {
+ CallReason reason = op == JSOp::NewContent ? CallReason::CallContent
+ : CallReason::Call;
+ if (!ConstructFromStack(cx, args, reason)) {
+ goto error;
+ }
+ } else {
+ if ((op == JSOp::CallIter || op == JSOp::CallContentIter) &&
+ args.calleev().isPrimitive()) {
+ MOZ_ASSERT(args.length() == 0, "thisv must be on top of the stack");
+ ReportValueError(cx, JSMSG_NOT_ITERABLE, -1, args.thisv(), nullptr);
+ goto error;
+ }
+
+ CallReason reason =
+ (op == JSOp::CallContent || op == JSOp::CallContentIter)
+ ? CallReason::CallContent
+ : CallReason::Call;
+ if (!CallFromStack(cx, args, reason)) {
+ goto error;
+ }
+ }
+ Value* newsp = args.spAfterCall();
+ REGS.sp = newsp;
+ ADVANCE_AND_DISPATCH(JSOpLength_Call);
+ }
+
+ {
+ MOZ_ASSERT(maybeFun);
+ ReservedRooted<JSFunction*> fun(&rootFunction0, maybeFun);
+ ReservedRooted<JSScript*> funScript(
+ &rootScript0, JSFunction::getOrCreateScript(cx, fun));
+ if (!funScript) {
+ goto error;
+ }
+
+ // Enter the callee's realm if this is a cross-realm call. Use
+ // MakeScopeExit to leave this realm on all error/JIT-return paths
+ // below.
+ const bool isCrossRealm = cx->realm() != funScript->realm();
+ if (isCrossRealm) {
+ cx->enterRealmOf(funScript);
+ }
+ auto leaveRealmGuard =
+ mozilla::MakeScopeExit([isCrossRealm, cx, &script] {
+ if (isCrossRealm) {
+ cx->leaveRealm(script->realm());
+ }
+ });
+
+ if (construct && !MaybeCreateThisForConstructor(cx, args)) {
+ goto error;
+ }
+
+ {
+ InvokeState state(cx, args, construct);
+
+ jit::EnterJitStatus status = jit::MaybeEnterJit(cx, state);
+ switch (status) {
+ case jit::EnterJitStatus::Error:
+ goto error;
+ case jit::EnterJitStatus::Ok:
+ interpReturnOK = true;
+ CHECK_BRANCH();
+ REGS.sp = args.spAfterCall();
+ goto jit_return;
+ case jit::EnterJitStatus::NotEntered:
+ break;
+ }
+
+#ifdef NIGHTLY_BUILD
+ // If entry trampolines are enabled, call back into
+ // MaybeEnterInterpreterTrampoline so we can generate an
+ // entry trampoline for the new frame.
+ if (jit::JitOptions.emitInterpreterEntryTrampoline) {
+ if (MaybeEnterInterpreterTrampoline(cx, state)) {
+ interpReturnOK = true;
+ CHECK_BRANCH();
+ REGS.sp = args.spAfterCall();
+ goto jit_return;
+ }
+ goto error;
+ }
+#endif
+ }
+
+ funScript = fun->nonLazyScript();
+
+ if (!activation.pushInlineFrame(args, funScript, construct)) {
+ goto error;
+ }
+ leaveRealmGuard.release(); // We leave the callee's realm when we
+ // call popInlineFrame.
+ }
+
+ SET_SCRIPT(REGS.fp()->script());
+
+ if (!REGS.fp()->prologue(cx)) {
+ goto prologue_error;
+ }
+
+ if (!DebugAPI::onEnterFrame(cx, REGS.fp())) {
+ goto error;
+ }
+
+ // Increment the coverage for the main entry point.
+ INIT_COVERAGE();
+ COUNT_COVERAGE_MAIN();
+
+ /* Load the first op and dispatch it. This is safe because every script
+ * ends with JSOp::RetRval, so there is at least one op to execute. */
+ ADVANCE_AND_DISPATCH(0);
+ }
+
+ CASE(OptimizeSpreadCall) {
+ ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+
+ if (!OptimizeSpreadCall(cx, val, rval)) {
+ goto error;
+ }
+ }
+ END_CASE(OptimizeSpreadCall)
+
+ CASE(ThrowMsg) {
+ MOZ_ALWAYS_FALSE(ThrowMsgOperation(cx, GET_UINT8(REGS.pc)));
+ goto error;
+ }
+ END_CASE(ThrowMsg)
+
+ CASE(ImplicitThis) {
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+ ReservedRooted<JSObject*> envObj(&rootObject0,
+ REGS.fp()->environmentChain());
+ ReservedRooted<JSObject*> env(&rootObject1);
+ if (!LookupNameWithGlobalDefault(cx, name, envObj, &env)) {
+ goto error;
+ }
+
+ Value v = ComputeImplicitThis(env);
+ PUSH_COPY(v);
+ }
+ END_CASE(ImplicitThis)
+
+ CASE(GetGName) {
+ ReservedRooted<Value> rval(&rootValue0);
+ ReservedRooted<JSObject*> env(&rootObject0,
+ &cx->global()->lexicalEnvironment());
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+ MOZ_ASSERT(!script->hasNonSyntacticScope());
+ if (!GetNameOperation(cx, env, name, JSOp(REGS.pc[JSOpLength_GetGName]),
+ &rval)) {
+ goto error;
+ }
+
+ PUSH_COPY(rval);
+ }
+ END_CASE(GetGName)
+
+ CASE(GetName) {
+ ReservedRooted<Value> rval(&rootValue0);
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+ if (!GetNameOperation(cx, REGS.fp()->environmentChain(), name,
+ JSOp(REGS.pc[JSOpLength_GetName]), &rval)) {
+ goto error;
+ }
+
+ PUSH_COPY(rval);
+ }
+ END_CASE(GetName)
+
+ CASE(GetImport) {
+ PUSH_NULL();
+ MutableHandleValue rval = REGS.stackHandleAt(-1);
+ HandleObject envChain = REGS.fp()->environmentChain();
+ if (!GetImportOperation(cx, envChain, script, REGS.pc, rval)) {
+ goto error;
+ }
+ }
+ END_CASE(GetImport)
+
+ CASE(GetIntrinsic) {
+ ReservedRooted<Value> rval(&rootValue0);
+ if (!GetIntrinsicOperation(cx, script, REGS.pc, &rval)) {
+ goto error;
+ }
+
+ PUSH_COPY(rval);
+ }
+ END_CASE(GetIntrinsic)
+
+ CASE(Uint16) { PUSH_INT32((int32_t)GET_UINT16(REGS.pc)); }
+ END_CASE(Uint16)
+
+ CASE(Uint24) { PUSH_INT32((int32_t)GET_UINT24(REGS.pc)); }
+ END_CASE(Uint24)
+
+ CASE(Int8) { PUSH_INT32(GET_INT8(REGS.pc)); }
+ END_CASE(Int8)
+
+ CASE(Int32) { PUSH_INT32(GET_INT32(REGS.pc)); }
+ END_CASE(Int32)
+
+ CASE(Double) { PUSH_COPY(GET_INLINE_VALUE(REGS.pc)); }
+ END_CASE(Double)
+
+ CASE(String) { PUSH_STRING(script->getString(REGS.pc)); }
+ END_CASE(String)
+
+ CASE(ToString) {
+ MutableHandleValue oper = REGS.stackHandleAt(-1);
+
+ if (!oper.isString()) {
+ JSString* operString = ToString<CanGC>(cx, oper);
+ if (!operString) {
+ goto error;
+ }
+ oper.setString(operString);
+ }
+ }
+ END_CASE(ToString)
+
+ CASE(Symbol) {
+ PUSH_SYMBOL(cx->wellKnownSymbols().get(GET_UINT8(REGS.pc)));
+ }
+ END_CASE(Symbol)
+
+ CASE(Object) {
+ MOZ_ASSERT(script->treatAsRunOnce());
+ PUSH_OBJECT(*script->getObject(REGS.pc));
+ }
+ END_CASE(Object)
+
+ CASE(CallSiteObj) {
+ JSObject* cso = script->getObject(REGS.pc);
+ MOZ_ASSERT(!cso->as<ArrayObject>().isExtensible());
+ MOZ_ASSERT(cso->as<ArrayObject>().containsPure(cx->names().raw));
+ PUSH_OBJECT(*cso);
+ }
+ END_CASE(CallSiteObj)
+
+ CASE(RegExp) {
+ /*
+ * Push a regexp object cloned from the regexp literal object mapped by
+ * the bytecode at pc.
+ */
+ ReservedRooted<JSObject*> re(&rootObject0, script->getRegExp(REGS.pc));
+ JSObject* obj = CloneRegExpObject(cx, re.as<RegExpObject>());
+ if (!obj) {
+ goto error;
+ }
+ PUSH_OBJECT(*obj);
+ }
+ END_CASE(RegExp)
+
+ CASE(Zero) { PUSH_INT32(0); }
+ END_CASE(Zero)
+
+ CASE(One) { PUSH_INT32(1); }
+ END_CASE(One)
+
+ CASE(Null) { PUSH_NULL(); }
+ END_CASE(Null)
+
+ CASE(False) { PUSH_BOOLEAN(false); }
+ END_CASE(False)
+
+ CASE(True) { PUSH_BOOLEAN(true); }
+ END_CASE(True)
+
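+ // Dense integer switch. The operands are the default jump offset, the
+ // |low| and |high| bounds, then a table of per-case offsets. For
+ // instance, |switch (x) { case 2: ... case 3: ... case 5: ... }| might
+ // compile to a table with low = 2 and high = 5, where the entry for the
+ // missing case 4 points at the default.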
+ CASE(TableSwitch) {
+ jsbytecode* pc2 = REGS.pc;
+ int32_t len = GET_JUMP_OFFSET(pc2);
+
+ /*
+ * ECMAv2+ forbids conversion of the discriminant, so we skip to the
+ * default case if the discriminant isn't already an int32 Value. (This
+ * opcode is emitted only for dense int-domain switches.)
+ */
+ const Value& rref = *--REGS.sp;
+ int32_t i;
+ if (rref.isInt32()) {
+ i = rref.toInt32();
+ } else {
+ /* Use mozilla::NumberEqualsInt32 to treat -0 (double) as 0. */
+ if (!rref.isDouble() || !NumberEqualsInt32(rref.toDouble(), &i)) {
+ ADVANCE_AND_DISPATCH(len);
+ }
+ }
+
+ pc2 += JUMP_OFFSET_LEN;
+ int32_t low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ int32_t high = GET_JUMP_OFFSET(pc2);
+
+ i = uint32_t(i) - uint32_t(low);
+ if (uint32_t(i) < uint32_t(high - low + 1)) {
+ len = script->tableSwitchCaseOffset(REGS.pc, uint32_t(i)) -
+ script->pcToOffset(REGS.pc);
+ }
+ ADVANCE_AND_DISPATCH(len);
+ }
+
+ CASE(Arguments) {
+ MOZ_ASSERT(script->needsArgsObj());
+ ArgumentsObject* obj = ArgumentsObject::createExpected(cx, REGS.fp());
+ if (!obj) {
+ goto error;
+ }
+ PUSH_COPY(ObjectValue(*obj));
+ }
+ END_CASE(Arguments)
+
+ CASE(Rest) {
+ ReservedRooted<JSObject*> rest(&rootObject0,
+ REGS.fp()->createRestParameter(cx));
+ if (!rest) {
+ goto error;
+ }
+ PUSH_COPY(ObjectValue(*rest));
+ }
+ END_CASE(Rest)
+
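+ // Aliased (closed-over) bindings are addressed by an
+ // EnvironmentCoordinate, a (hops, slot) pair: walk |hops| environments up
+ // the chain, then access the given slot.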
+ CASE(GetAliasedVar) {
+ EnvironmentCoordinate ec = EnvironmentCoordinate(REGS.pc);
+ ReservedRooted<Value> val(
+ &rootValue0, REGS.fp()->aliasedEnvironment(ec).aliasedBinding(ec));
+
+ ASSERT_UNINITIALIZED_ALIASED_LEXICAL(val);
+
+ PUSH_COPY(val);
+ }
+ END_CASE(GetAliasedVar)
+
+ CASE(GetAliasedDebugVar) {
+ EnvironmentCoordinate ec = EnvironmentCoordinate(REGS.pc);
+ ReservedRooted<Value> val(
+ &rootValue0,
+ REGS.fp()->aliasedEnvironmentMaybeDebug(ec).aliasedBinding(ec));
+
+ ASSERT_UNINITIALIZED_ALIASED_LEXICAL(val);
+
+ PUSH_COPY(val);
+ }
+ END_CASE(GetAliasedDebugVar)
+
+ CASE(SetAliasedVar) {
+ EnvironmentCoordinate ec = EnvironmentCoordinate(REGS.pc);
+ EnvironmentObject& obj = REGS.fp()->aliasedEnvironment(ec);
+ MOZ_ASSERT(!IsUninitializedLexical(obj.aliasedBinding(ec)));
+ obj.setAliasedBinding(ec, REGS.sp[-1]);
+ }
+ END_CASE(SetAliasedVar)
+
+ CASE(ThrowSetConst) {
+ ReportRuntimeLexicalError(cx, JSMSG_BAD_CONST_ASSIGN, script, REGS.pc);
+ goto error;
+ }
+ END_CASE(ThrowSetConst)
+
+ CASE(CheckLexical) {
+ if (REGS.sp[-1].isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ ReportRuntimeLexicalError(cx, JSMSG_UNINITIALIZED_LEXICAL, script,
+ REGS.pc);
+ goto error;
+ }
+ }
+ END_CASE(CheckLexical)
+
+ CASE(CheckAliasedLexical) {
+ if (REGS.sp[-1].isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ ReportRuntimeLexicalError(cx, JSMSG_UNINITIALIZED_LEXICAL, script,
+ REGS.pc);
+ goto error;
+ }
+ }
+ END_CASE(CheckAliasedLexical)
+
+ CASE(InitLexical) {
+ uint32_t i = GET_LOCALNO(REGS.pc);
+ REGS.fp()->unaliasedLocal(i) = REGS.sp[-1];
+ }
+ END_CASE(InitLexical)
+
+ CASE(InitAliasedLexical) {
+ EnvironmentCoordinate ec = EnvironmentCoordinate(REGS.pc);
+ EnvironmentObject& obj = REGS.fp()->aliasedEnvironment(ec);
+ obj.setAliasedBinding(ec, REGS.sp[-1]);
+ }
+ END_CASE(InitAliasedLexical)
+
+ CASE(InitGLexical) {
+ ExtensibleLexicalEnvironmentObject* lexicalEnv;
+ if (script->hasNonSyntacticScope()) {
+ lexicalEnv = &REGS.fp()->extensibleLexicalEnvironment();
+ } else {
+ lexicalEnv = &cx->global()->lexicalEnvironment();
+ }
+ HandleValue value = REGS.stackHandleAt(-1);
+ InitGlobalLexicalOperation(cx, lexicalEnv, script, REGS.pc, value);
+ }
+ END_CASE(InitGLexical)
+
+ CASE(Uninitialized) { PUSH_MAGIC(JS_UNINITIALIZED_LEXICAL); }
+ END_CASE(Uninitialized)
+
+ CASE(GetArg) {
+ unsigned i = GET_ARGNO(REGS.pc);
+ if (script->argsObjAliasesFormals()) {
+ PUSH_COPY(REGS.fp()->argsObj().arg(i));
+ } else {
+ PUSH_COPY(REGS.fp()->unaliasedFormal(i));
+ }
+ }
+ END_CASE(GetArg)
+
+ CASE(GetFrameArg) {
+ uint32_t i = GET_ARGNO(REGS.pc);
+ PUSH_COPY(REGS.fp()->unaliasedFormal(i, DONT_CHECK_ALIASING));
+ }
+ END_CASE(GetFrameArg)
+
+ CASE(SetArg) {
+ unsigned i = GET_ARGNO(REGS.pc);
+ if (script->argsObjAliasesFormals()) {
+ REGS.fp()->argsObj().setArg(i, REGS.sp[-1]);
+ } else {
+ REGS.fp()->unaliasedFormal(i) = REGS.sp[-1];
+ }
+ }
+ END_CASE(SetArg)
+
+ CASE(GetLocal) {
+ uint32_t i = GET_LOCALNO(REGS.pc);
+ PUSH_COPY_SKIP_CHECK(REGS.fp()->unaliasedLocal(i));
+
+#ifdef DEBUG
+ if (IsUninitializedLexical(REGS.sp[-1])) {
+ JSOp next = JSOp(*GetNextPc(REGS.pc));
+ MOZ_ASSERT(next == JSOp::CheckThis || next == JSOp::CheckReturn ||
+ next == JSOp::CheckThisReinit || next == JSOp::CheckLexical);
+ }
+
+ /*
+ * Skip the same-compartment assertion if the local will be immediately
+ * popped. We do not guarantee sync for dead locals when coming in from
+ * JIT code, and a GetLocal followed by Pop is not considered to be a
+ * use of the variable.
+ */
+ if (JSOp(REGS.pc[JSOpLength_GetLocal]) != JSOp::Pop) {
+ cx->debugOnlyCheck(REGS.sp[-1]);
+ }
+#endif
+ }
+ END_CASE(GetLocal)
+
+ CASE(SetLocal) {
+ uint32_t i = GET_LOCALNO(REGS.pc);
+
+ MOZ_ASSERT(!IsUninitializedLexical(REGS.fp()->unaliasedLocal(i)));
+
+ REGS.fp()->unaliasedLocal(i) = REGS.sp[-1];
+ }
+ END_CASE(SetLocal)
+
+ CASE(ArgumentsLength) {
+ MOZ_ASSERT(!script->needsArgsObj());
+ PUSH_INT32(REGS.fp()->numActualArgs());
+ }
+ END_CASE(ArgumentsLength)
+
+ CASE(GetActualArg) {
+ MOZ_ASSERT(!script->needsArgsObj());
+ uint32_t index = REGS.sp[-1].toInt32();
+ REGS.sp[-1] = REGS.fp()->unaliasedActual(index);
+ }
+ END_CASE(GetActualArg)
+
+ CASE(GlobalOrEvalDeclInstantiation) {
+ GCThingIndex lastFun = GET_GCTHING_INDEX(REGS.pc);
+ HandleObject env = REGS.fp()->environmentChain();
+ if (!GlobalOrEvalDeclInstantiation(cx, env, script, lastFun)) {
+ goto error;
+ }
+ }
+ END_CASE(GlobalOrEvalDeclInstantiation)
+
+ CASE(Lambda) {
+ /* Load the specified function object literal. */
+ ReservedRooted<JSFunction*> fun(&rootFunction0,
+ script->getFunction(REGS.pc));
+ JSObject* obj = Lambda(cx, fun, REGS.fp()->environmentChain());
+ if (!obj) {
+ goto error;
+ }
+
+ MOZ_ASSERT(obj->staticPrototype());
+ PUSH_OBJECT(*obj);
+ }
+ END_CASE(Lambda)
+
+ CASE(ToAsyncIter) {
+ ReservedRooted<Value> nextMethod(&rootValue0, REGS.sp[-1]);
+ ReservedRooted<JSObject*> iter(&rootObject1, &REGS.sp[-2].toObject());
+ JSObject* asyncIter = CreateAsyncFromSyncIterator(cx, iter, nextMethod);
+ if (!asyncIter) {
+ goto error;
+ }
+
+ REGS.sp--;
+ REGS.sp[-1].setObject(*asyncIter);
+ }
+ END_CASE(ToAsyncIter)
+
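+ // Optimization for |await|: if CanSkipAwait determines the operand does
+ // not require a real suspension (e.g. it is not a thenable that must be
+ // awaited), MaybeExtractAwaitValue below extracts the value so the frame
+ // can continue without yielding.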
+ CASE(CanSkipAwait) {
+ ReservedRooted<Value> val(&rootValue0, REGS.sp[-1]);
+ bool canSkip;
+ if (!CanSkipAwait(cx, val, &canSkip)) {
+ goto error;
+ }
+
+ PUSH_BOOLEAN(canSkip);
+ }
+ END_CASE(CanSkipAwait)
+
+ CASE(MaybeExtractAwaitValue) {
+ MutableHandleValue val = REGS.stackHandleAt(-2);
+ ReservedRooted<Value> canSkip(&rootValue0, REGS.sp[-1]);
+
+ if (canSkip.toBoolean()) {
+ if (!ExtractAwaitValue(cx, val, val)) {
+ goto error;
+ }
+ }
+ }
+ END_CASE(MaybeExtractAwaitValue)
+
+ CASE(AsyncAwait) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+ ReservedRooted<JSObject*> gen(&rootObject1, &REGS.sp[-1].toObject());
+ ReservedRooted<Value> value(&rootValue0, REGS.sp[-2]);
+ JSObject* promise =
+ AsyncFunctionAwait(cx, gen.as<AsyncFunctionGeneratorObject>(), value);
+ if (!promise) {
+ goto error;
+ }
+
+ REGS.sp--;
+ REGS.sp[-1].setObject(*promise);
+ }
+ END_CASE(AsyncAwait)
+
+ CASE(AsyncResolve) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+ auto resolveKind = AsyncFunctionResolveKind(GET_UINT8(REGS.pc));
+ ReservedRooted<JSObject*> gen(&rootObject1, &REGS.sp[-1].toObject());
+ ReservedRooted<Value> valueOrReason(&rootValue0, REGS.sp[-2]);
+ JSObject* promise =
+ AsyncFunctionResolve(cx, gen.as<AsyncFunctionGeneratorObject>(),
+ valueOrReason, resolveKind);
+ if (!promise) {
+ goto error;
+ }
+
+ REGS.sp--;
+ REGS.sp[-1].setObject(*promise);
+ }
+ END_CASE(AsyncResolve)
+
+ CASE(SetFunName) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+ FunctionPrefixKind prefixKind = FunctionPrefixKind(GET_UINT8(REGS.pc));
+ ReservedRooted<Value> name(&rootValue0, REGS.sp[-1]);
+ ReservedRooted<JSFunction*> fun(&rootFunction0,
+ &REGS.sp[-2].toObject().as<JSFunction>());
+ if (!SetFunctionName(cx, fun, name, prefixKind)) {
+ goto error;
+ }
+
+ REGS.sp--;
+ }
+ END_CASE(SetFunName)
+
+ CASE(Callee) {
+ MOZ_ASSERT(REGS.fp()->isFunctionFrame());
+ PUSH_COPY(REGS.fp()->calleev());
+ }
+ END_CASE(Callee)
+
+ CASE(InitPropGetter)
+ CASE(InitHiddenPropGetter)
+ CASE(InitPropSetter)
+ CASE(InitHiddenPropSetter) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+ ReservedRooted<JSObject*> val(&rootObject1, &REGS.sp[-1].toObject());
+
+ if (!InitPropGetterSetterOperation(cx, REGS.pc, obj, name, val)) {
+ goto error;
+ }
+
+ REGS.sp--;
+ }
+ END_CASE(InitPropGetter)
+
+ CASE(InitElemGetter)
+ CASE(InitHiddenElemGetter)
+ CASE(InitElemSetter)
+ CASE(InitHiddenElemSetter) {
+ MOZ_ASSERT(REGS.stackDepth() >= 3);
+
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-3].toObject());
+ ReservedRooted<Value> idval(&rootValue0, REGS.sp[-2]);
+ ReservedRooted<JSObject*> val(&rootObject1, &REGS.sp[-1].toObject());
+
+ if (!InitElemGetterSetterOperation(cx, REGS.pc, obj, idval, val)) {
+ goto error;
+ }
+
+ REGS.sp -= 2;
+ }
+ END_CASE(InitElemGetter)
+
+ CASE(Hole) { PUSH_MAGIC(JS_ELEMENTS_HOLE); }
+ END_CASE(Hole)
+
+ CASE(NewInit) {
+ JSObject* obj = NewObjectOperation(cx, script, REGS.pc);
+
+ if (!obj) {
+ goto error;
+ }
+ PUSH_OBJECT(*obj);
+ }
+ END_CASE(NewInit)
+
+ CASE(NewArray) {
+ uint32_t length = GET_UINT32(REGS.pc);
+ ArrayObject* obj = NewArrayOperation(cx, length);
+ if (!obj) {
+ goto error;
+ }
+ PUSH_OBJECT(*obj);
+ }
+ END_CASE(NewArray)
+
+ CASE(NewObject) {
+ JSObject* obj = NewObjectOperation(cx, script, REGS.pc);
+ if (!obj) {
+ goto error;
+ }
+ PUSH_OBJECT(*obj);
+ }
+ END_CASE(NewObject)
+
+ CASE(MutateProto) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+
+ if (REGS.sp[-1].isObjectOrNull()) {
+ ReservedRooted<JSObject*> newProto(&rootObject1,
+ REGS.sp[-1].toObjectOrNull());
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
+ MOZ_ASSERT(obj->is<PlainObject>());
+
+ if (!SetPrototype(cx, obj, newProto)) {
+ goto error;
+ }
+ }
+
+ REGS.sp--;
+ }
+ END_CASE(MutateProto)
+
+ CASE(InitProp)
+ CASE(InitLockedProp)
+ CASE(InitHiddenProp) {
+ static_assert(JSOpLength_InitProp == JSOpLength_InitLockedProp,
+ "initprop and initlockedprop must be the same size");
+ static_assert(JSOpLength_InitProp == JSOpLength_InitHiddenProp,
+ "initprop and inithiddenprop must be the same size");
+ /* Load the property's initial value into rval. */
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+ ReservedRooted<Value> rval(&rootValue0, REGS.sp[-1]);
+
+ /* Load the object being initialized into obj. */
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
+
+ ReservedRooted<PropertyName*> name(&rootName0, script->getName(REGS.pc));
+
+ if (!InitPropertyOperation(cx, REGS.pc, obj, name, rval)) {
+ goto error;
+ }
+
+ REGS.sp--;
+ }
+ END_CASE(InitProp)
+
+ CASE(InitElem)
+ CASE(InitHiddenElem)
+ CASE(InitLockedElem) {
+ MOZ_ASSERT(REGS.stackDepth() >= 3);
+ HandleValue val = REGS.stackHandleAt(-1);
+ HandleValue id = REGS.stackHandleAt(-2);
+
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-3].toObject());
+
+ if (!InitElemOperation(cx, REGS.pc, obj, id, val)) {
+ goto error;
+ }
+
+ REGS.sp -= 2;
+ }
+ END_CASE(InitElem)
+
+ CASE(InitElemArray) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+ HandleValue val = REGS.stackHandleAt(-1);
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-2].toObject());
+
+ InitElemArrayOperation(cx, REGS.pc, obj.as<ArrayObject>(), val);
+ REGS.sp--;
+ }
+ END_CASE(InitElemArray)
+
+ CASE(InitElemInc) {
+ MOZ_ASSERT(REGS.stackDepth() >= 3);
+ HandleValue val = REGS.stackHandleAt(-1);
+
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-3].toObject());
+
+ uint32_t index = REGS.sp[-2].toInt32();
+ if (!InitElemIncOperation(cx, obj.as<ArrayObject>(), index, val)) {
+ goto error;
+ }
+
+ REGS.sp[-2].setInt32(index + 1);
+ REGS.sp--;
+ }
+ END_CASE(InitElemInc)
+
+#ifdef ENABLE_RECORD_TUPLE
+ CASE(InitRecord) {
+ uint32_t length = GET_UINT32(REGS.pc);
+ RecordType* rec = RecordType::createUninitialized(cx, length);
+ if (!rec) {
+ goto error;
+ }
+ PUSH_EXTENDED_PRIMITIVE(*rec);
+ }
+ END_CASE(InitRecord)
+
+ CASE(AddRecordProperty) {
+ MOZ_ASSERT(REGS.stackDepth() >= 3);
+
+ ReservedRooted<JSObject*> rec(&rootObject0,
+ &REGS.sp[-3].toExtendedPrimitive());
+ MOZ_ASSERT(rec->is<RecordType>());
+
+ ReservedRooted<Value> key(&rootValue0, REGS.sp[-2]);
+ ReservedRooted<jsid> id(&rootId0);
+ if (!JS_ValueToId(cx, key, &id)) {
+ goto error;
+ }
+ if (!rec->as<RecordType>().initializeNextProperty(
+ cx, id, REGS.stackHandleAt(-1))) {
+ goto error;
+ }
+
+ REGS.sp -= 2;
+ }
+ END_CASE(AddRecordProperty)
+
+ CASE(AddRecordSpread) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+
+ if (!AddRecordSpreadOperation(cx, REGS.stackHandleAt(-2),
+ REGS.stackHandleAt(-1))) {
+ goto error;
+ }
+ REGS.sp--;
+ }
+ END_CASE(AddRecordSpread)
+
+ CASE(FinishRecord) {
+ MOZ_ASSERT(REGS.stackDepth() >= 1);
+ RecordType* rec = &REGS.sp[-1].toExtendedPrimitive().as<RecordType>();
+ if (!rec->finishInitialization(cx)) {
+ goto error;
+ }
+ }
+ END_CASE(FinishRecord)
+
+ CASE(InitTuple) {
+ uint32_t length = GET_UINT32(REGS.pc);
+ TupleType* tup = TupleType::createUninitialized(cx, length);
+ if (!tup) {
+ goto error;
+ }
+ PUSH_EXTENDED_PRIMITIVE(*tup);
+ }
+ END_CASE(InitTuple)
+
+ CASE(AddTupleElement) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+
+ ReservedRooted<JSObject*> tup(&rootObject0,
+ &REGS.sp[-2].toExtendedPrimitive());
+ HandleValue val = REGS.stackHandleAt(-1);
+
+ if (!tup->as<TupleType>().initializeNextElement(cx, val)) {
+ goto error;
+ }
+
+ REGS.sp--;
+ }
+ END_CASE(AddTupleElement)
+
+ CASE(FinishTuple) {
+ MOZ_ASSERT(REGS.stackDepth() >= 1);
+ TupleType& tup = REGS.sp[-1].toExtendedPrimitive().as<TupleType>();
+ tup.finishInitialization(cx);
+ }
+ END_CASE(FinishTuple)
+#endif
+
+ CASE(Exception) {
+ PUSH_NULL();
+ MutableHandleValue res = REGS.stackHandleAt(-1);
+ if (!GetAndClearException(cx, res)) {
+ goto error;
+ }
+ }
+ END_CASE(Exception)
+
+ CASE(Finally) { CHECK_BRANCH(); }
+ END_CASE(Finally)
+
+ CASE(Throw) {
+ CHECK_BRANCH();
+ ReservedRooted<Value> v(&rootValue0);
+ POP_COPY_TO(v);
+ MOZ_ALWAYS_FALSE(ThrowOperation(cx, v));
+ /* Let the code at the error label handle the exception. */
+ goto error;
+ }
+
+ CASE(Instanceof) {
+ ReservedRooted<Value> rref(&rootValue0, REGS.sp[-1]);
+ if (HandleValue(rref).isPrimitive()) {
+ ReportValueError(cx, JSMSG_BAD_INSTANCEOF_RHS, -1, rref, nullptr);
+ goto error;
+ }
+ ReservedRooted<JSObject*> obj(&rootObject0, &rref.toObject());
+ bool cond = false;
+ if (!InstanceofOperator(cx, obj, REGS.stackHandleAt(-2), &cond)) {
+ goto error;
+ }
+ REGS.sp--;
+ REGS.sp[-1].setBoolean(cond);
+ }
+ END_CASE(Instanceof)
+
+ CASE(Debugger) {
+ if (!DebugAPI::onDebuggerStatement(cx, REGS.fp())) {
+ goto error;
+ }
+ }
+ END_CASE(Debugger)
+
+ CASE(PushLexicalEnv) {
+ ReservedRooted<Scope*> scope(&rootScope0, script->getScope(REGS.pc));
+
+ // Create block environment and push on scope chain.
+ if (!REGS.fp()->pushLexicalEnvironment(cx, scope.as<LexicalScope>())) {
+ goto error;
+ }
+ }
+ END_CASE(PushLexicalEnv)
+
+ CASE(PopLexicalEnv) {
+#ifdef DEBUG
+ Scope* scope = script->lookupScope(REGS.pc);
+ MOZ_ASSERT(scope);
+ MOZ_ASSERT(scope->is<LexicalScope>() || scope->is<ClassBodyScope>());
+ MOZ_ASSERT_IF(scope->is<LexicalScope>(),
+ scope->as<LexicalScope>().hasEnvironment());
+ MOZ_ASSERT_IF(scope->is<ClassBodyScope>(),
+ scope->as<ClassBodyScope>().hasEnvironment());
+#endif
+
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopLexical(cx, REGS.fp(), REGS.pc);
+ }
+
+ // Pop block from scope chain.
+ REGS.fp()->popOffEnvironmentChain<LexicalEnvironmentObject>();
+ }
+ END_CASE(PopLexicalEnv)
+
+ CASE(DebugLeaveLexicalEnv) {
+#ifdef DEBUG
+ Scope* scope = script->lookupScope(REGS.pc);
+ MOZ_ASSERT(scope);
+ MOZ_ASSERT(scope->is<LexicalScope>() || scope->is<ClassBodyScope>());
+ MOZ_ASSERT_IF(scope->is<LexicalScope>(),
+ !scope->as<LexicalScope>().hasEnvironment());
+ MOZ_ASSERT_IF(scope->is<ClassBodyScope>(),
+ !scope->as<ClassBodyScope>().hasEnvironment());
+#endif
+ // FIXME: This opcode should not be necessary. The debugger shouldn't
+ // need help from bytecode to do its job. See bug 927782.
+
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopLexical(cx, REGS.fp(), REGS.pc);
+ }
+ }
+ END_CASE(DebugLeaveLexicalEnv)
+
+ CASE(FreshenLexicalEnv) {
+#ifdef DEBUG
+ Scope* scope = script->getScope(REGS.pc);
+ auto envChain = REGS.fp()->environmentChain();
+ auto* envScope = &envChain->as<BlockLexicalEnvironmentObject>().scope();
+ MOZ_ASSERT(scope == envScope);
+#endif
+
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopLexical(cx, REGS.fp(), REGS.pc);
+ }
+
+ if (!REGS.fp()->freshenLexicalEnvironment(cx)) {
+ goto error;
+ }
+ }
+ END_CASE(FreshenLexicalEnv)
+
+ CASE(RecreateLexicalEnv) {
+#ifdef DEBUG
+ Scope* scope = script->getScope(REGS.pc);
+ auto envChain = REGS.fp()->environmentChain();
+ auto* envScope = &envChain->as<BlockLexicalEnvironmentObject>().scope();
+ MOZ_ASSERT(scope == envScope);
+#endif
+
+ if (MOZ_UNLIKELY(cx->realm()->isDebuggee())) {
+ DebugEnvironments::onPopLexical(cx, REGS.fp(), REGS.pc);
+ }
+
+ if (!REGS.fp()->recreateLexicalEnvironment(cx)) {
+ goto error;
+ }
+ }
+ END_CASE(RecreateLexicalEnv)
+
+ CASE(PushClassBodyEnv) {
+ ReservedRooted<Scope*> scope(&rootScope0, script->getScope(REGS.pc));
+
+ if (!REGS.fp()->pushClassBodyEnvironment(cx,
+ scope.as<ClassBodyScope>())) {
+ goto error;
+ }
+ }
+ END_CASE(PushClassBodyEnv)
+
+ CASE(PushVarEnv) {
+ ReservedRooted<Scope*> scope(&rootScope0, script->getScope(REGS.pc));
+
+ if (!REGS.fp()->pushVarEnvironment(cx, scope)) {
+ goto error;
+ }
+ }
+ END_CASE(PushVarEnv)
+
+ CASE(Generator) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+ MOZ_ASSERT(REGS.stackDepth() == 0);
+ JSObject* obj = AbstractGeneratorObject::createFromFrame(cx, REGS.fp());
+ if (!obj) {
+ goto error;
+ }
+ PUSH_OBJECT(*obj);
+ }
+ END_CASE(Generator)
+
+ CASE(InitialYield) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+ MOZ_ASSERT_IF(script->isModule() && script->isAsync(),
+ REGS.fp()->isModuleFrame());
+ MOZ_ASSERT_IF(!script->isModule() && script->isAsync(),
+ REGS.fp()->isFunctionFrame());
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-1].toObject());
+ POP_RETURN_VALUE();
+ MOZ_ASSERT(REGS.stackDepth() == 0);
+ if (!AbstractGeneratorObject::suspend(cx, obj, REGS.fp(), REGS.pc,
+ script->nfixed())) {
+ goto error;
+ }
+ goto successful_return_continuation;
+ }
+
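+ // Yield and Await suspend the frame into the generator object at sp[-1],
+ // with the value to deliver just below it; the rest of the expression
+ // stack is saved into the generator so it can be restored on resume.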
+ CASE(Yield)
+ CASE(Await) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+ MOZ_ASSERT_IF(script->isModule() && script->isAsync(),
+ REGS.fp()->isModuleFrame());
+ MOZ_ASSERT_IF(!script->isModule() && script->isAsync(),
+ REGS.fp()->isFunctionFrame());
+ ReservedRooted<JSObject*> obj(&rootObject0, &REGS.sp[-1].toObject());
+ if (!AbstractGeneratorObject::suspend(
+ cx, obj, REGS.fp(), REGS.pc,
+ script->nfixed() + REGS.stackDepth() - 2)) {
+ goto error;
+ }
+
+ REGS.sp--;
+ POP_RETURN_VALUE();
+
+ goto successful_return_continuation;
+ }
+
+ CASE(ResumeKind) {
+ GeneratorResumeKind resumeKind = ResumeKindFromPC(REGS.pc);
+ PUSH_INT32(int32_t(resumeKind));
+ }
+ END_CASE(ResumeKind)
+
+ CASE(CheckResumeKind) {
+ int32_t kindInt = REGS.sp[-1].toInt32();
+ GeneratorResumeKind resumeKind = IntToResumeKind(kindInt);
+ if (MOZ_UNLIKELY(resumeKind != GeneratorResumeKind::Next)) {
+ ReservedRooted<Value> val(&rootValue0, REGS.sp[-3]);
+ Rooted<AbstractGeneratorObject*> gen(
+ cx, &REGS.sp[-2].toObject().as<AbstractGeneratorObject>());
+ MOZ_ALWAYS_FALSE(GeneratorThrowOrReturn(cx, activation.regs().fp(), gen,
+ val, resumeKind));
+ goto error;
+ }
+ REGS.sp -= 2;
+ }
+ END_CASE(CheckResumeKind)
+
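+ // Resume a suspended generator. Stack layout, bottom to top: the
+ // generator object, the value to deliver, and an int32 resume kind
+ // (next, throw, or return).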
+ CASE(Resume) {
+ {
+ Rooted<AbstractGeneratorObject*> gen(
+ cx, &REGS.sp[-3].toObject().as<AbstractGeneratorObject>());
+ ReservedRooted<Value> val(&rootValue0, REGS.sp[-2]);
+ ReservedRooted<Value> resumeKindVal(&rootValue1, REGS.sp[-1]);
+
+ // popInlineFrame expects there to be an additional value on the stack
+ // to pop off, so leave "gen" on the stack.
+ REGS.sp -= 1;
+
+ if (!AbstractGeneratorObject::resume(cx, activation, gen, val,
+ resumeKindVal)) {
+ goto error;
+ }
+
+ JSScript* generatorScript = REGS.fp()->script();
+ if (cx->realm() != generatorScript->realm()) {
+ cx->enterRealmOf(generatorScript);
+ }
+ SET_SCRIPT(generatorScript);
+
+ if (!probes::EnterScript(cx, generatorScript,
+ generatorScript->function(), REGS.fp())) {
+ goto error;
+ }
+
+ if (!DebugAPI::onResumeFrame(cx, REGS.fp())) {
+ if (cx->isPropagatingForcedReturn()) {
+ MOZ_ASSERT_IF(
+ REGS.fp()
+ ->callee()
+ .isGenerator(), // as opposed to an async function
+ gen->isClosed());
+ }
+ goto error;
+ }
+ }
+ ADVANCE_AND_DISPATCH(0);
+ }
+
+ CASE(AfterYield) {
+ // AbstractGeneratorObject::resume takes care of setting the frame's
+ // debuggee flag.
+ MOZ_ASSERT_IF(REGS.fp()->script()->isDebuggee(), REGS.fp()->isDebuggee());
+ COUNT_COVERAGE();
+ }
+ END_CASE(AfterYield)
+
+ CASE(FinalYieldRval) {
+ ReservedRooted<JSObject*> gen(&rootObject0, &REGS.sp[-1].toObject());
+ REGS.sp--;
+ AbstractGeneratorObject::finalSuspend(gen);
+ goto successful_return_continuation;
+ }
+
+ CASE(CheckClassHeritage) {
+ HandleValue heritage = REGS.stackHandleAt(-1);
+
+ if (!CheckClassHeritageOperation(cx, heritage)) {
+ goto error;
+ }
+ }
+ END_CASE(CheckClassHeritage)
+
+ CASE(BuiltinObject) {
+ auto kind = BuiltinObjectKind(GET_UINT8(REGS.pc));
+ JSObject* builtin = BuiltinObjectOperation(cx, kind);
+ if (!builtin) {
+ goto error;
+ }
+ PUSH_OBJECT(*builtin);
+ }
+ END_CASE(BuiltinObject)
+
+ CASE(FunWithProto) {
+ ReservedRooted<JSObject*> proto(&rootObject1, &REGS.sp[-1].toObject());
+
+ /* Load the specified function object literal. */
+ ReservedRooted<JSFunction*> fun(&rootFunction0,
+ script->getFunction(REGS.pc));
+
+ JSObject* obj =
+ FunWithProtoOperation(cx, fun, REGS.fp()->environmentChain(), proto);
+ if (!obj) {
+ goto error;
+ }
+
+ REGS.sp[-1].setObject(*obj);
+ }
+ END_CASE(FunWithProto)
+
+ CASE(ObjWithProto) {
+ JSObject* obj = ObjectWithProtoOperation(cx, REGS.stackHandleAt(-1));
+ if (!obj) {
+ goto error;
+ }
+
+ REGS.sp[-1].setObject(*obj);
+ }
+ END_CASE(ObjWithProto)
+
+ CASE(InitHomeObject) {
+ MOZ_ASSERT(REGS.stackDepth() >= 2);
+
+ /* Load the function to be initialized */
+ JSFunction* func = &REGS.sp[-2].toObject().as<JSFunction>();
+ MOZ_ASSERT(func->allowSuperProperty());
+
+ /* Load the home object */
+ JSObject* obj = &REGS.sp[-1].toObject();
+ MOZ_ASSERT(obj->is<PlainObject>() || obj->is<JSFunction>());
+
+ func->setExtendedSlot(FunctionExtended::METHOD_HOMEOBJECT_SLOT,
+ ObjectValue(*obj));
+ REGS.sp--;
+ }
+ END_CASE(InitHomeObject)
+
+ CASE(SuperBase) {
+ JSFunction& superEnvFunc = REGS.sp[-1].toObject().as<JSFunction>();
+ MOZ_ASSERT(superEnvFunc.allowSuperProperty());
+ MOZ_ASSERT(superEnvFunc.baseScript()->needsHomeObject());
+ const Value& homeObjVal = superEnvFunc.getExtendedSlot(
+ FunctionExtended::METHOD_HOMEOBJECT_SLOT);
+
+ JSObject* homeObj = &homeObjVal.toObject();
+ JSObject* superBase = HomeObjectSuperBase(homeObj);
+
+ REGS.sp[-1].setObjectOrNull(superBase);
+ }
+ END_CASE(SuperBase)
+
+ CASE(NewTarget) {
+ PUSH_COPY(REGS.fp()->newTarget());
+ MOZ_ASSERT(REGS.sp[-1].isObject() || REGS.sp[-1].isUndefined());
+ }
+ END_CASE(NewTarget)
+
+ CASE(ImportMeta) {
+ JSObject* metaObject = ImportMetaOperation(cx, script);
+ if (!metaObject) {
+ goto error;
+ }
+
+ PUSH_OBJECT(*metaObject);
+ }
+ END_CASE(ImportMeta)
+
+ CASE(DynamicImport) {
+ ReservedRooted<Value> options(&rootValue0, REGS.sp[-1]);
+ REGS.sp--;
+
+ ReservedRooted<Value> specifier(&rootValue1);
+ POP_COPY_TO(specifier);
+
+ JSObject* promise =
+ StartDynamicModuleImport(cx, script, specifier, options);
+    if (!promise) {
+      goto error;
+    }
+
+ PUSH_OBJECT(*promise);
+ }
+ END_CASE(DynamicImport)
+
+ CASE(EnvCallee) {
+ uint8_t numHops = GET_UINT8(REGS.pc);
+ JSObject* env = &REGS.fp()->environmentChain()->as<EnvironmentObject>();
+ for (unsigned i = 0; i < numHops; i++) {
+ env = &env->as<EnvironmentObject>().enclosingEnvironment();
+ }
+ PUSH_OBJECT(env->as<CallObject>().callee());
+ }
+ END_CASE(EnvCallee)
+
+ CASE(SuperFun) {
+ JSObject* superEnvFunc = &REGS.sp[-1].toObject();
+ JSObject* superFun = SuperFunOperation(superEnvFunc);
+ REGS.sp[-1].setObjectOrNull(superFun);
+ }
+ END_CASE(SuperFun)
+
+ CASE(CheckObjCoercible) {
+ ReservedRooted<Value> checkVal(&rootValue0, REGS.sp[-1]);
+ if (checkVal.isNullOrUndefined()) {
+ MOZ_ALWAYS_FALSE(ThrowObjectCoercible(cx, checkVal));
+ goto error;
+ }
+ }
+ END_CASE(CheckObjCoercible)
+
+ CASE(DebugCheckSelfHosted) {
+#ifdef DEBUG
+ ReservedRooted<Value> checkVal(&rootValue0, REGS.sp[-1]);
+ if (!Debug_CheckSelfHosted(cx, checkVal)) {
+ goto error;
+ }
+#endif
+ }
+ END_CASE(DebugCheckSelfHosted)
+
+ CASE(IsConstructing) { PUSH_MAGIC(JS_IS_CONSTRUCTING); }
+ END_CASE(IsConstructing)
+
+ CASE(Inc) {
+ MutableHandleValue val = REGS.stackHandleAt(-1);
+ if (!IncOperation(cx, val, val)) {
+ goto error;
+ }
+ }
+ END_CASE(Inc)
+
+ CASE(Dec) {
+ MutableHandleValue val = REGS.stackHandleAt(-1);
+ if (!DecOperation(cx, val, val)) {
+ goto error;
+ }
+ }
+ END_CASE(Dec)
+
+ CASE(ToNumeric) {
+ if (!ToNumeric(cx, REGS.stackHandleAt(-1))) {
+ goto error;
+ }
+ }
+ END_CASE(ToNumeric)
+
+ CASE(BigInt) { PUSH_BIGINT(script->getBigInt(REGS.pc)); }
+ END_CASE(BigInt)
+
+ DEFAULT() {
+ char numBuf[12];
+ SprintfLiteral(numBuf, "%d", *REGS.pc);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_BYTECODE, numBuf);
+ goto error;
+ }
+
+ } /* interpreter loop */
+
+ MOZ_CRASH("Interpreter loop exited via fallthrough");
+
+error:
+ switch (HandleError(cx, REGS)) {
+ case SuccessfulReturnContinuation:
+ goto successful_return_continuation;
+
+ case ErrorReturnContinuation:
+ interpReturnOK = false;
+ goto return_continuation;
+
+ case CatchContinuation:
+ ADVANCE_AND_DISPATCH(0);
+
+ case FinallyContinuation: {
+ /*
+ * Push (exception, true) pair for finally to indicate that we
+ * should rethrow the exception.
+ */
+ ReservedRooted<Value> exception(&rootValue0);
+ if (!cx->getPendingException(&exception)) {
+ interpReturnOK = false;
+ goto return_continuation;
+ }
+ PUSH_COPY(exception);
+ PUSH_BOOLEAN(true);
+ cx->clearPendingException();
+ }
+ ADVANCE_AND_DISPATCH(0);
+ }
+
+ MOZ_CRASH("Invalid HandleError continuation");
+
+exit:
+ if (MOZ_LIKELY(!frameHalfInitialized)) {
+ interpReturnOK =
+ DebugAPI::onLeaveFrame(cx, REGS.fp(), REGS.pc, interpReturnOK);
+
+ REGS.fp()->epilogue(cx, REGS.pc);
+ }
+
+ gc::MaybeVerifyBarriers(cx, true);
+
+ /*
+ * This path is used when it's guaranteed the method can be finished
+ * inside the JIT.
+ */
+leave_on_safe_point:
+
+ if (interpReturnOK) {
+ state.setReturnValue(activation.entryFrame()->returnValue());
+ }
+
+ return interpReturnOK;
+
+prologue_error:
+ interpReturnOK = false;
+ frameHalfInitialized = true;
+ goto prologue_return_continuation;
+}
+
+bool js::ThrowOperation(JSContext* cx, HandleValue v) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+ cx->setPendingException(v, ShouldCaptureStack::Maybe);
+ return false;
+}
+
+bool js::GetProperty(JSContext* cx, HandleValue v, Handle<PropertyName*> name,
+ MutableHandleValue vp) {
+ if (name == cx->names().length) {
+ // Fast path for strings, arrays and arguments.
+ if (GetLengthProperty(v, vp)) {
+ return true;
+ }
+ }
+
+ // Optimize common cases like (2).toString() or "foo".valueOf() to not
+ // create a wrapper object.
+ if (v.isPrimitive() && !v.isNullOrUndefined()) {
+ JSObject* proto;
+
+ switch (v.type()) {
+ case ValueType::Double:
+ case ValueType::Int32:
+ proto = GlobalObject::getOrCreateNumberPrototype(cx, cx->global());
+ break;
+ case ValueType::Boolean:
+ proto = GlobalObject::getOrCreateBooleanPrototype(cx, cx->global());
+ break;
+ case ValueType::String:
+ proto = GlobalObject::getOrCreateStringPrototype(cx, cx->global());
+ break;
+ case ValueType::Symbol:
+ proto = GlobalObject::getOrCreateSymbolPrototype(cx, cx->global());
+ break;
+ case ValueType::BigInt:
+ proto = GlobalObject::getOrCreateBigIntPrototype(cx, cx->global());
+ break;
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive: {
+ RootedObject obj(cx, &v.toExtendedPrimitive());
+ RootedId id(cx, NameToId(name));
+ return ExtendedPrimitiveGetProperty(cx, obj, v, id, vp);
+ }
+#endif
+ case ValueType::Undefined:
+ case ValueType::Null:
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ case ValueType::Object:
+ MOZ_CRASH("unexpected type");
+ }
+
+ if (!proto) {
+ return false;
+ }
+
+ if (GetPropertyPure(cx, proto, NameToId(name), vp.address())) {
+ return true;
+ }
+ }
+
+ RootedValue receiver(cx, v);
+ RootedObject obj(
+ cx, ToObjectFromStackForPropertyAccess(cx, v, JSDVG_SEARCH_STACK, name));
+ if (!obj) {
+ return false;
+ }
+
+ return GetProperty(cx, obj, receiver, name, vp);
+}
+
+JSObject* js::Lambda(JSContext* cx, HandleFunction fun, HandleObject parent) {
+ JSFunction* clone;
+ if (fun->isNativeFun()) {
+ MOZ_ASSERT(IsAsmJSModule(fun));
+ clone = CloneAsmJSModuleFunction(cx, fun);
+ } else {
+ RootedObject proto(cx, fun->staticPrototype());
+ clone = CloneFunctionReuseScript(cx, fun, parent, proto);
+ }
+ if (!clone) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(fun->global() == clone->global());
+ return clone;
+}
+
+JSObject* js::BindVarOperation(JSContext* cx, JSObject* envChain) {
+ // Note: BindVarOperation has an unused cx argument because the JIT callVM
+ // machinery requires this.
+ return &GetVariablesObject(envChain);
+}
+
+JSObject* js::ImportMetaOperation(JSContext* cx, HandleScript script) {
+ RootedObject module(cx, GetModuleObjectForScript(script));
+ MOZ_ASSERT(module);
+ return GetOrCreateModuleMetaObject(cx, module);
+}
+
+JSObject* js::BuiltinObjectOperation(JSContext* cx, BuiltinObjectKind kind) {
+ return GetOrCreateBuiltinObject(cx, kind);
+}
+
+bool js::ThrowMsgOperation(JSContext* cx, const unsigned throwMsgKind) {
+ auto errorNum = ThrowMsgKindToErrNum(ThrowMsgKind(throwMsgKind));
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, errorNum);
+ return false;
+}
+
+bool js::GetAndClearExceptionAndStack(JSContext* cx, MutableHandleValue res,
+ MutableHandle<SavedFrame*> stack) {
+ if (!cx->getPendingException(res)) {
+ return false;
+ }
+ stack.set(cx->getPendingExceptionStack());
+ cx->clearPendingException();
+
+ // Allow interrupting deeply nested exception handling.
+ return CheckForInterrupt(cx);
+}
+
+bool js::GetAndClearException(JSContext* cx, MutableHandleValue res) {
+ Rooted<SavedFrame*> stack(cx);
+ return GetAndClearExceptionAndStack(cx, res, &stack);
+}
+
+template <bool strict>
+bool js::DelPropOperation(JSContext* cx, HandleValue val,
+ Handle<PropertyName*> name, bool* res) {
+ const int valIndex = -1;
+ RootedObject obj(cx,
+ ToObjectFromStackForPropertyAccess(cx, val, valIndex, name));
+ if (!obj) {
+ return false;
+ }
+
+ RootedId id(cx, NameToId(name));
+ ObjectOpResult result;
+ if (!DeleteProperty(cx, obj, id, result)) {
+ return false;
+ }
+
+ if (strict) {
+ if (!result) {
+ return result.reportError(cx, obj, id);
+ }
+ *res = true;
+ } else {
+ *res = result.ok();
+ }
+ return true;
+}
+
+template bool js::DelPropOperation<true>(JSContext* cx, HandleValue val,
+ Handle<PropertyName*> name, bool* res);
+template bool js::DelPropOperation<false>(JSContext* cx, HandleValue val,
+ Handle<PropertyName*> name,
+ bool* res);
+
+template <bool strict>
+bool js::DelElemOperation(JSContext* cx, HandleValue val, HandleValue index,
+ bool* res) {
+ const int valIndex = -2;
+ RootedObject obj(
+ cx, ToObjectFromStackForPropertyAccess(cx, val, valIndex, index));
+ if (!obj) {
+ return false;
+ }
+
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, index, &id)) {
+ return false;
+ }
+ ObjectOpResult result;
+ if (!DeleteProperty(cx, obj, id, result)) {
+ return false;
+ }
+
+ if (strict) {
+ if (!result) {
+ return result.reportError(cx, obj, id);
+ }
+ *res = true;
+ } else {
+ *res = result.ok();
+ }
+ return true;
+}
+
+template bool js::DelElemOperation<true>(JSContext*, HandleValue, HandleValue,
+ bool*);
+template bool js::DelElemOperation<false>(JSContext*, HandleValue, HandleValue,
+ bool*);
+
+bool js::SetObjectElement(JSContext* cx, HandleObject obj, HandleValue index,
+ HandleValue value, bool strict) {
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, index, &id)) {
+ return false;
+ }
+ RootedValue receiver(cx, ObjectValue(*obj));
+ return SetObjectElementOperation(cx, obj, id, value, receiver, strict);
+}
+
+bool js::SetObjectElementWithReceiver(JSContext* cx, HandleObject obj,
+ HandleValue index, HandleValue value,
+ HandleValue receiver, bool strict) {
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, index, &id)) {
+ return false;
+ }
+ return SetObjectElementOperation(cx, obj, id, value, receiver, strict);
+}
+
+bool js::AddValues(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, MutableHandleValue res) {
+ return AddOperation(cx, lhs, rhs, res);
+}
+
+bool js::SubValues(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, MutableHandleValue res) {
+ return SubOperation(cx, lhs, rhs, res);
+}
+
+bool js::MulValues(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, MutableHandleValue res) {
+ return MulOperation(cx, lhs, rhs, res);
+}
+
+bool js::DivValues(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, MutableHandleValue res) {
+ return DivOperation(cx, lhs, rhs, res);
+}
+
+bool js::ModValues(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, MutableHandleValue res) {
+ return ModOperation(cx, lhs, rhs, res);
+}
+
+bool js::PowValues(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, MutableHandleValue res) {
+ return PowOperation(cx, lhs, rhs, res);
+}
+
+bool js::BitNot(JSContext* cx, MutableHandleValue in, MutableHandleValue res) {
+ return BitNotOperation(cx, in, res);
+}
+
+bool js::BitXor(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res) {
+ return BitXorOperation(cx, lhs, rhs, res);
+}
+
+bool js::BitOr(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res) {
+ return BitOrOperation(cx, lhs, rhs, res);
+}
+
+bool js::BitAnd(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res) {
+ return BitAndOperation(cx, lhs, rhs, res);
+}
+
+bool js::BitLsh(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res) {
+ return BitLshOperation(cx, lhs, rhs, res);
+}
+
+bool js::BitRsh(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res) {
+ return BitRshOperation(cx, lhs, rhs, res);
+}
+
+bool js::UrshValues(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, MutableHandleValue res) {
+ return UrshOperation(cx, lhs, rhs, res);
+}
+
+bool js::LessThan(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ bool* res) {
+ return LessThanOperation(cx, lhs, rhs, res);
+}
+
+bool js::LessThanOrEqual(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, bool* res) {
+ return LessThanOrEqualOperation(cx, lhs, rhs, res);
+}
+
+bool js::GreaterThan(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, bool* res) {
+ return GreaterThanOperation(cx, lhs, rhs, res);
+}
+
+bool js::GreaterThanOrEqual(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, bool* res) {
+ return GreaterThanOrEqualOperation(cx, lhs, rhs, res);
+}
+
+bool js::AtomicIsLockFree(JSContext* cx, HandleValue in, int* out) {
+ int i;
+ if (!ToInt32(cx, in, &i)) {
+ return false;
+ }
+ *out = js::jit::AtomicOperations::isLockfreeJS(i);
+ return true;
+}
+
+bool js::DeleteNameOperation(JSContext* cx, Handle<PropertyName*> name,
+ HandleObject scopeObj, MutableHandleValue res) {
+ RootedObject scope(cx), pobj(cx);
+ PropertyResult prop;
+ if (!LookupName(cx, name, scopeObj, &scope, &pobj, &prop)) {
+ return false;
+ }
+
+ if (!scope) {
+ // Return true for non-existent names.
+ res.setBoolean(true);
+ return true;
+ }
+
+ ObjectOpResult result;
+ RootedId id(cx, NameToId(name));
+ if (!DeleteProperty(cx, scope, id, result)) {
+ return false;
+ }
+
+ bool status = result.ok();
+ res.setBoolean(status);
+
+ if (status) {
+ // Deleting a name from the global object removes it from [[VarNames]].
+ if (pobj == scope && scope->is<GlobalObject>()) {
+ scope->as<GlobalObject>().removeFromVarNames(name);
+ }
+ }
+
+ return true;
+}
+
+bool js::ImplicitThisOperation(JSContext* cx, HandleObject scopeObj,
+ Handle<PropertyName*> name,
+ MutableHandleValue res) {
+ RootedObject obj(cx);
+ if (!LookupNameWithGlobalDefault(cx, name, scopeObj, &obj)) {
+ return false;
+ }
+
+ res.set(ComputeImplicitThis(obj));
+ return true;
+}
+
+unsigned js::GetInitDataPropAttrs(JSOp op) {
+ switch (op) {
+ case JSOp::InitProp:
+ case JSOp::InitElem:
+ return JSPROP_ENUMERATE;
+ case JSOp::InitLockedProp:
+ case JSOp::InitLockedElem:
+ return JSPROP_PERMANENT | JSPROP_READONLY;
+ case JSOp::InitHiddenProp:
+ case JSOp::InitHiddenElem:
+ // Non-enumerable, but writable and configurable
+ return 0;
+ default:;
+ }
+ MOZ_CRASH("Unknown data initprop");
+}
+
+static bool InitGetterSetterOperation(JSContext* cx, jsbytecode* pc,
+ HandleObject obj, HandleId id,
+ HandleObject val) {
+ MOZ_ASSERT(val->isCallable());
+
+ JSOp op = JSOp(*pc);
+
+ unsigned attrs = 0;
+ if (!IsHiddenInitOp(op)) {
+ attrs |= JSPROP_ENUMERATE;
+ }
+
+ if (op == JSOp::InitPropGetter || op == JSOp::InitElemGetter ||
+ op == JSOp::InitHiddenPropGetter || op == JSOp::InitHiddenElemGetter) {
+ return DefineAccessorProperty(cx, obj, id, val, nullptr, attrs);
+ }
+
+ MOZ_ASSERT(op == JSOp::InitPropSetter || op == JSOp::InitElemSetter ||
+ op == JSOp::InitHiddenPropSetter ||
+ op == JSOp::InitHiddenElemSetter);
+ return DefineAccessorProperty(cx, obj, id, nullptr, val, attrs);
+}
+
+bool js::InitPropGetterSetterOperation(JSContext* cx, jsbytecode* pc,
+ HandleObject obj,
+ Handle<PropertyName*> name,
+ HandleObject val) {
+ RootedId id(cx, NameToId(name));
+ return InitGetterSetterOperation(cx, pc, obj, id, val);
+}
+
+bool js::InitElemGetterSetterOperation(JSContext* cx, jsbytecode* pc,
+ HandleObject obj, HandleValue idval,
+ HandleObject val) {
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, idval, &id)) {
+ return false;
+ }
+
+ return InitGetterSetterOperation(cx, pc, obj, id, val);
+}
+
+bool js::SpreadCallOperation(JSContext* cx, HandleScript script, jsbytecode* pc,
+ HandleValue thisv, HandleValue callee,
+ HandleValue arr, HandleValue newTarget,
+ MutableHandleValue res) {
+ Rooted<ArrayObject*> aobj(cx, &arr.toObject().as<ArrayObject>());
+ uint32_t length = aobj->length();
+ JSOp op = JSOp(*pc);
+ bool constructing = op == JSOp::SpreadNew || op == JSOp::SpreadSuperCall;
+
+ // {Construct,Invoke}Args::init does this too, but this gives us a better
+ // error message.
+ if (length > ARGS_LENGTH_MAX) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ constructing ? JSMSG_TOO_MANY_CON_SPREADARGS
+ : JSMSG_TOO_MANY_FUN_SPREADARGS);
+ return false;
+ }
+
+ // Do our own checks for the callee being a function, as Invoke uses the
+ // expression decompiler to decompile the callee stack operand based on
+ // the number of arguments. Spread operations have the callee at sp - 3
+ // when not constructing, and sp - 4 when constructing.
+ if (callee.isPrimitive()) {
+ return ReportIsNotFunction(cx, callee, 2 + constructing,
+ constructing ? CONSTRUCT : NO_CONSTRUCT);
+ }
+
+ if (!callee.toObject().isCallable()) {
+ return ReportIsNotFunction(cx, callee, 2 + constructing,
+ constructing ? CONSTRUCT : NO_CONSTRUCT);
+ }
+
+ // The object must be an array with dense elements and no holes. Baseline's
+ // optimized spread call stubs rely on this.
+ MOZ_ASSERT(IsPackedArray(aobj));
+
+ if (constructing) {
+ if (!StackCheckIsConstructorCalleeNewTarget(cx, callee, newTarget)) {
+ return false;
+ }
+
+ ConstructArgs cargs(cx);
+ if (!cargs.init(cx, length)) {
+ return false;
+ }
+
+ if (!GetElements(cx, aobj, length, cargs.array())) {
+ return false;
+ }
+
+ RootedObject obj(cx);
+ if (!Construct(cx, callee, cargs, newTarget, &obj)) {
+ return false;
+ }
+ res.setObject(*obj);
+ } else {
+ InvokeArgs args(cx);
+ if (!args.init(cx, length)) {
+ return false;
+ }
+
+ if (!GetElements(cx, aobj, length, args.array())) {
+ return false;
+ }
+
+ if ((op == JSOp::SpreadEval || op == JSOp::StrictSpreadEval) &&
+ cx->global()->valueIsEval(callee)) {
+ if (!DirectEval(cx, args.get(0), res)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(op == JSOp::SpreadCall || op == JSOp::SpreadEval ||
+ op == JSOp::StrictSpreadEval,
+ "bad spread opcode");
+
+ if (!Call(cx, callee, thisv, args, res)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool OptimizeArraySpreadCall(JSContext* cx, HandleObject obj,
+ MutableHandleValue result) {
+ MOZ_ASSERT(result.isUndefined());
+
+  // Optimize the spread call by skipping the spread operation when the
+  // following conditions are met (see the example below):
+  //   * the argument is an array
+  //   * the array has no holes
+ // * array[@@iterator] is not modified
+ // * the array's prototype is Array.prototype
+ // * Array.prototype[@@iterator] is not modified
+ // * %ArrayIteratorPrototype%.next is not modified
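+  //
+  // For example (illustrative only): for a call like |f(...[1, 2, 3])|, these
+  // conditions guarantee that iterating the array is unobservable, so the
+  // packed array can be passed through directly.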
+ if (!IsPackedArray(obj)) {
+ return true;
+ }
+
+ ForOfPIC::Chain* stubChain = ForOfPIC::getOrCreate(cx);
+ if (!stubChain) {
+ return false;
+ }
+
+ bool optimized;
+ if (!stubChain->tryOptimizeArray(cx, obj.as<ArrayObject>(), &optimized)) {
+ return false;
+ }
+ if (!optimized) {
+ return true;
+ }
+
+ result.setObject(*obj);
+ return true;
+}
+
+static bool OptimizeArgumentsSpreadCall(JSContext* cx, HandleObject obj,
+ MutableHandleValue result) {
+ MOZ_ASSERT(result.isUndefined());
+
+  // Optimize the spread call by skipping the spread operation when the
+  // following conditions are met (see the example below):
+ // * the argument is an arguments object
+ // * the arguments object has no deleted elements
+ // * arguments.length is not overridden
+ // * arguments[@@iterator] is not overridden
+ // * %ArrayIteratorPrototype%.next is not modified
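+  //
+  // For example (illustrative only): in
+  // |function g() { return f(...arguments); }|, an unmodified arguments
+  // object can be converted directly into a dense array of its elements.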
+
+ if (!obj->is<ArgumentsObject>()) {
+ return true;
+ }
+
+ Handle<ArgumentsObject*> args = obj.as<ArgumentsObject>();
+ if (args->hasOverriddenElement() || args->hasOverriddenLength() ||
+ args->hasOverriddenIterator()) {
+ return true;
+ }
+
+ ForOfPIC::Chain* stubChain = ForOfPIC::getOrCreate(cx);
+ if (!stubChain) {
+ return false;
+ }
+
+ bool optimized;
+ if (!stubChain->tryOptimizeArrayIteratorNext(cx, &optimized)) {
+ return false;
+ }
+ if (!optimized) {
+ return true;
+ }
+
+ auto* array = ArrayFromArgumentsObject(cx, args);
+ if (!array) {
+ return false;
+ }
+
+ result.setObject(*array);
+ return true;
+}
+
+bool js::OptimizeSpreadCall(JSContext* cx, HandleValue arg,
+ MutableHandleValue result) {
+ // This function returns |undefined| if the spread operation can't be
+ // optimized.
+ result.setUndefined();
+
+ if (!arg.isObject()) {
+ return true;
+ }
+
+ RootedObject obj(cx, &arg.toObject());
+ if (!OptimizeArraySpreadCall(cx, obj, result)) {
+ return false;
+ }
+ if (result.isObject()) {
+ return true;
+ }
+ if (!OptimizeArgumentsSpreadCall(cx, obj, result)) {
+ return false;
+ }
+ if (result.isObject()) {
+ return true;
+ }
+
+ MOZ_ASSERT(result.isUndefined());
+ return true;
+}
+
+ArrayObject* js::ArrayFromArgumentsObject(JSContext* cx,
+ Handle<ArgumentsObject*> args) {
+ MOZ_ASSERT(!args->hasOverriddenLength());
+ MOZ_ASSERT(!args->hasOverriddenElement());
+
+ uint32_t length = args->initialLength();
+ auto* array = NewDenseFullyAllocatedArray(cx, length);
+ if (!array) {
+ return nullptr;
+ }
+ array->setDenseInitializedLength(length);
+
+ for (uint32_t index = 0; index < length; index++) {
+ const Value& v = args->element(index);
+ array->initDenseElement(index, v);
+ }
+
+ return array;
+}
+
+JSObject* js::NewObjectOperation(JSContext* cx, HandleScript script,
+ const jsbytecode* pc) {
+ if (JSOp(*pc) == JSOp::NewObject) {
+ Rooted<SharedShape*> shape(cx, script->getShape(pc));
+ return PlainObject::createWithShape(cx, shape);
+ }
+
+ MOZ_ASSERT(JSOp(*pc) == JSOp::NewInit);
+ return NewPlainObject(cx);
+}
+
+JSObject* js::NewPlainObjectBaselineFallback(JSContext* cx,
+ Handle<SharedShape*> shape,
+ gc::AllocKind allocKind,
+ gc::AllocSite* site) {
+ MOZ_ASSERT(shape->getObjectClass() == &PlainObject::class_);
+
+ mozilla::Maybe<AutoRealm> ar;
+ if (cx->realm() != shape->realm()) {
+ MOZ_ASSERT(cx->compartment() == shape->compartment());
+ ar.emplace(cx, shape);
+ }
+
+ gc::Heap initialHeap = site->initialHeap();
+ return NativeObject::create(cx, allocKind, initialHeap, shape, site);
+}
+
+JSObject* js::NewPlainObjectOptimizedFallback(JSContext* cx,
+ Handle<SharedShape*> shape,
+ gc::AllocKind allocKind,
+ gc::Heap initialHeap) {
+ MOZ_ASSERT(shape->getObjectClass() == &PlainObject::class_);
+
+ mozilla::Maybe<AutoRealm> ar;
+ if (cx->realm() != shape->realm()) {
+ MOZ_ASSERT(cx->compartment() == shape->compartment());
+ ar.emplace(cx, shape);
+ }
+
+ gc::AllocSite* site = cx->zone()->optimizedAllocSite();
+ return NativeObject::create(cx, allocKind, initialHeap, shape, site);
+}
+
+ArrayObject* js::NewArrayOperation(
+ JSContext* cx, uint32_t length,
+ NewObjectKind newKind /* = GenericObject */) {
+ return NewDenseFullyAllocatedArray(cx, length, newKind);
+}
+
+ArrayObject* js::NewArrayObjectBaselineFallback(JSContext* cx, uint32_t length,
+ gc::AllocKind allocKind,
+ gc::AllocSite* site) {
+ NewObjectKind newKind =
+ site->initialHeap() == gc::Heap::Tenured ? TenuredObject : GenericObject;
+ ArrayObject* array = NewDenseFullyAllocatedArray(cx, length, newKind, site);
+ // It's important that we allocate an object with the alloc kind we were
+ // expecting so that a new arena gets allocated if the current arena for that
+ // kind is full.
+ MOZ_ASSERT_IF(array && array->isTenured(),
+ array->asTenured().getAllocKind() == allocKind);
+ return array;
+}
+
+ArrayObject* js::NewArrayObjectOptimizedFallback(JSContext* cx, uint32_t length,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind) {
+ gc::AllocSite* site = cx->zone()->optimizedAllocSite();
+ ArrayObject* array = NewDenseFullyAllocatedArray(cx, length, newKind, site);
+ // It's important that we allocate an object with the alloc kind we were
+ // expecting so that a new arena gets allocated if the current arena for that
+ // kind is full.
+ MOZ_ASSERT_IF(array && array->isTenured(),
+ array->asTenured().getAllocKind() == allocKind);
+ return array;
+}
+
+void js::ReportRuntimeLexicalError(JSContext* cx, unsigned errorNumber,
+ HandleId id) {
+ MOZ_ASSERT(errorNumber == JSMSG_UNINITIALIZED_LEXICAL ||
+ errorNumber == JSMSG_BAD_CONST_ASSIGN);
+ if (UniqueChars printable =
+ IdToPrintableUTF8(cx, id, IdToPrintableBehavior::IdIsIdentifier)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber,
+ printable.get());
+ }
+}
+
+void js::ReportRuntimeLexicalError(JSContext* cx, unsigned errorNumber,
+ Handle<PropertyName*> name) {
+ RootedId id(cx, NameToId(name));
+ ReportRuntimeLexicalError(cx, errorNumber, id);
+}
+
+void js::ReportRuntimeLexicalError(JSContext* cx, unsigned errorNumber,
+ HandleScript script, jsbytecode* pc) {
+ JSOp op = JSOp(*pc);
+ MOZ_ASSERT(op == JSOp::CheckLexical || op == JSOp::CheckAliasedLexical ||
+ op == JSOp::ThrowSetConst || op == JSOp::GetImport);
+
+ Rooted<PropertyName*> name(cx);
+ if (IsLocalOp(op)) {
+ name = FrameSlotName(script, pc)->asPropertyName();
+ } else if (IsAliasedVarOp(op)) {
+ name = EnvironmentCoordinateNameSlow(script, pc);
+ } else {
+ MOZ_ASSERT(IsAtomOp(op));
+ name = script->getName(pc);
+ }
+
+ ReportRuntimeLexicalError(cx, errorNumber, name);
+}
+
+void js::ReportRuntimeRedeclaration(JSContext* cx, Handle<PropertyName*> name,
+ const char* redeclKind) {
+ if (UniqueChars printable = AtomToPrintableString(cx, name)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_REDECLARED_VAR, redeclKind,
+ printable.get());
+ }
+}
+
+bool js::ThrowCheckIsObject(JSContext* cx, CheckIsObjectKind kind) {
+ switch (kind) {
+ case CheckIsObjectKind::IteratorNext:
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_ITER_METHOD_RETURNED_PRIMITIVE, "next");
+ break;
+ case CheckIsObjectKind::IteratorReturn:
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_ITER_METHOD_RETURNED_PRIMITIVE, "return");
+ break;
+ case CheckIsObjectKind::IteratorThrow:
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_ITER_METHOD_RETURNED_PRIMITIVE, "throw");
+ break;
+ case CheckIsObjectKind::GetIterator:
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_GET_ITER_RETURNED_PRIMITIVE);
+ break;
+ case CheckIsObjectKind::GetAsyncIterator:
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_GET_ASYNC_ITER_RETURNED_PRIMITIVE);
+ break;
+ default:
+ MOZ_CRASH("Unknown kind");
+ }
+ return false;
+}
+
+bool js::ThrowUninitializedThis(JSContext* cx) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_UNINITIALIZED_THIS);
+ return false;
+}
+
+bool js::ThrowInitializedThis(JSContext* cx) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_REINIT_THIS);
+ return false;
+}
+
+bool js::ThrowObjectCoercible(JSContext* cx, HandleValue value) {
+ MOZ_ASSERT(value.isNullOrUndefined());
+ ReportIsNullOrUndefinedForPropertyAccess(cx, value, JSDVG_SEARCH_STACK);
+ return false;
+}
+
+bool js::SetPropertySuper(JSContext* cx, HandleValue lval, HandleValue receiver,
+ Handle<PropertyName*> name, HandleValue rval,
+ bool strict) {
+ MOZ_ASSERT(lval.isObjectOrNull());
+
+ RootedObject obj(cx, ToObjectFromStackForPropertyAccess(
+ cx, lval, JSDVG_SEARCH_STACK, name));
+ if (!obj) {
+ return false;
+ }
+
+ RootedId id(cx, NameToId(name));
+ return SetObjectElementOperation(cx, obj, id, rval, receiver, strict);
+}
+
+bool js::SetElementSuper(JSContext* cx, HandleValue lval, HandleValue receiver,
+ HandleValue index, HandleValue rval, bool strict) {
+ MOZ_ASSERT(lval.isObjectOrNull());
+
+ RootedObject obj(cx, ToObjectFromStackForPropertyAccess(
+ cx, lval, JSDVG_SEARCH_STACK, index));
+ if (!obj) {
+ return false;
+ }
+
+ return SetObjectElementWithReceiver(cx, obj, index, rval, receiver, strict);
+}
+
+bool js::LoadAliasedDebugVar(JSContext* cx, JSObject* env, jsbytecode* pc,
+ MutableHandleValue result) {
+ EnvironmentCoordinate ec(pc);
+
+ for (unsigned i = ec.hops(); i; i--) {
+ if (env->is<EnvironmentObject>()) {
+ env = &env->as<EnvironmentObject>().enclosingEnvironment();
+ } else {
+ MOZ_ASSERT(env->is<DebugEnvironmentProxy>());
+ env = &env->as<DebugEnvironmentProxy>().enclosingEnvironment();
+ }
+ }
+
+ EnvironmentObject& finalEnv =
+ env->is<EnvironmentObject>()
+ ? env->as<EnvironmentObject>()
+ : env->as<DebugEnvironmentProxy>().environment();
+
+ result.set(finalEnv.aliasedBinding(ec));
+ return true;
+}
+
+// https://tc39.es/ecma262/#sec-iteratorclose
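+// Called when an iterator is closed before exhaustion, e.g. (illustrative
+// only) when a for-of loop exits early via |break|, |return|, or a thrown
+// exception.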
+bool js::CloseIterOperation(JSContext* cx, HandleObject iter,
+ CompletionKind kind) {
+ // Steps 1-2 are implicit.
+
+ // Step 3
+ RootedValue returnMethod(cx);
+ bool innerResult =
+ GetProperty(cx, iter, iter, cx->names().return_, &returnMethod);
+
+ // Step 4
+ RootedValue result(cx);
+ if (innerResult) {
+ // Step 4b
+ if (returnMethod.isNullOrUndefined()) {
+ return true;
+ }
+ // Step 4c
+ if (IsCallable(returnMethod)) {
+ RootedValue thisVal(cx, ObjectValue(*iter));
+ innerResult = Call(cx, returnMethod, thisVal, &result);
+ } else {
+ innerResult = ReportIsNotFunction(cx, returnMethod);
+ }
+ }
+
+ // Step 5
+ if (kind == CompletionKind::Throw) {
+ // If we close an iterator while unwinding for an exception,
+ // the initial exception takes priority over any exception thrown
+ // while closing the iterator.
+ if (cx->isExceptionPending()) {
+ cx->clearPendingException();
+ }
+ return true;
+ }
+
+ // Step 6
+ if (!innerResult) {
+ return false;
+ }
+
+ // Step 7
+ if (!result.isObject()) {
+ return ThrowCheckIsObject(cx, CheckIsObjectKind::IteratorReturn);
+ }
+
+ // Step 8
+ return true;
+}
diff --git a/js/src/vm/Interpreter.h b/js/src/vm/Interpreter.h
new file mode 100644
index 0000000000..4bb6a93517
--- /dev/null
+++ b/js/src/vm/Interpreter.h
@@ -0,0 +1,705 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Interpreter_h
+#define vm_Interpreter_h
+
+/*
+ * JS interpreter interface.
+ */
+
+#include "jspubtd.h"
+
+#include "vm/BuiltinObjectKind.h"
+#include "vm/CheckIsObjectKind.h" // CheckIsObjectKind
+#include "vm/Stack.h"
+
+namespace js {
+
+class WithScope;
+class EnvironmentIter;
+class PlainObject;
+
+/*
+ * Convert null/undefined |thisv| into the global lexical's |this| object, and
+ * replace other primitives with boxed versions.
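+ *
+ * For example (illustrative only): a non-strict function invoked with |this|
+ * set to the number |5| observes a boxed Number object instead.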
+ */
+extern JSObject* BoxNonStrictThis(JSContext* cx, HandleValue thisv);
+
+extern bool GetFunctionThis(JSContext* cx, AbstractFramePtr frame,
+ MutableHandleValue res);
+
+extern void GetNonSyntacticGlobalThis(JSContext* cx, HandleObject envChain,
+ MutableHandleValue res);
+
+/*
+ * numToSkip is the number of stack values the expression decompiler should skip
+ * before it reaches |v|. If it's -1, the decompiler will search the stack.
+ */
+extern bool ReportIsNotFunction(JSContext* cx, HandleValue v, int numToSkip,
+ MaybeConstruct construct = NO_CONSTRUCT);
+
+/* See ReportIsNotFunction comment for the meaning of numToSkip. */
+extern JSObject* ValueToCallable(JSContext* cx, HandleValue v,
+ int numToSkip = -1,
+ MaybeConstruct construct = NO_CONSTRUCT);
+
+// Reasons why a call could be performed, for passing on to the debugger's
+// `onNativeCall` hook.
+// The `onNativeCall` hook disables all JITs, so these calls only need to be
+// handled in the interpreter.
+enum class CallReason {
+ Call,
+ // callContentFunction or constructContentFunction in self-hosted JS.
+ CallContent,
+ // Function.prototype.call or Function.prototype.apply.
+ FunCall,
+ Getter,
+ Setter,
+};
+
+/*
+ * Call or construct arguments that are stored in rooted memory.
+ *
+ * NOTE: Any necessary |GetThisValue| computation must have been performed on
+ * |args.thisv()|, likely by the interpreter when pushing |this| onto the
+ * stack. If you're not sure whether |GetThisValue| processing has been
+ * performed, use |Invoke|.
+ */
+extern bool InternalCallOrConstruct(JSContext* cx, const CallArgs& args,
+ MaybeConstruct construct,
+ CallReason reason = CallReason::Call);
+
+/*
+ * These helpers take care of the infinite-recursion check necessary for
+ * getter/setter calls.
+ */
+extern bool CallGetter(JSContext* cx, HandleValue thisv, HandleValue getter,
+ MutableHandleValue rval);
+
+extern bool CallSetter(JSContext* cx, HandleValue thisv, HandleValue setter,
+ HandleValue rval);
+
+// ES7 rev 0c1bd3004329336774cbc90de727cd0cf5f11e93
+// 7.3.12 Call(F, V, argumentsList).
+// All parameters are required, hopefully forcing callers to be careful not to
+// (say) blindly pass callee as |newTarget| when a different value should have
+// been passed. Behavior is unspecified if any element of |args| isn't
+// initialized.
+//
+// |rval| is written to *only* after |fval| and |thisv| have been consumed, so
+// |rval| *may* alias either argument.
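+//
+// A minimal usage sketch (illustrative only; assumes |fval|, |thisv|, and
+// |args| are suitably rooted):
+//
+//   RootedValue rval(cx);
+//   if (!Call(cx, fval, thisv, args, &rval)) {
+//     return false;  // an exception is pending on cx
+//   }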
+extern bool Call(JSContext* cx, HandleValue fval, HandleValue thisv,
+ const AnyInvokeArgs& args, MutableHandleValue rval,
+ CallReason reason = CallReason::Call);
+
+inline bool Call(JSContext* cx, HandleValue fval, HandleValue thisv,
+ MutableHandleValue rval) {
+ FixedInvokeArgs<0> args(cx);
+ return Call(cx, fval, thisv, args, rval);
+}
+
+inline bool Call(JSContext* cx, HandleValue fval, JSObject* thisObj,
+ MutableHandleValue rval) {
+ RootedValue thisv(cx, ObjectOrNullValue(thisObj));
+ FixedInvokeArgs<0> args(cx);
+ return Call(cx, fval, thisv, args, rval);
+}
+
+inline bool Call(JSContext* cx, HandleValue fval, HandleValue thisv,
+ HandleValue arg0, MutableHandleValue rval) {
+ FixedInvokeArgs<1> args(cx);
+ args[0].set(arg0);
+ return Call(cx, fval, thisv, args, rval);
+}
+
+inline bool Call(JSContext* cx, HandleValue fval, JSObject* thisObj,
+ HandleValue arg0, MutableHandleValue rval) {
+ RootedValue thisv(cx, ObjectOrNullValue(thisObj));
+ FixedInvokeArgs<1> args(cx);
+ args[0].set(arg0);
+ return Call(cx, fval, thisv, args, rval);
+}
+
+inline bool Call(JSContext* cx, HandleValue fval, HandleValue thisv,
+ HandleValue arg0, HandleValue arg1, MutableHandleValue rval) {
+ FixedInvokeArgs<2> args(cx);
+ args[0].set(arg0);
+ args[1].set(arg1);
+ return Call(cx, fval, thisv, args, rval);
+}
+
+inline bool Call(JSContext* cx, HandleValue fval, JSObject* thisObj,
+ HandleValue arg0, HandleValue arg1, MutableHandleValue rval) {
+ RootedValue thisv(cx, ObjectOrNullValue(thisObj));
+ FixedInvokeArgs<2> args(cx);
+ args[0].set(arg0);
+ args[1].set(arg1);
+ return Call(cx, fval, thisv, args, rval);
+}
+
+// Perform the above Call() operation using the given arguments. Similar to
+// ConstructFromStack() below, this handles |!IsCallable(args.calleev())|.
+//
+// This internal operation is intended only for use with arguments known to be
+// on the JS stack, or at least in carefully-rooted memory. The vast majority of
+// potential users should instead use InvokeArgs in concert with Call().
+extern bool CallFromStack(JSContext* cx, const CallArgs& args,
+ CallReason reason = CallReason::Call);
+
+// ES6 7.3.13 Construct(F, argumentsList, newTarget). All parameters are
+// required, hopefully forcing callers to be careful not to (say) blindly pass
+// callee as |newTarget| when a different value should have been passed.
+// Behavior is unspecified if any element of |args| isn't initialized.
+//
+// |rval| is written to *only* after |fval| and |newTarget| have been consumed,
+// so |rval| *may* alias either argument.
+//
+// NOTE: As with the ES6 spec operation, it's the caller's responsibility to
+// ensure |fval| and |newTarget| are both |IsConstructor|.
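+//
+// A minimal usage sketch (illustrative only; assumes |fval| and |newTarget|
+// satisfy IsConstructor and |cargs| is an initialized ConstructArgs):
+//
+//   RootedObject obj(cx);
+//   if (!Construct(cx, fval, cargs, newTarget, &obj)) {
+//     return false;  // an exception is pending on cx
+//   }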
+extern bool Construct(JSContext* cx, HandleValue fval,
+ const AnyConstructArgs& args, HandleValue newTarget,
+ MutableHandleObject objp);
+
+// Check that, for the given |args| (which must be |args.isConstructing()|),
+// |IsConstructor(args.callee())| holds. If this is not the case, throw a
+// TypeError. Otherwise, the caller must additionally ensure that
+// |IsConstructor(args.newTarget())|. (If |args| comes directly from the
+// interpreter stack, as set up by JSOp::New, this comes for free.) Then
+// perform a Construct() operation using |args|.
+//
+// This internal operation is intended only for use with arguments known to be
+// on the JS stack, or at least in carefully-rooted memory. The vast majority of
+// potential users should instead use ConstructArgs in concert with Construct().
+extern bool ConstructFromStack(JSContext* cx, const CallArgs& args,
+ CallReason reason = CallReason::Call);
+
+// Call Construct(fval, args, newTarget), but use the given |thisv| as |this|
+// during construction of |fval|.
+//
+// |rval| is written to *only* after |fval|, |thisv|, and |newTarget| have been
+// consumed, so |rval| *may* alias any of these arguments.
+//
+// This method exists only for very rare cases where a |this| was created
+// caller-side for construction of |fval|: basically only for JITs using
+// |CreateThis|. If that's not you, use Construct()!
+extern bool InternalConstructWithProvidedThis(JSContext* cx, HandleValue fval,
+ HandleValue thisv,
+ const AnyConstructArgs& args,
+ HandleValue newTarget,
+ MutableHandleValue rval);
+
+/*
+ * Executes a script with the given envChain. To support debugging, the
+ * evalInFrame parameter can point to an arbitrary frame in the context's call
+ * stack to simulate executing an eval in that frame.
+ */
+extern bool ExecuteKernel(JSContext* cx, HandleScript script,
+ HandleObject envChainArg,
+ AbstractFramePtr evalInFrame,
+ MutableHandleValue result);
+
+/* Execute a script with the given envChain as global code. */
+extern bool Execute(JSContext* cx, HandleScript script, HandleObject envChain,
+ MutableHandleValue rval);
+
+class ExecuteState;
+class InvokeState;
+
+// RunState is passed to RunScript and RunScript then either passes it to the
+// interpreter or to the JITs. RunState contains all information we need to
+// construct an interpreter or JIT frame.
+class MOZ_RAII RunState {
+ protected:
+ enum Kind { Execute, Invoke };
+ Kind kind_;
+
+ RootedScript script_;
+
+ explicit RunState(JSContext* cx, Kind kind, JSScript* script)
+ : kind_(kind), script_(cx, script) {}
+
+ public:
+ bool isExecute() const { return kind_ == Execute; }
+ bool isInvoke() const { return kind_ == Invoke; }
+
+ ExecuteState* asExecute() const {
+ MOZ_ASSERT(isExecute());
+ return (ExecuteState*)this;
+ }
+ InvokeState* asInvoke() const {
+ MOZ_ASSERT(isInvoke());
+ return (InvokeState*)this;
+ }
+
+ JS::HandleScript script() const { return script_; }
+
+ InterpreterFrame* pushInterpreterFrame(JSContext* cx);
+ inline void setReturnValue(const Value& v);
+
+ private:
+ RunState(const RunState& other) = delete;
+ RunState(const ExecuteState& other) = delete;
+ RunState(const InvokeState& other) = delete;
+ void operator=(const RunState& other) = delete;
+};
+
+// Eval or global script.
+class MOZ_RAII ExecuteState : public RunState {
+ HandleObject envChain_;
+
+ AbstractFramePtr evalInFrame_;
+ MutableHandleValue result_;
+
+ public:
+ ExecuteState(JSContext* cx, JSScript* script, HandleObject envChain,
+ AbstractFramePtr evalInFrame, MutableHandleValue result)
+ : RunState(cx, Execute, script),
+ envChain_(envChain),
+ evalInFrame_(evalInFrame),
+ result_(result) {}
+
+ JSObject* environmentChain() const { return envChain_; }
+ bool isDebuggerEval() const { return !!evalInFrame_; }
+
+ InterpreterFrame* pushInterpreterFrame(JSContext* cx);
+
+ void setReturnValue(const Value& v) { result_.set(v); }
+};
+
+// Data to invoke a function.
+class MOZ_RAII InvokeState final : public RunState {
+ const CallArgs& args_;
+ MaybeConstruct construct_;
+
+ public:
+ InvokeState(JSContext* cx, const CallArgs& args, MaybeConstruct construct)
+ : RunState(cx, Invoke, args.callee().as<JSFunction>().nonLazyScript()),
+ args_(args),
+ construct_(construct) {}
+
+ bool constructing() const { return construct_; }
+ const CallArgs& args() const { return args_; }
+
+ InterpreterFrame* pushInterpreterFrame(JSContext* cx);
+
+ void setReturnValue(const Value& v) { args_.rval().set(v); }
+};
+
+inline void RunState::setReturnValue(const Value& v) {
+ if (isInvoke()) {
+ asInvoke()->setReturnValue(v);
+ } else {
+ asExecute()->setReturnValue(v);
+ }
+}
+
+extern bool RunScript(JSContext* cx, RunState& state);
+extern bool Interpret(JSContext* cx, RunState& state);
+
+extern JSType TypeOfObject(JSObject* obj);
+
+extern JSType TypeOfValue(const Value& v);
+
+// Implementation of
+// https://www.ecma-international.org/ecma-262/6.0/#sec-instanceofoperator
+extern bool InstanceofOperator(JSContext* cx, HandleObject obj, HandleValue v,
+ bool* bp);
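+//
+// For example (illustrative only): |x instanceof C| first consults
+// |C[Symbol.hasInstance]| if present, and otherwise walks |x|'s prototype
+// chain looking for |C.prototype|.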
+
+// Unwind the environment chain and iterator to match the scope corresponding
+// to the given bytecode position.
+extern void UnwindEnvironment(JSContext* cx, EnvironmentIter& ei,
+ jsbytecode* pc);
+
+// Unwind all environments.
+extern void UnwindAllEnvironmentsInFrame(JSContext* cx, EnvironmentIter& ei);
+
+// Compute the pc needed to unwind the scope to the beginning of the block
+// pointed to by the try note.
+extern jsbytecode* UnwindEnvironmentToTryPc(JSScript* script,
+ const TryNote* tn);
+
+namespace detail {
+
+template <class TryNoteFilter>
+class MOZ_STACK_CLASS BaseTryNoteIter {
+ uint32_t pcOffset_;
+ TryNoteFilter isTryNoteValid_;
+
+ const TryNote* tn_;
+ const TryNote* tnEnd_;
+
+ void settle() {
+ for (; tn_ != tnEnd_; ++tn_) {
+ if (!pcInRange()) {
+ continue;
+ }
+
+ /* Try notes cannot be disjoint. That is, we can't have
+ * multiple notes with disjoint pc ranges jumping to the same
+ * catch block. This interacts awkwardly with for-of loops, in
+ * which calls to IteratorClose emitted due to abnormal
+ * completion (break, throw, return) are emitted inline, at the
+ * source location of the break, throw, or return
+ * statement. For example:
+ *
+ * for (x of iter) {
+ * try { return; } catch (e) { }
+ * }
+ *
+ * From the try-note nesting's perspective, the IteratorClose
+ * resulting from |return| is covered by the inner try, when it
+ * should not be. If IteratorClose throws, we don't want to
+ * catch it here.
+ *
+ * To make this work, we use TryNoteKind::ForOfIterClose try-notes,
+ * which cover the range of the abnormal completion. When
+ * looking up trynotes, a for-of iterclose note indicates that
+ * the enclosing for-of has just been terminated. As a result,
+ * trynotes within that for-of are no longer active. When we
+ * see a for-of-iterclose, we skip ahead in the trynotes list
+ * until we see the matching for-of.
+ *
+ * Breaking out of multiple levels of for-of at once is handled
+ * using nested FOR_OF_ITERCLOSE try-notes. Consider this code:
+ *
+ * try {
+ * loop: for (i of first) {
+ * <A>
+ * for (j of second) {
+ * <B>
+ * break loop; // <C1/2>
+ * }
+ * }
+ * } catch {...}
+ *
+ * Here is the mapping from various PCs to try-notes that we
+ * want to return:
+ *
+ * A B C1 C2
+ * | | | |
+ * | | | [---|---] ForOfIterClose (outer)
+ * | | [---|------|---] ForOfIterClose (inner)
+ * | [--X-----|------|----] ForOf (inner)
+ * [---X-----------X------|-----] ForOf (outer)
+ * [------------------------X------] TryCatch
+ *
+ * - At A, we find the outer for-of.
+ * - At B, we find the inner for-of.
+ * - At C1, we find one FOR_OF_ITERCLOSE, skip past one FOR_OF, and find
+ * the outer for-of. (This occurs if an exception is thrown while
+ * closing the inner iterator.)
+ * - At C2, we find two FOR_OF_ITERCLOSE, skip past two FOR_OF, and reach
+ * the outer try-catch. (This occurs if an exception is thrown while
+ * closing the outer iterator.)
+ */
+ if (tn_->kind() == TryNoteKind::ForOfIterClose) {
+ uint32_t iterCloseDepth = 1;
+ do {
+ ++tn_;
+ MOZ_ASSERT(tn_ != tnEnd_);
+ if (pcInRange()) {
+ if (tn_->kind() == TryNoteKind::ForOfIterClose) {
+ iterCloseDepth++;
+ } else if (tn_->kind() == TryNoteKind::ForOf) {
+ iterCloseDepth--;
+ }
+ }
+ } while (iterCloseDepth > 0);
+
+ // Advance to trynote following the enclosing for-of.
+ continue;
+ }
+
+ /*
+ * We have a note that covers the exception pc but we must check
+ * whether the interpreter has already executed the corresponding
+ * handler. This is possible when the executed bytecode implements
+ * break or return from inside a for-in loop.
+ *
+ * In this case the emitter generates additional [enditer] and [goto]
+ * opcodes to close all outstanding iterators and execute the finally
+ * blocks. If such an [enditer] throws an exception, its pc can still
+ * be inside several nested for-in loops and try-finally statements
+ * even if we have already closed the corresponding iterators and
+ * invoked the finally blocks.
+ *
+ * To address this, we make [enditer] always decrease the stack even
+ * when its implementation throws an exception. Thus already executed
+ * [enditer] and [goto] opcodes will have try notes with the stack
+ * depth exceeding the current one and this condition is what we use to
+ * filter them out.
+ */
+ if (tn_ == tnEnd_ || isTryNoteValid_(tn_)) {
+ return;
+ }
+ }
+ }
+
+ public:
+ BaseTryNoteIter(JSScript* script, jsbytecode* pc,
+ TryNoteFilter isTryNoteValid)
+ : pcOffset_(script->pcToOffset(pc)), isTryNoteValid_(isTryNoteValid) {
+ // NOTE: The Span is a temporary so we can't use begin()/end()
+ // here or the iterator will outlive the span.
+ auto trynotes = script->trynotes();
+ tn_ = trynotes.data();
+ tnEnd_ = tn_ + trynotes.size();
+
+ settle();
+ }
+
+ void operator++() {
+ ++tn_;
+ settle();
+ }
+
+ bool pcInRange() const {
+ // This checks both ends of the range at once
+ // because unsigned integers wrap on underflow.
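+    // For example (illustrative only): with start=10 and offset=5,
+    // |offset - start| wraps to a value near UINT32_MAX, which can never be
+    // less than |length|.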
+ uint32_t offset = pcOffset_;
+ uint32_t start = tn_->start;
+ uint32_t length = tn_->length;
+ return offset - start < length;
+ }
+ bool done() const { return tn_ == tnEnd_; }
+ const TryNote* operator*() const { return tn_; }
+};
+
+} // namespace detail
+
+template <class TryNoteFilter>
+class MOZ_STACK_CLASS TryNoteIter
+ : public detail::BaseTryNoteIter<TryNoteFilter> {
+ using Base = detail::BaseTryNoteIter<TryNoteFilter>;
+
+ // Keep the script alive as long as the iterator is live.
+ RootedScript script_;
+
+ public:
+ TryNoteIter(JSContext* cx, JSScript* script, jsbytecode* pc,
+ TryNoteFilter isTryNoteValid)
+ : Base(script, pc, isTryNoteValid), script_(cx, script) {}
+};
+
+class NoOpTryNoteFilter {
+ public:
+ explicit NoOpTryNoteFilter() = default;
+ bool operator()(const TryNote*) { return true; }
+};
+
+// Iterator over all try notes. Code using this iterator is not allowed to
+// trigger GC to make sure the script stays alive. See TryNoteIter above for the
+// can-GC version.
+class MOZ_STACK_CLASS TryNoteIterAllNoGC
+ : public detail::BaseTryNoteIter<NoOpTryNoteFilter> {
+ using Base = detail::BaseTryNoteIter<NoOpTryNoteFilter>;
+ JS::AutoCheckCannotGC nogc;
+
+ public:
+ TryNoteIterAllNoGC(JSScript* script, jsbytecode* pc)
+ : Base(script, pc, NoOpTryNoteFilter()) {}
+};
+
+bool HandleClosingGeneratorReturn(JSContext* cx, AbstractFramePtr frame,
+ bool ok);
+
+/************************************************************************/
+
+bool ThrowOperation(JSContext* cx, HandleValue v);
+
+bool GetProperty(JSContext* cx, HandleValue value, Handle<PropertyName*> name,
+ MutableHandleValue vp);
+
+JSObject* Lambda(JSContext* cx, HandleFunction fun, HandleObject parent);
+
+bool SetObjectElement(JSContext* cx, HandleObject obj, HandleValue index,
+ HandleValue value, bool strict);
+
+bool SetObjectElementWithReceiver(JSContext* cx, HandleObject obj,
+ HandleValue index, HandleValue value,
+ HandleValue receiver, bool strict);
+
+bool AddValues(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool SubValues(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool MulValues(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool DivValues(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool ModValues(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool PowValues(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool BitNot(JSContext* cx, MutableHandleValue in, MutableHandleValue res);
+
+bool BitXor(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool BitOr(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool BitAnd(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool BitLsh(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool BitRsh(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool UrshValues(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ MutableHandleValue res);
+
+bool LessThan(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ bool* res);
+
+bool LessThanOrEqual(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, bool* res);
+
+bool GreaterThan(JSContext* cx, MutableHandleValue lhs, MutableHandleValue rhs,
+ bool* res);
+
+bool GreaterThanOrEqual(JSContext* cx, MutableHandleValue lhs,
+ MutableHandleValue rhs, bool* res);
+
+bool AtomicIsLockFree(JSContext* cx, HandleValue in, int* out);
+
+template <bool strict>
+bool DelPropOperation(JSContext* cx, HandleValue val,
+ Handle<PropertyName*> name, bool* res);
+
+template <bool strict>
+bool DelElemOperation(JSContext* cx, HandleValue val, HandleValue index,
+ bool* res);
+
+JSObject* BindVarOperation(JSContext* cx, JSObject* envChain);
+
+JSObject* ImportMetaOperation(JSContext* cx, HandleScript script);
+
+JSObject* BuiltinObjectOperation(JSContext* cx, BuiltinObjectKind kind);
+
+bool ThrowMsgOperation(JSContext* cx, const unsigned throwMsgKind);
+
+bool GetAndClearException(JSContext* cx, MutableHandleValue res);
+
+bool GetAndClearExceptionAndStack(JSContext* cx, MutableHandleValue res,
+ MutableHandle<SavedFrame*> stack);
+
+bool DeleteNameOperation(JSContext* cx, Handle<PropertyName*> name,
+ HandleObject scopeObj, MutableHandleValue res);
+
+bool ImplicitThisOperation(JSContext* cx, HandleObject scopeObj,
+ Handle<PropertyName*> name, MutableHandleValue res);
+
+bool InitPropGetterSetterOperation(JSContext* cx, jsbytecode* pc,
+ HandleObject obj, Handle<PropertyName*> name,
+ HandleObject val);
+
+unsigned GetInitDataPropAttrs(JSOp op);
+
+bool EnterWithOperation(JSContext* cx, AbstractFramePtr frame, HandleValue val,
+ Handle<WithScope*> scope);
+
+bool InitElemGetterSetterOperation(JSContext* cx, jsbytecode* pc,
+ HandleObject obj, HandleValue idval,
+ HandleObject val);
+
+bool SpreadCallOperation(JSContext* cx, HandleScript script, jsbytecode* pc,
+ HandleValue thisv, HandleValue callee, HandleValue arr,
+ HandleValue newTarget, MutableHandleValue res);
+
+bool OptimizeSpreadCall(JSContext* cx, HandleValue arg,
+ MutableHandleValue result);
+
+ArrayObject* ArrayFromArgumentsObject(JSContext* cx,
+ Handle<ArgumentsObject*> args);
+
+JSObject* NewObjectOperation(JSContext* cx, HandleScript script,
+ const jsbytecode* pc);
+
+JSObject* NewPlainObjectBaselineFallback(JSContext* cx,
+ Handle<SharedShape*> shape,
+ gc::AllocKind allocKind,
+ gc::AllocSite* site);
+
+JSObject* NewPlainObjectOptimizedFallback(JSContext* cx,
+ Handle<SharedShape*> shape,
+ gc::AllocKind allocKind,
+ gc::Heap initialHeap);
+
+ArrayObject* NewArrayOperation(JSContext* cx, uint32_t length,
+ NewObjectKind newKind = GenericObject);
+
+// Called from JIT code when inline array allocation fails.
+ArrayObject* NewArrayObjectBaselineFallback(JSContext* cx, uint32_t length,
+ gc::AllocKind allocKind,
+ gc::AllocSite* site);
+ArrayObject* NewArrayObjectOptimizedFallback(JSContext* cx, uint32_t length,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind);
+
+[[nodiscard]] bool GetImportOperation(JSContext* cx, HandleObject envChain,
+ HandleScript script, jsbytecode* pc,
+ MutableHandleValue vp);
+
+void ReportRuntimeLexicalError(JSContext* cx, unsigned errorNumber,
+ HandleId id);
+
+void ReportRuntimeLexicalError(JSContext* cx, unsigned errorNumber,
+ Handle<PropertyName*> name);
+
+void ReportRuntimeLexicalError(JSContext* cx, unsigned errorNumber,
+ HandleScript script, jsbytecode* pc);
+
+void ReportInNotObjectError(JSContext* cx, HandleValue lref, HandleValue rref);
+
+// The parser only reports redeclarations that occur within a single script.
+// Due to the extensibility of the global lexical scope, we also check for
+// redeclarations at runtime in JSOp::GlobalOrEvalDeclInstantiation.
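+//
+// For example (illustrative only): evaluating |let x;| in one global script
+// and then |var x;| in a later script is a redeclaration the parser cannot
+// see, so it is only detected at runtime.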
+void ReportRuntimeRedeclaration(JSContext* cx, Handle<PropertyName*> name,
+ const char* redeclKind);
+
+bool ThrowCheckIsObject(JSContext* cx, CheckIsObjectKind kind);
+
+bool ThrowUninitializedThis(JSContext* cx);
+
+bool ThrowInitializedThis(JSContext* cx);
+
+bool ThrowObjectCoercible(JSContext* cx, HandleValue value);
+
+bool DefaultClassConstructor(JSContext* cx, unsigned argc, Value* vp);
+
+bool Debug_CheckSelfHosted(JSContext* cx, HandleValue funVal);
+
+bool CheckClassHeritageOperation(JSContext* cx, HandleValue heritage);
+
+PlainObject* ObjectWithProtoOperation(JSContext* cx, HandleValue proto);
+
+JSObject* FunWithProtoOperation(JSContext* cx, HandleFunction fun,
+ HandleObject parent, HandleObject proto);
+
+bool SetPropertySuper(JSContext* cx, HandleValue lval, HandleValue receiver,
+ Handle<PropertyName*> name, HandleValue rval,
+ bool strict);
+
+bool SetElementSuper(JSContext* cx, HandleValue lval, HandleValue receiver,
+ HandleValue index, HandleValue rval, bool strict);
+
+bool LoadAliasedDebugVar(JSContext* cx, JSObject* env, jsbytecode* pc,
+ MutableHandleValue result);
+
+bool CloseIterOperation(JSContext* cx, HandleObject iter, CompletionKind kind);
+} /* namespace js */
+
+#endif /* vm_Interpreter_h */
diff --git a/js/src/vm/IsGivenTypeObject-inl.h b/js/src/vm/IsGivenTypeObject-inl.h
new file mode 100644
index 0000000000..f4b17ebb43
--- /dev/null
+++ b/js/src/vm/IsGivenTypeObject-inl.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_IsGivenTypeObject_inl_h
+#define vm_IsGivenTypeObject_inl_h
+
+#include "js/Class.h" // js::ESClass
+#include "js/Object.h" // JS::GetBuiltinClass
+#include "js/RootingAPI.h" // JS::Handle
+
+#include "vm/JSContext-inl.h" // JSContext::check
+
+namespace js {
+
+inline bool IsGivenTypeObject(JSContext* cx, JS::Handle<JSObject*> obj,
+ const ESClass& typeClass, bool* isType) {
+ cx->check(obj);
+
+ ESClass cls;
+ if (!JS::GetBuiltinClass(cx, obj, &cls)) {
+ return false;
+ }
+
+ *isType = cls == typeClass;
+ return true;
+}
+
+} // namespace js
+
+#endif // vm_IsGivenTypeObject_inl_h
diff --git a/js/src/vm/Iteration.cpp b/js/src/vm/Iteration.cpp
new file mode 100644
index 0000000000..a7d7287770
--- /dev/null
+++ b/js/src/vm/Iteration.cpp
@@ -0,0 +1,2168 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JavaScript iterators. */
+
+#include "vm/Iteration.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Likely.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+
+#include <algorithm>
+#include <new>
+
+#include "jsapi.h"
+#include "jstypes.h"
+
+#include "builtin/Array.h"
+#include "builtin/SelfHostingDefines.h"
+#include "ds/Sort.h"
+#include "gc/GCContext.h"
+#include "js/ForOfIterator.h" // JS::ForOfIterator
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/PropertySpec.h"
+#include "util/DifferentialTesting.h"
+#include "util/Poison.h"
+#include "vm/GlobalObject.h"
+#include "vm/Interpreter.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h" // js::PlainObject
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#ifdef ENABLE_RECORD_TUPLE
+# include "builtin/RecordObject.h"
+# include "builtin/TupleObject.h"
+#endif
+
+#include "vm/NativeObject-inl.h"
+#include "vm/PlainObject-inl.h" // js::PlainObject::createWithTemplate
+
+using namespace js;
+
+using mozilla::ArrayEqual;
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+using mozilla::PodCopy;
+
+using RootedPropertyIteratorObject = Rooted<PropertyIteratorObject*>;
+
+static const gc::AllocKind ITERATOR_FINALIZE_KIND =
+ gc::AllocKind::OBJECT2_BACKGROUND;
+
+// Beware! This function may have to trace incompletely-initialized
+// |NativeIterator| allocations if the |IdToString| in that constructor recurs
+// into this code.
+void NativeIterator::trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &objectBeingIterated_, "objectBeingIterated_");
+ TraceNullableEdge(trc, &iterObj_, "iterObj");
+
+ // The limits below are correct at every instant of |NativeIterator|
+ // initialization, with the end-pointer incremented as each new shape is
+ // created, so they're safe to use here.
+ std::for_each(shapesBegin(), shapesEnd(), [trc](GCPtr<Shape*>& shape) {
+ TraceEdge(trc, &shape, "iterator_shape");
+ });
+
+ // But as properties must be created *before* shapes, |propertiesBegin()|,
+ // which depends on |shapesEnd()| having its final value, can't safely be
+ // used. Until this is fully initialized, use |propertyCursor_| instead,
+ // which points at the start of properties even in partially initialized
+ // |NativeIterator|s. (|propertiesEnd()| is safe at all times with respect
+ // to the properly-chosen beginning.)
+ //
+ // Note that we must trace all properties (not just those not yet visited,
+ // or just visited, due to |NativeIterator::previousPropertyWas|) for
+ // |NativeIterator|s to be reusable.
+ GCPtr<JSLinearString*>* begin =
+ MOZ_LIKELY(isInitialized()) ? propertiesBegin() : propertyCursor_;
+ std::for_each(begin, propertiesEnd(), [trc](GCPtr<JSLinearString*>& prop) {
+ // Properties begin life non-null and never *become*
+ // null. (Deletion-suppression will shift trailing
+ // properties over a deleted property in the properties
+ // array, but it doesn't null them out.)
+ TraceEdge(trc, &prop, "prop");
+ });
+}
+
+using PropertyKeySet = GCHashSet<PropertyKey, DefaultHasher<PropertyKey>>;
+
+class PropertyEnumerator {
+ RootedObject obj_;
+ MutableHandleIdVector props_;
+ PropertyIndexVector* indices_;
+
+ uint32_t flags_;
+ Rooted<PropertyKeySet> visited_;
+
+ bool enumeratingProtoChain_ = false;
+
+ enum class IndicesState {
+ // Every property that has been enumerated so far can be represented as a
+ // PropertyIndex, but we are not currently producing a list of indices. If
+ // the state is Valid when we are done enumerating, then the resulting
+ // iterator can be marked as NativeIteratorIndices::AvailableOnRequest.
+ Valid,
+
+ // Every property that has been enumerated so far can be represented as a
+ // PropertyIndex, and |indices_| points to a PropertyIndexVector containing
+ // those indices. This is used when we want to create a NativeIterator with
+ // valid indices.
+ Allocating,
+
+ // It is not possible to represent every property of the object being
+ // enumerated as a PropertyIndex. For example, enumerated properties on the
+ // prototype chain are unsupported. We can transition to this state from
+ // either of the other two.
+ Unsupported
+ };
+ IndicesState indicesState_;
+
+ public:
+ PropertyEnumerator(JSContext* cx, JSObject* obj, uint32_t flags,
+ MutableHandleIdVector props,
+ PropertyIndexVector* indices = nullptr)
+ : obj_(cx, obj),
+ props_(props),
+ indices_(indices),
+ flags_(flags),
+ visited_(cx, PropertyKeySet(cx)),
+ indicesState_(indices ? IndicesState::Allocating
+ : IndicesState::Valid) {}
+
+ bool snapshot(JSContext* cx);
+
+ void markIndicesUnsupported() { indicesState_ = IndicesState::Unsupported; }
+ bool supportsIndices() const {
+ return indicesState_ != IndicesState::Unsupported;
+ }
+ bool allocatingIndices() const {
+ return indicesState_ == IndicesState::Allocating;
+ }
+
+ private:
+ template <bool CheckForDuplicates>
+ bool enumerate(JSContext* cx, jsid id, bool enumerable,
+ PropertyIndex index = PropertyIndex::Invalid());
+
+ bool enumerateExtraProperties(JSContext* cx);
+
+ template <bool CheckForDuplicates>
+ bool enumerateNativeProperties(JSContext* cx);
+
+ bool enumerateNativeProperties(JSContext* cx, bool checkForDuplicates) {
+ if (checkForDuplicates) {
+ return enumerateNativeProperties<true>(cx);
+ }
+ return enumerateNativeProperties<false>(cx);
+ }
+
+ template <bool CheckForDuplicates>
+ bool enumerateProxyProperties(JSContext* cx);
+
+ void reversePropsAndIndicesAfter(size_t initialLength) {
+ // We iterate through prop maps in descending order of property creation,
+ // but we need our return value to be in ascending order. If we are tracking
+ // property indices, make sure to keep them in sync.
+ MOZ_ASSERT(props_.begin() + initialLength <= props_.end());
+ MOZ_ASSERT_IF(allocatingIndices(), props_.length() == indices_->length());
+
+ std::reverse(props_.begin() + initialLength, props_.end());
+ if (allocatingIndices()) {
+ std::reverse(indices_->begin() + initialLength, indices_->end());
+ }
+ }
+};
+
+template <bool CheckForDuplicates>
+bool PropertyEnumerator::enumerate(JSContext* cx, jsid id, bool enumerable,
+ PropertyIndex index) {
+ if (CheckForDuplicates) {
+ // If we've already seen this, we definitely won't add it.
+ PropertyKeySet::AddPtr p = visited_.lookupForAdd(id);
+ if (MOZ_UNLIKELY(!!p)) {
+ return true;
+ }
+
+ // It's not necessary to add properties to the hash set at the end of
+ // the prototype chain, but custom enumeration behaviors might return
+ // duplicated properties, so always add in such cases.
+ if (obj_->is<ProxyObject>() || obj_->staticPrototype() ||
+ obj_->getClass()->getNewEnumerate()) {
+ if (!visited_.add(p, id)) {
+ return false;
+ }
+ }
+ }
+
+ if (!enumerable && !(flags_ & JSITER_HIDDEN)) {
+ return true;
+ }
+
+ // Symbol-keyed properties and nonenumerable properties are skipped unless
+ // the caller specifically asks for them. A caller can also filter out
+ // non-symbols by asking for JSITER_SYMBOLSONLY. PrivateName symbols are
+ // skipped unless JSITER_PRIVATE is passed.
+ if (id.isSymbol()) {
+ if (!(flags_ & JSITER_SYMBOLS)) {
+ return true;
+ }
+ if (!(flags_ & JSITER_PRIVATE) && id.isPrivateName()) {
+ return true;
+ }
+ } else {
+ if ((flags_ & JSITER_SYMBOLSONLY)) {
+ return true;
+ }
+ }
+
+ MOZ_ASSERT_IF(allocatingIndices(), indices_->length() == props_.length());
+ if (!props_.append(id)) {
+ return false;
+ }
+
+ if (!supportsIndices()) {
+ return true;
+ }
+ if (index.kind() == PropertyIndex::Kind::Invalid || enumeratingProtoChain_) {
+ markIndicesUnsupported();
+ return true;
+ }
+
+ if (allocatingIndices() && !indices_->append(index)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool PropertyEnumerator::enumerateExtraProperties(JSContext* cx) {
+ MOZ_ASSERT(obj_->getClass()->getNewEnumerate());
+
+ RootedIdVector properties(cx);
+ bool enumerableOnly = !(flags_ & JSITER_HIDDEN);
+ if (!obj_->getClass()->getNewEnumerate()(cx, obj_, &properties,
+ enumerableOnly)) {
+ return false;
+ }
+
+ RootedId id(cx);
+ for (size_t n = 0; n < properties.length(); n++) {
+ id = properties[n];
+
+ // The enumerate hook does not indicate whether the properties
+ // it returns are enumerable or not. Since we already passed
+ // `enumerableOnly` to the hook to filter out non-enumerable
+ // properties, it doesn't really matter what we pass here.
+ bool enumerable = true;
+ if (!enumerate<true>(cx, id, enumerable)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool SortComparatorIntegerIds(jsid a, jsid b, bool* lessOrEqualp) {
+ uint32_t indexA, indexB;
+ MOZ_ALWAYS_TRUE(IdIsIndex(a, &indexA));
+ MOZ_ALWAYS_TRUE(IdIsIndex(b, &indexB));
+ *lessOrEqualp = (indexA <= indexB);
+ return true;
+}
+
+template <bool CheckForDuplicates>
+bool PropertyEnumerator::enumerateNativeProperties(JSContext* cx) {
+ Handle<NativeObject*> pobj = obj_.as<NativeObject>();
+
+ // We don't need to iterate over the shape's properties if we're only
+ // interested in enumerable properties and the object is known to have no
+ // enumerable properties.
+ //
+ // Don't optimize if CheckForDuplicates is true, because non-enumerable
+ // properties still have to participate in duplicate-property checking.
+ const bool iterShapeProperties = CheckForDuplicates ||
+ (flags_ & JSITER_HIDDEN) ||
+ pobj->hasEnumerableProperty();
+
+ bool enumerateSymbols;
+ if (flags_ & JSITER_SYMBOLSONLY) {
+ if (!iterShapeProperties) {
+ return true;
+ }
+ enumerateSymbols = true;
+ } else {
+ // Collect any dense elements from this object.
+ size_t firstElemIndex = props_.length();
+ size_t initlen = pobj->getDenseInitializedLength();
+ const Value* elements = pobj->getDenseElements();
+ bool hasHoles = false;
+ for (uint32_t i = 0; i < initlen; ++i) {
+ if (elements[i].isMagic(JS_ELEMENTS_HOLE)) {
+ hasHoles = true;
+ } else {
+ // Dense arrays never get so large that |i| would not fit into an
+ // integer id.
+ if (!enumerate<CheckForDuplicates>(cx, PropertyKey::Int(i),
+ /* enumerable = */ true,
+ PropertyIndex::ForElement(i))) {
+ return false;
+ }
+ }
+ }
+
+ // Collect any typed array or shared typed array elements from this
+ // object.
+ if (pobj->is<TypedArrayObject>()) {
+ size_t len = pobj->as<TypedArrayObject>().length();
+
+ // Fail early if the typed array is enormous, because this will be very
+ // slow and will likely report OOM. This also means we don't need to
+ // handle indices greater than PropertyKey::IntMax in the loop below.
+ static_assert(PropertyKey::IntMax == INT32_MAX);
+ if (len > INT32_MAX) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ for (uint32_t i = 0; i < len; i++) {
+ if (!enumerate<CheckForDuplicates>(cx, PropertyKey::Int(i),
+ /* enumerable = */ true)) {
+ return false;
+ }
+ }
+ }
+#ifdef ENABLE_RECORD_TUPLE
+ else {
+ Rooted<RecordType*> rec(cx);
+ if (RecordObject::maybeUnbox(pobj, &rec)) {
+ Rooted<ArrayObject*> keys(cx, rec->keys());
+
+ for (size_t i = 0; i < keys->length(); i++) {
+ JSAtom* key = &keys->getDenseElement(i).toString()->asAtom();
+ PropertyKey id = AtomToId(key);
+ if (!enumerate<CheckForDuplicates>(cx, id,
+ /* enumerable = */ true)) {
+ return false;
+ }
+ }
+
+ return true;
+ } else {
+ mozilla::Maybe<TupleType&> tup = TupleObject::maybeUnbox(pobj);
+ if (tup) {
+ uint32_t len = tup->length();
+
+ for (size_t i = 0; i < len; i++) {
+ // We expect tuple indices not to get so large that `i` won't
+ // fit into an `int32_t`.
+ MOZ_ASSERT(PropertyKey::fitsInInt(i));
+ PropertyKey id = PropertyKey::Int(i);
+ if (!enumerate<CheckForDuplicates>(cx, id,
+ /* enumerable = */ true)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+ }
+ }
+#endif
+
+ // The code below enumerates shape properties (including sparse elements) so
+ // if we can ignore those we're done.
+ if (!iterShapeProperties) {
+ return true;
+ }
+
+ // Collect any sparse elements from this object.
+ bool isIndexed = pobj->isIndexed();
+ if (isIndexed) {
+ // If the dense elements didn't have holes, we don't need to include
+ // them in the sort.
+ if (!hasHoles) {
+ firstElemIndex = props_.length();
+ }
+
+ for (ShapePropertyIter<NoGC> iter(pobj->shape()); !iter.done(); iter++) {
+ jsid id = iter->key();
+ uint32_t dummy;
+ if (IdIsIndex(id, &dummy)) {
+ if (!enumerate<CheckForDuplicates>(cx, id, iter->enumerable())) {
+ return false;
+ }
+ }
+ }
+
+ MOZ_ASSERT(firstElemIndex <= props_.length());
+
+ jsid* ids = props_.begin() + firstElemIndex;
+ size_t n = props_.length() - firstElemIndex;
+
+ RootedIdVector tmp(cx);
+ if (!tmp.resize(n)) {
+ return false;
+ }
+ PodCopy(tmp.begin(), ids, n);
+
+ if (!MergeSort(ids, n, tmp.begin(), SortComparatorIntegerIds)) {
+ return false;
+ }
+ }
+
+ size_t initialLength = props_.length();
+
+ /* Collect all unique property names from this object's shape. */
+ bool symbolsFound = false;
+ for (ShapePropertyIter<NoGC> iter(pobj->shape()); !iter.done(); iter++) {
+ jsid id = iter->key();
+
+ if (id.isSymbol()) {
+ symbolsFound = true;
+ continue;
+ }
+
+ uint32_t dummy;
+ if (isIndexed && IdIsIndex(id, &dummy)) {
+ continue;
+ }
+
+ PropertyIndex index = iter->isDataProperty()
+ ? PropertyIndex::ForSlot(pobj, iter->slot())
+ : PropertyIndex::Invalid();
+ if (!enumerate<CheckForDuplicates>(cx, id, iter->enumerable(), index)) {
+ return false;
+ }
+ }
+ reversePropsAndIndicesAfter(initialLength);
+
+ enumerateSymbols = symbolsFound && (flags_ & JSITER_SYMBOLS);
+ }
+
+ if (enumerateSymbols) {
+ MOZ_ASSERT(iterShapeProperties);
+ MOZ_ASSERT(!allocatingIndices());
+
+ // Do a second pass to collect symbols. The spec requires that all symbols
+ // appear after all strings in [[OwnPropertyKeys]] for ordinary objects:
+ // https://tc39.es/ecma262/#sec-ordinaryownpropertykeys
+ size_t initialLength = props_.length();
+ for (ShapePropertyIter<NoGC> iter(pobj->shape()); !iter.done(); iter++) {
+ jsid id = iter->key();
+ if (id.isSymbol()) {
+ if (!enumerate<CheckForDuplicates>(cx, id, iter->enumerable())) {
+ return false;
+ }
+ }
+ }
+ reversePropsAndIndicesAfter(initialLength);
+ }
+
+ return true;
+}
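+
+// For example (illustrative): enumerating |{ b: 1, a: 2, 0: 3 }| with the
+// code above yields "0", "b", "a": integer ids in ascending order first,
+// then named properties in property-creation order.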
+
+template <bool CheckForDuplicates>
+bool PropertyEnumerator::enumerateProxyProperties(JSContext* cx) {
+ MOZ_ASSERT(obj_->is<ProxyObject>());
+
+ RootedIdVector proxyProps(cx);
+
+ if (flags_ & JSITER_HIDDEN || flags_ & JSITER_SYMBOLS) {
+ // This gets all property keys, both strings and symbols. The call to
+ // enumerate in the loop below will filter out unwanted keys, per the
+ // flags.
+ if (!Proxy::ownPropertyKeys(cx, obj_, &proxyProps)) {
+ return false;
+ }
+
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ for (size_t n = 0, len = proxyProps.length(); n < len; n++) {
+ bool enumerable = false;
+
+ // We need to filter if the caller just wants enumerable symbols.
+ if (!(flags_ & JSITER_HIDDEN)) {
+ if (!Proxy::getOwnPropertyDescriptor(cx, obj_, proxyProps[n], &desc)) {
+ return false;
+ }
+ enumerable = desc.isSome() && desc->enumerable();
+ }
+
+ if (!enumerate<CheckForDuplicates>(cx, proxyProps[n], enumerable)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ // Returns enumerable property names (no symbols).
+ if (!Proxy::getOwnEnumerablePropertyKeys(cx, obj_, &proxyProps)) {
+ return false;
+ }
+
+ for (size_t n = 0, len = proxyProps.length(); n < len; n++) {
+ if (!enumerate<CheckForDuplicates>(cx, proxyProps[n], true)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+
+struct SortComparatorIds {
+ JSContext* const cx;
+
+ explicit SortComparatorIds(JSContext* cx) : cx(cx) {}
+
+ bool operator()(jsid aArg, jsid bArg, bool* lessOrEqualp) {
+ RootedId a(cx, aArg);
+ RootedId b(cx, bArg);
+
+ // Pick an arbitrary order on jsids that is as stable as possible
+ // across executions.
+ if (a == b) {
+ *lessOrEqualp = true;
+ return true;
+ }
+
+ enum class KeyType { Void, Int, String, Symbol };
+
+ auto keyType = [](PropertyKey key) {
+ if (key.isString()) {
+ return KeyType::String;
+ }
+ if (key.isInt()) {
+ return KeyType::Int;
+ }
+ if (key.isSymbol()) {
+ return KeyType::Symbol;
+ }
+ MOZ_ASSERT(key.isVoid());
+ return KeyType::Void;
+ };
+
+ if (keyType(a) != keyType(b)) {
+ *lessOrEqualp = (keyType(a) <= keyType(b));
+ return true;
+ }
+
+ if (a.isInt()) {
+ *lessOrEqualp = (a.toInt() <= b.toInt());
+ return true;
+ }
+
+ RootedString astr(cx), bstr(cx);
+ if (a.isSymbol()) {
+ MOZ_ASSERT(b.isSymbol());
+ JS::SymbolCode ca = a.toSymbol()->code();
+ JS::SymbolCode cb = b.toSymbol()->code();
+ if (ca != cb) {
+ *lessOrEqualp = uint32_t(ca) <= uint32_t(cb);
+ return true;
+ }
+ MOZ_ASSERT(ca == JS::SymbolCode::PrivateNameSymbol ||
+ ca == JS::SymbolCode::InSymbolRegistry ||
+ ca == JS::SymbolCode::UniqueSymbol);
+ astr = a.toSymbol()->description();
+ bstr = b.toSymbol()->description();
+ if (!astr || !bstr) {
+ *lessOrEqualp = !astr;
+ return true;
+ }
+
+ // Fall through to string comparison on the descriptions. The sort
+ // order is nondeterministic if two different unique symbols have
+ // the same description.
+ } else {
+ astr = IdToString(cx, a);
+ if (!astr) {
+ return false;
+ }
+ bstr = IdToString(cx, b);
+ if (!bstr) {
+ return false;
+ }
+ }
+
+ int32_t result;
+ if (!CompareStrings(cx, astr, bstr, &result)) {
+ return false;
+ }
+
+ *lessOrEqualp = (result <= 0);
+ return true;
+ }
+};
+
+#endif /* DEBUG */
+
+static void AssertNoEnumerableProperties(NativeObject* obj) {
+#ifdef DEBUG
+ // Verify the object has no enumerable properties if the HasEnumerable
+ // ObjectFlag is not set.
+
+ MOZ_ASSERT(!obj->hasEnumerableProperty());
+
+ static constexpr size_t MaxPropsToCheck = 5;
+
+ size_t count = 0;
+ for (ShapePropertyIter<NoGC> iter(obj->shape()); !iter.done(); iter++) {
+ MOZ_ASSERT(!iter->enumerable());
+ if (++count > MaxPropsToCheck) {
+ break;
+ }
+ }
+#endif // DEBUG
+}
+
+// Typed arrays and classes with an enumerate hook can have extra properties not
+// included in the shape's property map or the object's dense elements.
+static bool ClassCanHaveExtraEnumeratedProperties(const JSClass* clasp) {
+ return IsTypedArrayClass(clasp) || clasp->getNewEnumerate() ||
+ clasp->getEnumerate();
+}
+
+static bool ProtoMayHaveEnumerableProperties(JSObject* obj) {
+ if (!obj->is<NativeObject>()) {
+ return true;
+ }
+
+ JSObject* proto = obj->as<NativeObject>().staticPrototype();
+ while (proto) {
+ if (!proto->is<NativeObject>()) {
+ return true;
+ }
+ NativeObject* nproto = &proto->as<NativeObject>();
+ if (nproto->hasEnumerableProperty() ||
+ nproto->getDenseInitializedLength() > 0 ||
+ ClassCanHaveExtraEnumeratedProperties(nproto->getClass())) {
+ return true;
+ }
+ AssertNoEnumerableProperties(nproto);
+ proto = nproto->staticPrototype();
+ }
+
+ return false;
+}
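+
+// For example (illustrative): in |for (var p in {x: 1})|, Object.prototype
+// normally has no enumerable properties, so the check above lets snapshot()
+// below set JSITER_OWNONLY and skip walking the prototype chain entirely.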
+
+bool PropertyEnumerator::snapshot(JSContext* cx) {
+ // If we're only interested in enumerable properties and the proto chain has
+ // no enumerable properties (the common case), we can optimize this to ignore
+ // the proto chain. This also lets us take advantage of the no-duplicate-check
+ // optimization below.
+ if (!(flags_ & JSITER_HIDDEN) && !(flags_ & JSITER_OWNONLY) &&
+ !ProtoMayHaveEnumerableProperties(obj_)) {
+ flags_ |= JSITER_OWNONLY;
+ }
+
+ // Don't check for duplicates if we're only interested in own properties.
+ // This does the right thing for most objects: native objects don't have
+ // duplicate property ids and we allow the [[OwnPropertyKeys]] proxy trap to
+ // return duplicates.
+ //
+ // The only special case is when the object has a newEnumerate hook: it
+ // can return duplicate properties and we have to filter them. This is
+ // handled below.
+ bool checkForDuplicates = !(flags_ & JSITER_OWNONLY);
+
+ do {
+ if (obj_->getClass()->getNewEnumerate()) {
+ markIndicesUnsupported();
+
+ if (!enumerateExtraProperties(cx)) {
+ return false;
+ }
+
+ if (obj_->is<NativeObject>()) {
+ if (!enumerateNativeProperties(cx, /*checkForDuplicates*/ true)) {
+ return false;
+ }
+ }
+
+ } else if (obj_->is<NativeObject>()) {
+ // Give the object a chance to resolve all lazy properties.
+ if (JSEnumerateOp enumerateOp = obj_->getClass()->getEnumerate()) {
+ markIndicesUnsupported();
+ if (!enumerateOp(cx, obj_.as<NativeObject>())) {
+ return false;
+ }
+ }
+ if (!enumerateNativeProperties(cx, checkForDuplicates)) {
+ return false;
+ }
+ } else if (obj_->is<ProxyObject>()) {
+ markIndicesUnsupported();
+ if (checkForDuplicates) {
+ if (!enumerateProxyProperties<true>(cx)) {
+ return false;
+ }
+ } else {
+ if (!enumerateProxyProperties<false>(cx)) {
+ return false;
+ }
+ }
+ } else {
+ MOZ_CRASH("non-native objects must have an enumerate op");
+ }
+
+ if (flags_ & JSITER_OWNONLY) {
+ break;
+ }
+
+ if (!GetPrototype(cx, obj_, &obj_)) {
+ return false;
+ }
+ enumeratingProtoChain_ = true;
+
+ // The [[Prototype]] chain might be cyclic.
+ if (!CheckForInterrupt(cx)) {
+ return false;
+ }
+ } while (obj_ != nullptr);
+
+#ifdef DEBUG
+ if (js::SupportDifferentialTesting() && !supportsIndices()) {
+ /*
+ * In some cases the enumeration order for an object depends on the
+ * execution mode (interpreter vs. JIT), especially for native objects
+ * with a class enumerate hook (where resolving a property changes the
+ * resulting enumeration order). These aren't really bugs, but the
+ * differences can change the generated output and confuse correctness
+ * fuzzers, so we sort the ids if such a fuzzer is running.
+ *
+ * We don't do this in the general case because (a) doing so is slow,
+ * and (b) it also breaks the web, which in certain cases expects
+ * enumeration order to follow the order in which properties are added.
+ * Since ECMA does not specify an enumeration order for objects, both
+ * behaviors are technically correct.
+ */
+
+ jsid* ids = props_.begin();
+ size_t n = props_.length();
+
+ RootedIdVector tmp(cx);
+ if (!tmp.resize(n)) {
+ return false;
+ }
+ PodCopy(tmp.begin(), ids, n);
+
+ if (!MergeSort(ids, n, tmp.begin(), SortComparatorIds(cx))) {
+ return false;
+ }
+ }
+#endif
+
+ return true;
+}
+
+JS_PUBLIC_API bool js::GetPropertyKeys(JSContext* cx, HandleObject obj,
+ unsigned flags,
+ MutableHandleIdVector props) {
+ uint32_t validFlags =
+ flags & (JSITER_OWNONLY | JSITER_HIDDEN | JSITER_SYMBOLS |
+ JSITER_SYMBOLSONLY | JSITER_PRIVATE);
+
+ PropertyEnumerator enumerator(cx, obj, validFlags, props);
+ return enumerator.snapshot(cx);
+}
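+
+// Example use from an embedder (illustrative sketch; assumes |cx| and |obj|
+// are in scope):
+//
+//   JS::RootedIdVector ids(cx);
+//   if (!js::GetPropertyKeys(cx, obj, JSITER_OWNONLY | JSITER_HIDDEN, &ids)) {
+//     return false;  // exception pending
+//   }
+//   // |ids| now holds own property keys, including non-enumerable ones.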
+
+static inline void RegisterEnumerator(JSContext* cx, NativeIterator* ni) {
+ MOZ_ASSERT(ni->objectBeingIterated());
+
+ // Register non-escaping native enumerators (for-in) with the current
+ // context.
+ ni->link(cx->compartment()->enumeratorsAddr());
+
+ MOZ_ASSERT(!ni->isActive());
+ ni->markActive();
+}
+
+static PropertyIteratorObject* NewPropertyIteratorObject(JSContext* cx) {
+ const JSClass* clasp = &PropertyIteratorObject::class_;
+ Rooted<SharedShape*> shape(
+ cx,
+ SharedShape::getInitialShape(cx, clasp, cx->realm(), TaggedProto(nullptr),
+ ITERATOR_FINALIZE_KIND));
+ if (!shape) {
+ return nullptr;
+ }
+
+ JSObject* obj = NativeObject::create(
+ cx, ITERATOR_FINALIZE_KIND, GetInitialHeap(GenericObject, clasp), shape);
+ if (!obj) {
+ return nullptr;
+ }
+
+ PropertyIteratorObject* res = &obj->as<PropertyIteratorObject>();
+
+ // CodeGenerator::visitIteratorStartO assumes the iterator object is not
+ // inside the nursery when deciding whether a barrier is necessary.
+ MOZ_ASSERT(!js::gc::IsInsideNursery(res));
+ return res;
+}
+
+static inline size_t NumTrailingBytes(size_t propertyCount, size_t shapeCount,
+ bool hasIndices) {
+ static_assert(alignof(GCPtr<JSLinearString*>) <= alignof(NativeIterator));
+ static_assert(alignof(GCPtr<Shape*>) <= alignof(GCPtr<JSLinearString*>));
+ static_assert(alignof(PropertyIndex) <= alignof(GCPtr<Shape*>));
+ size_t result = propertyCount * sizeof(GCPtr<JSLinearString*>) +
+ shapeCount * sizeof(GCPtr<Shape*>);
+ if (hasIndices) {
+ result += propertyCount * sizeof(PropertyIndex);
+ }
+ return result;
+}
+
+static inline size_t AllocationSize(size_t propertyCount, size_t shapeCount,
+ bool hasIndices) {
+ return sizeof(NativeIterator) +
+ NumTrailingBytes(propertyCount, shapeCount, hasIndices);
+}
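+
+// Illustrative memory layout of a single NativeIterator allocation: the
+// trailing arrays follow the object itself in this order (see the
+// static_asserts above for the alignment requirements):
+//
+//   | NativeIterator | Shape* x shapeCount | JSLinearString* x propertyCount |
+//   | PropertyIndex x propertyCount (only if hasIndices) |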
+
+static PropertyIteratorObject* CreatePropertyIterator(
+ JSContext* cx, Handle<JSObject*> objBeingIterated, HandleIdVector props,
+ bool supportsIndices, PropertyIndexVector* indices,
+ uint32_t cacheableProtoChainLength) {
+ MOZ_ASSERT_IF(indices, supportsIndices);
+ if (props.length() > NativeIterator::PropCountLimit) {
+ ReportAllocationOverflow(cx);
+ return nullptr;
+ }
+
+ bool hasIndices = !!indices;
+
+ // If the iterator is cacheable, we store the shape of each object
+ // along the proto chain in the iterator. If the iterator is not
+ // cacheable but has indices, then we store one shape (the shape of
+ // the object being iterated).
+ uint32_t numShapes = cacheableProtoChainLength;
+ if (numShapes == 0 && hasIndices) {
+ numShapes = 1;
+ }
+
+ Rooted<PropertyIteratorObject*> propIter(cx, NewPropertyIteratorObject(cx));
+ if (!propIter) {
+ return nullptr;
+ }
+
+ void* mem = cx->pod_malloc_with_extra<NativeIterator, uint8_t>(
+ NumTrailingBytes(props.length(), numShapes, hasIndices));
+ if (!mem) {
+ return nullptr;
+ }
+
+ // This also registers |ni| with |propIter|.
+ bool hadError = false;
+ new (mem) NativeIterator(cx, propIter, objBeingIterated, props,
+ supportsIndices, indices, numShapes, &hadError);
+ if (hadError) {
+ return nullptr;
+ }
+
+ return propIter;
+}
+
+static HashNumber HashIteratorShape(Shape* shape) {
+ return DefaultHasher<Shape*>::hash(shape);
+}
+
+/**
+ * Initialize a fresh NativeIterator.
+ *
+ * This definition is a bit tricky: some parts of initialization are
+ * fallible, so as we initialize, we must carefully keep this in a GC-safe
+ * state (see NativeIterator::trace).
+ */
+NativeIterator::NativeIterator(JSContext* cx,
+ Handle<PropertyIteratorObject*> propIter,
+ Handle<JSObject*> objBeingIterated,
+ HandleIdVector props, bool supportsIndices,
+ PropertyIndexVector* indices, uint32_t numShapes,
+ bool* hadError)
+ : objectBeingIterated_(objBeingIterated),
+ iterObj_(propIter),
+ // NativeIterator initially acts (before full initialization) as if it
+ // contains no shapes...
+ shapesEnd_(shapesBegin()),
+ // ...and no properties.
+ propertyCursor_(
+ reinterpret_cast<GCPtr<JSLinearString*>*>(shapesBegin() + numShapes)),
+ propertiesEnd_(propertyCursor_),
+ shapesHash_(0),
+ flagsAndCount_(
+ initialFlagsAndCount(props.length())) // note: no Flags::Initialized
+{
+ // If there are shapes, the object and all objects on its prototype chain must
+ // be native objects. See CanCompareIterableObjectToCache.
+ MOZ_ASSERT_IF(numShapes > 0,
+ objBeingIterated && objBeingIterated->is<NativeObject>());
+
+ MOZ_ASSERT(!*hadError);
+
+ bool hasActualIndices = !!indices;
+ MOZ_ASSERT_IF(hasActualIndices, indices->length() == props.length());
+
+ // NOTE: This must be done first: the caller can't free `this` on error
+ // because it has GCPtr fields whose barriers have already fired; the
+ // store buffer has pointers to them. Only the GC can free `this` (via
+ // PropertyIteratorObject::finalize).
+ propIter->initNativeIterator(this);
+
+ // The GC asserts on finalization that `this->allocationSize()` matches the
+ // `nbytes` passed to `AddCellMemory`. So once these lines run, we must make
+ // `this->allocationSize()` correct. That means infallibly initializing the
+ // shapes, and ensuring that indicesState_.allocated() is true if we've
+ // allocated space for indices. It's OK for the constructor to fail after
+ // that.
+ size_t nbytes = AllocationSize(props.length(), numShapes, hasActualIndices);
+ AddCellMemory(propIter, nbytes, MemoryUse::NativeIterator);
+ if (supportsIndices) {
+ if (hasActualIndices) {
+ // If the string allocation below fails, indicesAllocated() must still be
+ // true so that this->allocationSize() is correct, so set the state to
+ // Disabled now; it is updated to Valid once the indices are copied below.
+ setIndicesState(NativeIteratorIndices::Disabled);
+ } else {
+ // This object supports indices (i.e. it only has own enumerable
+ // properties), but we didn't allocate them because we haven't seen a
+ // consumer yet. We mark the iterator so that potential consumers know to
+ // request a fresh iterator with indices.
+ setIndicesState(NativeIteratorIndices::AvailableOnRequest);
+ }
+ }
+
+ if (numShapes > 0) {
+ // Construct shapes into the shapes array. Also compute the shapesHash,
+ // which incorporates Shape* addresses that could have changed during a GC
+ // triggered in (among other places) |IdToString| above.
+ JSObject* pobj = objBeingIterated;
+ HashNumber shapesHash = 0;
+ for (uint32_t i = 0; i < numShapes; i++) {
+ MOZ_ASSERT(pobj->is<NativeObject>());
+ Shape* shape = pobj->shape();
+ new (shapesEnd_) GCPtr<Shape*>(shape);
+ shapesEnd_++;
+ shapesHash = mozilla::AddToHash(shapesHash, HashIteratorShape(shape));
+ pobj = pobj->staticPrototype();
+ }
+ shapesHash_ = shapesHash;
+
+ // There are two cases in which we need to store shapes. If this
+ // iterator is cacheable, we store the shapes for the entire proto
+ // chain so we can check that the cached iterator is still valid
+ // (see MacroAssembler::maybeLoadIteratorFromShape). If this iterator
+ // has indices, then even if it isn't cacheable we need to store the
+ // shape of the iterated object itself (see IteratorHasIndicesAndBranch).
+ // In the former case, assert that we're storing the entire proto chain.
+ MOZ_ASSERT_IF(numShapes > 1, pobj == nullptr);
+ }
+ MOZ_ASSERT(static_cast<void*>(shapesEnd_) == propertyCursor_);
+
+ size_t numProps = props.length();
+ for (size_t i = 0; i < numProps; i++) {
+ JSLinearString* str = IdToString(cx, props[i]);
+ if (!str) {
+ *hadError = true;
+ return;
+ }
+ new (propertiesEnd_) GCPtr<JSLinearString*>(str);
+ propertiesEnd_++;
+ }
+
+ if (hasActualIndices) {
+ PropertyIndex* cursor = indicesBegin();
+ for (size_t i = 0; i < numProps; i++) {
+ *cursor++ = (*indices)[i];
+ }
+ MOZ_ASSERT(uintptr_t(cursor) == uintptr_t(this) + nbytes);
+ setIndicesState(NativeIteratorIndices::Valid);
+ }
+
+ markInitialized();
+
+ MOZ_ASSERT(!*hadError);
+}
+
+inline size_t NativeIterator::allocationSize() const {
+ size_t numShapes = shapesEnd() - shapesBegin();
+
+ return AllocationSize(initialPropertyCount(), numShapes, indicesAllocated());
+}
+
+/* static */
+bool IteratorHashPolicy::match(PropertyIteratorObject* obj,
+ const Lookup& lookup) {
+ NativeIterator* ni = obj->getNativeIterator();
+ if (ni->shapesHash() != lookup.shapesHash ||
+ ni->shapeCount() != lookup.numShapes) {
+ return false;
+ }
+
+ return ArrayEqual(reinterpret_cast<Shape**>(ni->shapesBegin()), lookup.shapes,
+ ni->shapeCount());
+}
+
+static inline bool CanCompareIterableObjectToCache(JSObject* obj) {
+ if (obj->is<NativeObject>()) {
+ return obj->as<NativeObject>().getDenseInitializedLength() == 0;
+ }
+ return false;
+}
+
+static bool CanStoreInIteratorCache(JSObject* obj) {
+ do {
+ MOZ_ASSERT(obj->as<NativeObject>().getDenseInitializedLength() == 0);
+
+ // Typed arrays have indexed properties not captured by the Shape guard.
+ // Enumerate hooks may add extra properties.
+ if (MOZ_UNLIKELY(ClassCanHaveExtraEnumeratedProperties(obj->getClass()))) {
+ return false;
+ }
+
+ obj = obj->staticPrototype();
+ } while (obj);
+
+ return true;
+}
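+
+// Illustrative cache hit: plain objects created the same way share a shape,
+// so a second for-in can reuse the first one's iterator.
+//
+//   var a = {x: 1, y: 2};
+//   var b = {x: 3, y: 4};   // same shape as |a|
+//   for (var p in a) {}     // creates and caches an iterator for the shape
+//   for (var p in b) {}     // LookupInIteratorCache below can reuse it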
+
+static MOZ_ALWAYS_INLINE PropertyIteratorObject* LookupInIteratorCache(
+ JSContext* cx, JSObject* obj, uint32_t* cacheableProtoChainLength) {
+ MOZ_ASSERT(*cacheableProtoChainLength == 0);
+
+ if (obj->shape()->cache().isIterator() &&
+ CanCompareIterableObjectToCache(obj)) {
+ PropertyIteratorObject* iterobj = obj->shape()->cache().toIterator();
+ NativeIterator* ni = iterobj->getNativeIterator();
+ MOZ_ASSERT(*ni->shapesBegin() == obj->shape());
+ if (!ni->isReusable()) {
+ return nullptr;
+ }
+
+ // Verify shapes of proto chain.
+ JSObject* pobj = obj;
+ for (GCPtr<Shape*>* s = ni->shapesBegin() + 1; s != ni->shapesEnd(); s++) {
+ Shape* shape = *s;
+ pobj = pobj->staticPrototype();
+ if (pobj->shape() != shape) {
+ return nullptr;
+ }
+ if (!CanCompareIterableObjectToCache(pobj)) {
+ return nullptr;
+ }
+ }
+ MOZ_ASSERT(CanStoreInIteratorCache(obj));
+ *cacheableProtoChainLength = ni->shapeCount();
+ return iterobj;
+ }
+
+ Vector<Shape*, 8> shapes(cx);
+ HashNumber shapesHash = 0;
+ JSObject* pobj = obj;
+ do {
+ if (!CanCompareIterableObjectToCache(pobj)) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(pobj->is<NativeObject>());
+ Shape* shape = pobj->shape();
+ shapesHash = mozilla::AddToHash(shapesHash, HashIteratorShape(shape));
+
+ if (MOZ_UNLIKELY(!shapes.append(shape))) {
+ cx->recoverFromOutOfMemory();
+ return nullptr;
+ }
+
+ pobj = pobj->staticPrototype();
+ } while (pobj);
+
+ MOZ_ASSERT(!shapes.empty());
+ *cacheableProtoChainLength = shapes.length();
+
+ IteratorHashPolicy::Lookup lookup(shapes.begin(), shapes.length(),
+ shapesHash);
+ auto p = ObjectRealm::get(obj).iteratorCache.lookup(lookup);
+ if (!p) {
+ return nullptr;
+ }
+
+ PropertyIteratorObject* iterobj = *p;
+ MOZ_ASSERT(iterobj->compartment() == cx->compartment());
+
+ NativeIterator* ni = iterobj->getNativeIterator();
+ if (!ni->isReusable()) {
+ return nullptr;
+ }
+
+ return iterobj;
+}
+
+[[nodiscard]] static bool StoreInIteratorCache(
+ JSContext* cx, JSObject* obj, PropertyIteratorObject* iterobj) {
+ MOZ_ASSERT(CanStoreInIteratorCache(obj));
+
+ NativeIterator* ni = iterobj->getNativeIterator();
+ MOZ_ASSERT(ni->shapeCount() > 0);
+
+ obj->shape()->maybeCacheIterator(cx, iterobj);
+
+ IteratorHashPolicy::Lookup lookup(
+ reinterpret_cast<Shape**>(ni->shapesBegin()), ni->shapeCount(),
+ ni->shapesHash());
+
+ ObjectRealm::IteratorCache& cache = ObjectRealm::get(obj).iteratorCache;
+ bool ok;
+ auto p = cache.lookupForAdd(lookup);
+ if (MOZ_LIKELY(!p)) {
+ ok = cache.add(p, iterobj);
+ } else {
+ // If we weren't able to use an existing cached iterator, just
+ // replace it.
+ cache.remove(p);
+ ok = cache.relookupOrAdd(p, lookup, iterobj);
+ }
+ if (!ok) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool js::EnumerateProperties(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector props) {
+ MOZ_ASSERT(props.empty());
+
+ if (MOZ_UNLIKELY(obj->is<ProxyObject>())) {
+ return Proxy::enumerate(cx, obj, props);
+ }
+
+ uint32_t flags = 0;
+ PropertyEnumerator enumerator(cx, obj, flags, props);
+ return enumerator.snapshot(cx);
+}
+
+#ifdef DEBUG
+static bool IndicesAreValid(NativeObject* obj, NativeIterator* ni) {
+ MOZ_ASSERT(ni->hasValidIndices());
+ size_t numDenseElements = obj->getDenseInitializedLength();
+ size_t numFixedSlots = obj->numFixedSlots();
+ const Value* elements = obj->getDenseElements();
+
+ GCPtr<JSLinearString*>* keys = ni->propertiesBegin();
+ PropertyIndex* indices = ni->indicesBegin();
+
+ for (uint32_t i = 0; i < ni->numKeys(); i++) {
+ PropertyIndex index = indices[i];
+ switch (index.kind()) {
+ case PropertyIndex::Kind::Element:
+ // Verify that the dense element exists and is not a hole.
+ if (index.index() >= numDenseElements ||
+ elements[index.index()].isMagic(JS_ELEMENTS_HOLE)) {
+ return false;
+ }
+ break;
+ case PropertyIndex::Kind::FixedSlot: {
+ // Verify that the slot exists and is an enumerable data property with
+ // the expected key.
+ Maybe<PropertyInfo> prop =
+ obj->lookupPure(AtomToId(&keys[i]->asAtom()));
+ if (!prop.isSome() || !prop->hasSlot() || !prop->enumerable() ||
+ !prop->isDataProperty() || prop->slot() != index.index()) {
+ return false;
+ }
+ break;
+ }
+ case PropertyIndex::Kind::DynamicSlot: {
+ // Verify that the slot exists and is an enumerable data property with
+ // the expected key.
+ Maybe<PropertyInfo> prop =
+ obj->lookupPure(AtomToId(&keys[i]->asAtom()));
+ if (!prop.isSome() || !prop->hasSlot() || !prop->enumerable() ||
+ !prop->isDataProperty() ||
+ prop->slot() - numFixedSlots != index.index()) {
+ return false;
+ }
+ break;
+ }
+ case PropertyIndex::Kind::Invalid:
+ return false;
+ }
+ }
+ return true;
+}
+#endif
+
+template <bool WantIndices>
+static PropertyIteratorObject* GetIteratorImpl(JSContext* cx,
+ HandleObject obj) {
+ MOZ_ASSERT(!obj->is<PropertyIteratorObject>());
+ MOZ_ASSERT(cx->compartment() == obj->compartment(),
+ "We may end up allocating shapes in the wrong zone!");
+
+ uint32_t cacheableProtoChainLength = 0;
+ if (PropertyIteratorObject* iterobj =
+ LookupInIteratorCache(cx, obj, &cacheableProtoChainLength)) {
+ NativeIterator* ni = iterobj->getNativeIterator();
+ bool recreateWithIndices = WantIndices && ni->indicesAvailableOnRequest();
+ if (!recreateWithIndices) {
+ MOZ_ASSERT_IF(WantIndices && ni->hasValidIndices(),
+ IndicesAreValid(&obj->as<NativeObject>(), ni));
+ ni->initObjectBeingIterated(*obj);
+ RegisterEnumerator(cx, ni);
+ return iterobj;
+ }
+ }
+
+ if (cacheableProtoChainLength > 0 && !CanStoreInIteratorCache(obj)) {
+ cacheableProtoChainLength = 0;
+ }
+
+ RootedIdVector keys(cx);
+ PropertyIndexVector indices(cx);
+ bool supportsIndices = false;
+
+ if (MOZ_UNLIKELY(obj->is<ProxyObject>())) {
+ if (!Proxy::enumerate(cx, obj, &keys)) {
+ return nullptr;
+ }
+ } else {
+ uint32_t flags = 0;
+ PropertyEnumerator enumerator(cx, obj, flags, &keys, &indices);
+ if (!enumerator.snapshot(cx)) {
+ return nullptr;
+ }
+ supportsIndices = enumerator.supportsIndices();
+ MOZ_ASSERT_IF(WantIndices && supportsIndices,
+ keys.length() == indices.length());
+ }
+
+ // If the object has dense elements, mark the dense elements as
+ // maybe-in-iteration.
+ //
+ // The iterator is a snapshot so if indexed properties are added after this
+ // point we don't need to do anything. However, the object might have sparse
+ // elements now that can be densified later. To account for this, we set the
+ // maybe-in-iteration flag also in NativeObject::maybeDensifySparseElements.
+ //
+ // In debug builds, AssertDenseElementsNotIterated is used to check the flag
+ // is set correctly.
+ if (obj->is<NativeObject>() &&
+ obj->as<NativeObject>().getDenseInitializedLength() > 0) {
+ obj->as<NativeObject>().markDenseElementsMaybeInIteration();
+ }
+
+ PropertyIndexVector* indicesPtr =
+ WantIndices && supportsIndices ? &indices : nullptr;
+ PropertyIteratorObject* iterobj = CreatePropertyIterator(
+ cx, obj, keys, supportsIndices, indicesPtr, cacheableProtoChainLength);
+ if (!iterobj) {
+ return nullptr;
+ }
+ RegisterEnumerator(cx, iterobj->getNativeIterator());
+
+ cx->check(iterobj);
+ MOZ_ASSERT_IF(
+ WantIndices && supportsIndices,
+ IndicesAreValid(&obj->as<NativeObject>(), iterobj->getNativeIterator()));
+
+#ifdef DEBUG
+ if (obj->is<NativeObject>()) {
+ if (PrototypeMayHaveIndexedProperties(&obj->as<NativeObject>())) {
+ iterobj->getNativeIterator()->setMaybeHasIndexedPropertiesFromProto();
+ }
+ }
+#endif
+
+ // Cache the iterator object.
+ if (cacheableProtoChainLength > 0) {
+ if (!StoreInIteratorCache(cx, obj, iterobj)) {
+ return nullptr;
+ }
+ }
+
+ return iterobj;
+}
+
+PropertyIteratorObject* js::GetIterator(JSContext* cx, HandleObject obj) {
+ return GetIteratorImpl<false>(cx, obj);
+}
+
+PropertyIteratorObject* js::GetIteratorWithIndices(JSContext* cx,
+ HandleObject obj) {
+ return GetIteratorImpl<true>(cx, obj);
+}
+
+PropertyIteratorObject* js::LookupInIteratorCache(JSContext* cx,
+ HandleObject obj) {
+ uint32_t dummy = 0;
+ return LookupInIteratorCache(cx, obj, &dummy);
+}
+
+// ES 2017 draft 7.4.7.
+PlainObject* js::CreateIterResultObject(JSContext* cx, HandleValue value,
+ bool done) {
+ // Step 1 (implicit).
+
+ // Step 2.
+ Rooted<PlainObject*> templateObject(
+ cx, GlobalObject::getOrCreateIterResultTemplateObject(cx));
+ if (!templateObject) {
+ return nullptr;
+ }
+
+ PlainObject* resultObj = PlainObject::createWithTemplate(cx, templateObject);
+ if (!resultObj) {
+ return nullptr;
+ }
+
+ // Step 3.
+ resultObj->setSlot(GlobalObject::IterResultObjectValueSlot, value);
+
+ // Step 4.
+ resultObj->setSlot(GlobalObject::IterResultObjectDoneSlot,
+ done ? TrueHandleValue : FalseHandleValue);
+
+ // Step 5.
+ return resultObj;
+}
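+
+// For example (illustrative): calling CreateIterResultObject with a value of
+// |1| and |done = false| returns a plain object equivalent to the JS value
+// |{value: 1, done: false}|.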
+
+PlainObject* GlobalObject::getOrCreateIterResultTemplateObject(JSContext* cx) {
+ HeapPtr<PlainObject*>& obj = cx->global()->data().iterResultTemplate;
+ if (obj) {
+ return obj;
+ }
+
+ PlainObject* templateObj =
+ createIterResultTemplateObject(cx, WithObjectPrototype::Yes);
+ obj.init(templateObj);
+ return obj;
+}
+
+/* static */
+PlainObject* GlobalObject::getOrCreateIterResultWithoutPrototypeTemplateObject(
+ JSContext* cx) {
+ HeapPtr<PlainObject*>& obj =
+ cx->global()->data().iterResultWithoutPrototypeTemplate;
+ if (obj) {
+ return obj;
+ }
+
+ PlainObject* templateObj =
+ createIterResultTemplateObject(cx, WithObjectPrototype::No);
+ obj.init(templateObj);
+ return obj;
+}
+
+/* static */
+PlainObject* GlobalObject::createIterResultTemplateObject(
+ JSContext* cx, WithObjectPrototype withProto) {
+ // Create template plain object
+ Rooted<PlainObject*> templateObject(
+ cx, withProto == WithObjectPrototype::Yes
+ ? NewPlainObject(cx, TenuredObject)
+ : NewPlainObjectWithProto(cx, nullptr));
+ if (!templateObject) {
+ return nullptr;
+ }
+
+ // Set dummy `value` property
+ if (!NativeDefineDataProperty(cx, templateObject, cx->names().value,
+ UndefinedHandleValue, JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+
+ // Set dummy `done` property
+ if (!NativeDefineDataProperty(cx, templateObject, cx->names().done,
+ TrueHandleValue, JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+
+#ifdef DEBUG
+ // Make sure that the properties are in the right slots.
+ ShapePropertyIter<NoGC> iter(templateObject->shape());
+ MOZ_ASSERT(iter->slot() == GlobalObject::IterResultObjectDoneSlot &&
+ iter->key() == NameToId(cx->names().done));
+ iter++;
+ MOZ_ASSERT(iter->slot() == GlobalObject::IterResultObjectValueSlot &&
+ iter->key() == NameToId(cx->names().value));
+#endif
+
+ return templateObject;
+}
+
+/*** Iterator objects *******************************************************/
+
+size_t PropertyIteratorObject::sizeOfMisc(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(getNativeIterator());
+}
+
+void PropertyIteratorObject::trace(JSTracer* trc, JSObject* obj) {
+ if (NativeIterator* ni =
+ obj->as<PropertyIteratorObject>().getNativeIterator()) {
+ ni->trace(trc);
+ }
+}
+
+void PropertyIteratorObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ if (NativeIterator* ni =
+ obj->as<PropertyIteratorObject>().getNativeIterator()) {
+ gcx->free_(obj, ni, ni->allocationSize(), MemoryUse::NativeIterator);
+ }
+}
+
+const JSClassOps PropertyIteratorObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ trace, // trace
+};
+
+const JSClass PropertyIteratorObject::class_ = {
+ "Iterator",
+ JSCLASS_HAS_RESERVED_SLOTS(SlotCount) | JSCLASS_BACKGROUND_FINALIZE,
+ &PropertyIteratorObject::classOps_};
+
+static const JSClass ArrayIteratorPrototypeClass = {"Array Iterator", 0};
+
+enum {
+ ArrayIteratorSlotIteratedObject,
+ ArrayIteratorSlotNextIndex,
+ ArrayIteratorSlotItemKind,
+ ArrayIteratorSlotCount
+};
+
+const JSClass ArrayIteratorObject::class_ = {
+ "Array Iterator", JSCLASS_HAS_RESERVED_SLOTS(ArrayIteratorSlotCount)};
+
+ArrayIteratorObject* js::NewArrayIteratorTemplate(JSContext* cx) {
+ RootedObject proto(
+ cx, GlobalObject::getOrCreateArrayIteratorPrototype(cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+
+ return NewTenuredObjectWithGivenProto<ArrayIteratorObject>(cx, proto);
+}
+
+ArrayIteratorObject* js::NewArrayIterator(JSContext* cx) {
+ RootedObject proto(
+ cx, GlobalObject::getOrCreateArrayIteratorPrototype(cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+
+ return NewObjectWithGivenProto<ArrayIteratorObject>(cx, proto);
+}
+
+static const JSFunctionSpec array_iterator_methods[] = {
+ JS_SELF_HOSTED_FN("next", "ArrayIteratorNext", 0, 0), JS_FS_END};
+
+static const JSClass StringIteratorPrototypeClass = {"String Iterator", 0};
+
+enum {
+ StringIteratorSlotIteratedObject,
+ StringIteratorSlotNextIndex,
+ StringIteratorSlotCount
+};
+
+const JSClass StringIteratorObject::class_ = {
+ "String Iterator", JSCLASS_HAS_RESERVED_SLOTS(StringIteratorSlotCount)};
+
+static const JSFunctionSpec string_iterator_methods[] = {
+ JS_SELF_HOSTED_FN("next", "StringIteratorNext", 0, 0), JS_FS_END};
+
+StringIteratorObject* js::NewStringIteratorTemplate(JSContext* cx) {
+ RootedObject proto(
+ cx, GlobalObject::getOrCreateStringIteratorPrototype(cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+
+ return NewTenuredObjectWithGivenProto<StringIteratorObject>(cx, proto);
+}
+
+StringIteratorObject* js::NewStringIterator(JSContext* cx) {
+ RootedObject proto(
+ cx, GlobalObject::getOrCreateStringIteratorPrototype(cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+
+ return NewObjectWithGivenProto<StringIteratorObject>(cx, proto);
+}
+
+static const JSClass RegExpStringIteratorPrototypeClass = {
+ "RegExp String Iterator", 0};
+
+enum {
+ // The regular expression used for iteration. May hold the original RegExp
+ // object when it is reused instead of a new RegExp object.
+ RegExpStringIteratorSlotRegExp,
+
+ // The String value being iterated upon.
+ RegExpStringIteratorSlotString,
+
+ // The source string of the original RegExp object. Used to validate we can
+ // reuse the original RegExp object for matching.
+ RegExpStringIteratorSlotSource,
+
+ // The flags of the original RegExp object.
+ RegExpStringIteratorSlotFlags,
+
+ // When non-negative, this slot holds the current lastIndex position when
+ // reusing the original RegExp object for matching. When set to |-1|, the
+ // iterator has finished. When set to any other negative value, the
+ // iterator is not yet exhausted, but we're not on the fast path and not
+ // reusing the input RegExp object.
+ RegExpStringIteratorSlotLastIndex,
+
+ RegExpStringIteratorSlotCount
+};
+
+static_assert(RegExpStringIteratorSlotRegExp ==
+ REGEXP_STRING_ITERATOR_REGEXP_SLOT,
+ "RegExpStringIteratorSlotRegExp must match self-hosting define "
+ "for regexp slot.");
+static_assert(RegExpStringIteratorSlotString ==
+ REGEXP_STRING_ITERATOR_STRING_SLOT,
+ "RegExpStringIteratorSlotString must match self-hosting define "
+ "for string slot.");
+static_assert(RegExpStringIteratorSlotSource ==
+ REGEXP_STRING_ITERATOR_SOURCE_SLOT,
+ "RegExpStringIteratorSlotString must match self-hosting define "
+ "for source slot.");
+static_assert(RegExpStringIteratorSlotFlags ==
+ REGEXP_STRING_ITERATOR_FLAGS_SLOT,
+ "RegExpStringIteratorSlotFlags must match self-hosting define "
+ "for flags slot.");
+static_assert(RegExpStringIteratorSlotLastIndex ==
+ REGEXP_STRING_ITERATOR_LASTINDEX_SLOT,
+ "RegExpStringIteratorSlotLastIndex must match self-hosting "
+ "define for lastIndex slot.");
+
+const JSClass RegExpStringIteratorObject::class_ = {
+ "RegExp String Iterator",
+ JSCLASS_HAS_RESERVED_SLOTS(RegExpStringIteratorSlotCount)};
+
+static const JSFunctionSpec regexp_string_iterator_methods[] = {
+ JS_SELF_HOSTED_FN("next", "RegExpStringIteratorNext", 0, 0),
+
+ JS_FS_END};
+
+RegExpStringIteratorObject* js::NewRegExpStringIteratorTemplate(JSContext* cx) {
+ RootedObject proto(cx, GlobalObject::getOrCreateRegExpStringIteratorPrototype(
+ cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+
+ return NewTenuredObjectWithGivenProto<RegExpStringIteratorObject>(cx, proto);
+}
+
+RegExpStringIteratorObject* js::NewRegExpStringIterator(JSContext* cx) {
+ RootedObject proto(cx, GlobalObject::getOrCreateRegExpStringIteratorPrototype(
+ cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+
+ return NewObjectWithGivenProto<RegExpStringIteratorObject>(cx, proto);
+}
+
+// static
+PropertyIteratorObject* GlobalObject::getOrCreateEmptyIterator(JSContext* cx) {
+ if (!cx->global()->data().emptyIterator) {
+ RootedIdVector props(cx); // Empty
+ PropertyIteratorObject* iter =
+ CreatePropertyIterator(cx, nullptr, props, false, nullptr, 0);
+ if (!iter) {
+ return nullptr;
+ }
+ iter->getNativeIterator()->markEmptyIteratorSingleton();
+ cx->global()->data().emptyIterator.init(iter);
+ }
+ return cx->global()->data().emptyIterator;
+}
+
+PropertyIteratorObject* js::ValueToIterator(JSContext* cx, HandleValue vp) {
+ RootedObject obj(cx);
+ if (vp.isObject()) {
+ /* Common case. */
+ obj = &vp.toObject();
+ } else if (vp.isNullOrUndefined()) {
+ /*
+ * Enumerating over null and undefined gives an empty enumerator, so
+ * that |for (var p in <null or undefined>) <loop>;| never executes
+ * <loop>, per ES5 12.6.4.
+ */
+ return GlobalObject::getOrCreateEmptyIterator(cx);
+ } else {
+ obj = ToObject(cx, vp);
+ if (!obj) {
+ return nullptr;
+ }
+ }
+
+ return GetIterator(cx, obj);
+}
+
+void js::CloseIterator(JSObject* obj) {
+ if (!obj->is<PropertyIteratorObject>()) {
+ return;
+ }
+
+ // Remove the iterator from the active list, which is a stack. The shared
+ // iterator used for for-in over null/undefined is immutable and never
+ // linked.
+
+ NativeIterator* ni = obj->as<PropertyIteratorObject>().getNativeIterator();
+ if (ni->isEmptyIteratorSingleton()) {
+ return;
+ }
+
+ ni->unlink();
+
+ MOZ_ASSERT(ni->isActive());
+ ni->markInactive();
+
+ ni->clearObjectBeingIterated();
+
+ // Reset the enumerator; it may still be in the cached iterators for
+ // this thread and can be reused.
+ ni->resetPropertyCursorForReuse();
+}
+
+bool js::IteratorCloseForException(JSContext* cx, HandleObject obj) {
+ MOZ_ASSERT(cx->isExceptionPending());
+
+ bool isClosingGenerator = cx->isClosingGenerator();
+ JS::AutoSaveExceptionState savedExc(cx);
+
+ // Implements IteratorClose (ES 7.4.6) for exception unwinding. See
+ // also the bytecode generated by BytecodeEmitter::emitIteratorClose.
+
+ // Step 3.
+ //
+ // Get the "return" method.
+ RootedValue returnMethod(cx);
+ if (!GetProperty(cx, obj, obj, cx->names().return_, &returnMethod)) {
+ return false;
+ }
+
+ // Step 4.
+ //
+ // Do nothing if "return" is null or undefined. Throw a TypeError if the
+ // method is not callable.
+ if (returnMethod.isNullOrUndefined()) {
+ return true;
+ }
+ if (!IsCallable(returnMethod)) {
+ return ReportIsNotFunction(cx, returnMethod);
+ }
+
+ // Steps 5, 6, and 8.
+ //
+ // Call "return" if it is not null or undefined.
+ RootedValue rval(cx);
+ bool ok = Call(cx, returnMethod, obj, &rval);
+ if (isClosingGenerator) {
+ // Closing an iterator is implemented as an exception, but in spec
+ // terms it is a Completion value with [[Type]] return. In this case
+ // we *do* care if the call threw and if it returned an object.
+ if (!ok) {
+ return false;
+ }
+ if (!rval.isObject()) {
+ return ThrowCheckIsObject(cx, CheckIsObjectKind::IteratorReturn);
+ }
+ } else {
+ // We don't care whether the call threw or whether it returned an object:
+ // per Step 6, if IteratorClose is being called during a throw, the
+ // original throw has primacy.
+ savedExc.restore();
+ }
+
+ return true;
+}
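+
+// Shell example (illustrative): unwinding a for-of with a pending exception
+// takes this path and invokes the iterator's "return" method, so generator
+// cleanup still runs:
+//
+//   function* g() { try { yield 1; } finally { print("closed"); } }
+//   for (const x of g()) { throw new Error("boom"); }  // prints "closed"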
+
+void js::UnwindIteratorForUncatchableException(JSObject* obj) {
+ if (obj->is<PropertyIteratorObject>()) {
+ NativeIterator* ni = obj->as<PropertyIteratorObject>().getNativeIterator();
+ if (ni->isEmptyIteratorSingleton()) {
+ return;
+ }
+ ni->unlink();
+ }
+}
+
+static bool SuppressDeletedProperty(JSContext* cx, NativeIterator* ni,
+ HandleObject obj,
+ Handle<JSLinearString*> str) {
+ if (ni->objectBeingIterated() != obj) {
+ return true;
+ }
+
+ ni->disableIndices();
+
+ // Optimization for the following common case:
+ //
+ // for (var p in o) {
+ // delete o[p];
+ // }
+ //
+ // Note that usually both strings will be atoms so we only check for pointer
+ // equality here.
+ if (ni->previousPropertyWas(str)) {
+ return true;
+ }
+
+ while (true) {
+ bool restart = false;
+
+ // Check whether id is still to come.
+ GCPtr<JSLinearString*>* const cursor = ni->nextProperty();
+ GCPtr<JSLinearString*>* const end = ni->propertiesEnd();
+ for (GCPtr<JSLinearString*>* idp = cursor; idp < end; ++idp) {
+ // Common case: both strings are atoms.
+ if ((*idp)->isAtom() && str->isAtom()) {
+ if (*idp != str) {
+ continue;
+ }
+ } else {
+ if (!EqualStrings(*idp, str)) {
+ continue;
+ }
+ }
+
+ // Check whether another property along the prototype chain became
+ // visible as a result of this deletion.
+ RootedObject proto(cx);
+ if (!GetPrototype(cx, obj, &proto)) {
+ return false;
+ }
+ if (proto) {
+ RootedId id(cx);
+ RootedValue idv(cx, StringValue(*idp));
+ if (!PrimitiveValueToId<CanGC>(cx, idv, &id)) {
+ return false;
+ }
+
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ RootedObject holder(cx);
+ if (!GetPropertyDescriptor(cx, proto, id, &desc, &holder)) {
+ return false;
+ }
+
+ if (desc.isSome() && desc->enumerable()) {
+ continue;
+ }
+ }
+
+ // If GetPropertyDescriptor above removed a property from ni, start
+ // over.
+ if (end != ni->propertiesEnd() || cursor != ni->nextProperty()) {
+ restart = true;
+ break;
+ }
+
+ // No property along the prototype chain stepped in to take the
+ // property's place, so go ahead and delete id from the list.
+ // If it is the next property to be enumerated, just skip it.
+ if (idp == cursor) {
+ ni->incCursor();
+ } else {
+ for (GCPtr<JSLinearString*>* p = idp; p + 1 != end; p++) {
+ *p = *(p + 1);
+ }
+
+ ni->trimLastProperty();
+ }
+
+ ni->markHasUnvisitedPropertyDeletion();
+ return true;
+ }
+
+ if (!restart) {
+ return true;
+ }
+ }
+}
+
+/*
+ * Suppress enumeration of deleted properties. This function must be called
+ * when a property is deleted and there might be active enumerators.
+ *
+ * We maintain a list of active non-escaping for-in enumerators. To suppress
+ * a property, we check whether each active enumerator contains the (obj, id)
+ * pair and has not yet enumerated |id|. If so, and |id| is the next property,
+ * we simply advance the cursor. Otherwise, we delete |id| from the list.
+ *
+ * We do not suppress enumeration of a property deleted along an object's
+ * prototype chain. Only direct deletions on the object are handled.
+ */
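+/*
+ * For example (illustrative):
+ *
+ *   var o = {a: 1, b: 2, c: 3};
+ *   for (var p in o) {
+ *     if (p == "a") delete o.b;
+ *   }
+ *
+ * visits "a" and "c" only; the deleted property "b" is suppressed.
+ */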
+static bool SuppressDeletedPropertyHelper(JSContext* cx, HandleObject obj,
+ Handle<JSLinearString*> str) {
+ NativeIteratorListIter iter(obj->compartment()->enumeratorsAddr());
+ while (!iter.done()) {
+ NativeIterator* ni = iter.next();
+ if (!SuppressDeletedProperty(cx, ni, obj, str)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool js::SuppressDeletedProperty(JSContext* cx, HandleObject obj, jsid id) {
+ if (MOZ_LIKELY(!obj->compartment()->objectMaybeInIteration(obj))) {
+ return true;
+ }
+
+ if (id.isSymbol()) {
+ return true;
+ }
+
+ Rooted<JSLinearString*> str(cx, IdToString(cx, id));
+ if (!str) {
+ return false;
+ }
+ return SuppressDeletedPropertyHelper(cx, obj, str);
+}
+
+bool js::SuppressDeletedElement(JSContext* cx, HandleObject obj,
+ uint32_t index) {
+ if (MOZ_LIKELY(!obj->compartment()->objectMaybeInIteration(obj))) {
+ return true;
+ }
+
+ RootedId id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+
+ Rooted<JSLinearString*> str(cx, IdToString(cx, id));
+ if (!str) {
+ return false;
+ }
+ return SuppressDeletedPropertyHelper(cx, obj, str);
+}
+
+#ifdef DEBUG
+void js::AssertDenseElementsNotIterated(NativeObject* obj) {
+ // Search for active iterators for |obj| and assert they don't contain any
+ // property keys that are dense elements. This is used to check correctness
+ // of the MAYBE_IN_ITERATION flag on ObjectElements.
+ //
+ // Ignore iterators that may contain indexed properties from objects on the
+ // prototype chain, as that can result in false positives. See bug 1656744.
+
+ // Limit the number of properties we check to avoid slowing down debug builds
+ // too much.
+ static constexpr uint32_t MaxPropsToCheck = 10;
+ uint32_t propsChecked = 0;
+
+ NativeIteratorListIter iter(obj->compartment()->enumeratorsAddr());
+ while (!iter.done()) {
+ NativeIterator* ni = iter.next();
+ if (ni->objectBeingIterated() == obj &&
+ !ni->maybeHasIndexedPropertiesFromProto()) {
+ for (GCPtr<JSLinearString*>* idp = ni->nextProperty();
+ idp < ni->propertiesEnd(); ++idp) {
+ uint32_t index;
+ if (idp->get()->isIndex(&index)) {
+ MOZ_ASSERT(!obj->containsDenseElement(index));
+ }
+ if (++propsChecked > MaxPropsToCheck) {
+ return;
+ }
+ }
+ }
+ }
+}
+#endif
+
+static const JSFunctionSpec iterator_methods[] = {
+ JS_SELF_HOSTED_SYM_FN(iterator, "IteratorIdentity", 0, 0), JS_FS_END};
+
+static const JSFunctionSpec iterator_static_methods[] = {
+ JS_SELF_HOSTED_FN("from", "IteratorFrom", 1, 0), JS_FS_END};
+
+// These methods are only attached to Iterator.prototype when the
+// Iterator Helpers feature is enabled.
+static const JSFunctionSpec iterator_methods_with_helpers[] = {
+ JS_SELF_HOSTED_FN("map", "IteratorMap", 1, 0),
+ JS_SELF_HOSTED_FN("filter", "IteratorFilter", 1, 0),
+ JS_SELF_HOSTED_FN("take", "IteratorTake", 1, 0),
+ JS_SELF_HOSTED_FN("drop", "IteratorDrop", 1, 0),
+ JS_SELF_HOSTED_FN("asIndexedPairs", "IteratorAsIndexedPairs", 0, 0),
+ JS_SELF_HOSTED_FN("flatMap", "IteratorFlatMap", 1, 0),
+ JS_SELF_HOSTED_FN("reduce", "IteratorReduce", 1, 0),
+ JS_SELF_HOSTED_FN("toArray", "IteratorToArray", 0, 0),
+ JS_SELF_HOSTED_FN("forEach", "IteratorForEach", 1, 0),
+ JS_SELF_HOSTED_FN("some", "IteratorSome", 1, 0),
+ JS_SELF_HOSTED_FN("every", "IteratorEvery", 1, 0),
+ JS_SELF_HOSTED_FN("find", "IteratorFind", 1, 0),
+ JS_SELF_HOSTED_SYM_FN(iterator, "IteratorIdentity", 0, 0),
+ JS_FS_END};
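+
+// Shell example (illustrative; only works when Iterator Helpers are enabled):
+//
+//   [1, 2, 3].values().map(x => x * 2).toArray()  // => [2, 4, 6]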
+
+/* static */
+bool GlobalObject::initIteratorProto(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ if (global->hasBuiltinProto(ProtoKind::IteratorProto)) {
+ return true;
+ }
+
+ RootedObject proto(
+ cx, GlobalObject::createBlankPrototype<PlainObject>(cx, global));
+ if (!proto) {
+ return false;
+ }
+
+ // %IteratorPrototype%.map.[[Prototype]] is %Generator% and
+ // %Generator%.prototype.[[Prototype]] is %IteratorPrototype%.
+ // Populate the slot early, to prevent runaway mutual recursion.
+ global->initBuiltinProto(ProtoKind::IteratorProto, proto);
+
+ if (!DefinePropertiesAndFunctions(cx, proto, nullptr, iterator_methods)) {
+ // In this case, we leave a partially initialized object in the
+ // slot. There's no obvious way to do better, since this object may already
+ // be in the prototype chain of %GeneratorPrototype%.
+ return false;
+ }
+
+ return true;
+}
+
+/* static */
+template <GlobalObject::ProtoKind Kind, const JSClass* ProtoClass,
+ const JSFunctionSpec* Methods>
+bool GlobalObject::initObjectIteratorProto(JSContext* cx,
+ Handle<GlobalObject*> global,
+ Handle<JSAtom*> tag) {
+ if (global->hasBuiltinProto(Kind)) {
+ return true;
+ }
+
+ RootedObject iteratorProto(
+ cx, GlobalObject::getOrCreateIteratorPrototype(cx, global));
+ if (!iteratorProto) {
+ return false;
+ }
+
+ RootedObject proto(cx, GlobalObject::createBlankPrototypeInheriting(
+ cx, ProtoClass, iteratorProto));
+ if (!proto || !DefinePropertiesAndFunctions(cx, proto, nullptr, Methods) ||
+ (tag && !DefineToStringTag(cx, proto, tag))) {
+ return false;
+ }
+
+ global->initBuiltinProto(Kind, proto);
+ return true;
+}
+
+/* static */
+NativeObject* GlobalObject::getOrCreateArrayIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return MaybeNativeObject(getOrCreateBuiltinProto(
+ cx, global, ProtoKind::ArrayIteratorProto,
+ cx->names().ArrayIterator.toHandle(),
+ initObjectIteratorProto<ProtoKind::ArrayIteratorProto,
+ &ArrayIteratorPrototypeClass,
+ array_iterator_methods>));
+}
+
+/* static */
+JSObject* GlobalObject::getOrCreateStringIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return getOrCreateBuiltinProto(
+ cx, global, ProtoKind::StringIteratorProto,
+ cx->names().StringIterator.toHandle(),
+ initObjectIteratorProto<ProtoKind::StringIteratorProto,
+ &StringIteratorPrototypeClass,
+ string_iterator_methods>);
+}
+
+/* static */
+JSObject* GlobalObject::getOrCreateRegExpStringIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return getOrCreateBuiltinProto(
+ cx, global, ProtoKind::RegExpStringIteratorProto,
+ cx->names().RegExpStringIterator.toHandle(),
+ initObjectIteratorProto<ProtoKind::RegExpStringIteratorProto,
+ &RegExpStringIteratorPrototypeClass,
+ regexp_string_iterator_methods>);
+}
+
+// Iterator Helper Proposal 2.1.3.1 Iterator()
+// https://tc39.es/proposal-iterator-helpers/#sec-iterator as of revision
+// ed6e15a
+static bool IteratorConstructor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 1.
+ if (!ThrowIfNotConstructing(cx, args, js_Iterator_str)) {
+ return false;
+ }
+ // Throw TypeError if NewTarget is the active function object, preventing the
+ // Iterator constructor from being used directly.
+  if (&args.callee() == &args.newTarget().toObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BOGUS_CONSTRUCTOR, js_Iterator_str);
+ return false;
+ }
+
+ // Step 2.
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_Iterator, &proto)) {
+ return false;
+ }
+
+ JSObject* obj = NewObjectWithClassProto<IteratorObject>(cx, proto);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+static const ClassSpec IteratorObjectClassSpec = {
+ GenericCreateConstructor<IteratorConstructor, 0, gc::AllocKind::FUNCTION>,
+ GenericCreatePrototype<IteratorObject>,
+ iterator_static_methods,
+ nullptr,
+ iterator_methods_with_helpers,
+ nullptr,
+ nullptr,
+};
+
+const JSClass IteratorObject::class_ = {
+ js_Iterator_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Iterator),
+ JS_NULL_CLASS_OPS,
+ &IteratorObjectClassSpec,
+};
+
+const JSClass IteratorObject::protoClass_ = {
+ "Iterator.prototype",
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Iterator),
+ JS_NULL_CLASS_OPS,
+ &IteratorObjectClassSpec,
+};
+
+// Set up WrapForValidIteratorObject class and its prototype.
+static const JSFunctionSpec wrap_for_valid_iterator_methods[] = {
+ JS_SELF_HOSTED_FN("next", "WrapForValidIteratorNext", 1, 0),
+ JS_SELF_HOSTED_FN("return", "WrapForValidIteratorReturn", 1, 0),
+ JS_SELF_HOSTED_FN("throw", "WrapForValidIteratorThrow", 1, 0),
+ JS_FS_END,
+};
+
+static const JSClass WrapForValidIteratorPrototypeClass = {
+ "Wrap For Valid Iterator", 0};
+
+const JSClass WrapForValidIteratorObject::class_ = {
+ "Wrap For Valid Iterator",
+ JSCLASS_HAS_RESERVED_SLOTS(WrapForValidIteratorObject::SlotCount),
+};
+
+/* static */
+NativeObject* GlobalObject::getOrCreateWrapForValidIteratorPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return MaybeNativeObject(getOrCreateBuiltinProto(
+ cx, global, ProtoKind::WrapForValidIteratorProto,
+ Handle<JSAtom*>(nullptr),
+ initObjectIteratorProto<ProtoKind::WrapForValidIteratorProto,
+ &WrapForValidIteratorPrototypeClass,
+ wrap_for_valid_iterator_methods>));
+}
+
+WrapForValidIteratorObject* js::NewWrapForValidIterator(JSContext* cx) {
+ RootedObject proto(cx, GlobalObject::getOrCreateWrapForValidIteratorPrototype(
+ cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+ return NewObjectWithGivenProto<WrapForValidIteratorObject>(cx, proto);
+}
+
+// Common iterator object returned by Iterator Helper methods.
+static const JSFunctionSpec iterator_helper_methods[] = {
+ JS_SELF_HOSTED_FN("next", "IteratorHelperNext", 1, 0),
+ JS_SELF_HOSTED_FN("return", "IteratorHelperReturn", 1, 0),
+ JS_SELF_HOSTED_FN("throw", "IteratorHelperThrow", 1, 0), JS_FS_END};
+
+static const JSClass IteratorHelperPrototypeClass = {"Iterator Helper", 0};
+
+const JSClass IteratorHelperObject::class_ = {
+ "Iterator Helper",
+ JSCLASS_HAS_RESERVED_SLOTS(IteratorHelperObject::SlotCount),
+};
+
+/* static */
+NativeObject* GlobalObject::getOrCreateIteratorHelperPrototype(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ return MaybeNativeObject(getOrCreateBuiltinProto(
+ cx, global, ProtoKind::IteratorHelperProto, Handle<JSAtom*>(nullptr),
+ initObjectIteratorProto<ProtoKind::IteratorHelperProto,
+ &IteratorHelperPrototypeClass,
+ iterator_helper_methods>));
+}
+
+IteratorHelperObject* js::NewIteratorHelper(JSContext* cx) {
+ RootedObject proto(
+ cx, GlobalObject::getOrCreateIteratorHelperPrototype(cx, cx->global()));
+ if (!proto) {
+ return nullptr;
+ }
+ return NewObjectWithGivenProto<IteratorHelperObject>(cx, proto);
+}
+
+bool js::IterableToArray(JSContext* cx, HandleValue iterable,
+ MutableHandle<ArrayObject*> array) {
+ JS::ForOfIterator iterator(cx);
+ if (!iterator.init(iterable, JS::ForOfIterator::ThrowOnNonIterable)) {
+ return false;
+ }
+
+ array.set(NewDenseEmptyArray(cx));
+ if (!array) {
+ return false;
+ }
+
+ RootedValue nextValue(cx);
+ while (true) {
+ bool done;
+ if (!iterator.next(&nextValue, &done)) {
+ return false;
+ }
+ if (done) {
+ break;
+ }
+
+ if (!NewbornArrayPush(cx, array, nextValue)) {
+ return false;
+ }
+ }
+ return true;
+}
diff --git a/js/src/vm/Iteration.h b/js/src/vm/Iteration.h
new file mode 100644
index 0000000000..a62dac6b84
--- /dev/null
+++ b/js/src/vm/Iteration.h
@@ -0,0 +1,794 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Iteration_h
+#define vm_Iteration_h
+
+/*
+ * JavaScript iterators.
+ */
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "builtin/SelfHostingDefines.h"
+#include "gc/Barrier.h"
+#include "vm/NativeObject.h"
+
+/*
+ * [SMDOC] For-in enumeration
+ *
+ * A for-in loop in JS iterates over the string-valued, enumerable
+ * property keys of an object and its prototype chain. The order in
+ * which keys appear is specified to the extent that implementations
+ * historically agreed, and implementation-defined beyond that. See
+ * https://tc39.es/ecma262/#sec-enumerate-object-properties for the
+ * gory details. Each key appears only once in the enumeration.
+ *
+ * We enumerate properties using PropertyEnumerator, which creates an
+ * ordered list of PropertyKeys, using ShapePropertyIter for native
+ * objects and calling enumerate hooks where necessary. This list is
+ * used to create a NativeIterator, which contains (among other
+ * things) a trailing array of strings representing the property keys
+ * of the object, and a cursor pointing into that array. This
+ * NativeIterator is wrapped in a PropertyIteratorObject, which is
+ * pushed by JSOp::Iter and used by JSOp::MoreIter and JSOp::EndIter.
+ *
+ * While active, a NativeIterator is registered in a doubly linked
+ * list, rooted in the compartment. When any property is deleted from
+ * an object, this list is used to remove the deleted property from
+ * any active enumerations. See SuppressDeletedProperty. This slows
+ * down deletion but speeds up enumeration, which is generally a good
+ * tradeoff.
+ *
+ * In many cases, objects with the same shape will have the same set
+ * of property keys. (The most common exception is objects with dense
+ * elements, which can be added or removed without changing the shape
+ * of the object.) In such cases, we can reuse an existing iterator by
+ * storing a pointer to the PropertyIteratorObject in the shape's
+ * |cache_| pointer. Before reusing an iterator, we have to verify
+ * that the prototype chain has not changed and no dense elements have
+ * been added, which is done by storing a trailing array of prototype
+ * shapes in the NativeIterator and comparing it against the shapes of
+ * the prototype chain.
+ *
+ * One of the most frequent uses of for-in loops is in loops that look
+ * like this, which iterate over each property of an object and do
+ * something with those values:
+ * for (var key in obj) {
+ * if (obj.hasOwnProperty(key)) {
+ * doSomethingWith(obj[key]);
+ * }
+ * }
+ * Most objects don't have any enumerable properties on the prototype
+ * chain. In such cases, we can speed up property access inside the
+ * loop by precomputing some information and storing it in the
+ * iterator. When we see a pattern like this in Ion, we generate a
+ * call to GetIteratorWithIndices instead of GetIterator. In this
+ * case, in addition to the list of property keys, PropertyEnumerator
+ * will try to generate a list of corresponding PropertyIndex values,
+ * which represent the location of the own property key in the object
+ * (fixed slot/dynamic slot/dense element + offset). This list will be
+ * stored in NativeIterator as yet another trailing array. When
+ * present, it can be used by Ion code to speed up property access
+ * inside for-in loops. See OptimizeIteratorIndices in
+ * IonAnalysis.cpp.
+ */
+
+namespace js {
+
+class ArrayObject;
+class PlainObject;
+class PropertyIteratorObject;
+
+// A PropertyIndex stores information about the location of an own data
+// property in a format that can be stored in a NativeIterator and consumed by
+// jitcode to access properties without needing to use the megamorphic cache.
+struct PropertyIndex {
+ private:
+ uint32_t asBits_;
+
+ public:
+ enum class Kind : uint32_t { DynamicSlot, FixedSlot, Element, Invalid };
+
+ PropertyIndex(Kind kind, uint32_t index) : asBits_(encode(kind, index)) {}
+
+ static PropertyIndex Invalid() { return PropertyIndex(Kind::Invalid, 0); }
+
+ static PropertyIndex ForElement(uint32_t index) {
+ return PropertyIndex(Kind::Element, index);
+ }
+
+ static PropertyIndex ForSlot(NativeObject* obj, uint32_t index) {
+ if (index < obj->numFixedSlots()) {
+ return PropertyIndex(Kind::FixedSlot, index);
+ } else {
+ return PropertyIndex(Kind::DynamicSlot, index - obj->numFixedSlots());
+ }
+ }
+
+ static constexpr uint32_t KindBits = 2;
+
+ static constexpr uint32_t IndexBits = 32 - KindBits;
+ static constexpr uint32_t IndexLimit = 1 << IndexBits;
+ static constexpr uint32_t IndexMask = (1 << IndexBits) - 1;
+
+ static constexpr uint32_t KindShift = IndexBits;
+
+ static_assert(NativeObject::MAX_FIXED_SLOTS < IndexLimit);
+ static_assert(NativeObject::MAX_SLOTS_COUNT < IndexLimit);
+ static_assert(NativeObject::MAX_DENSE_ELEMENTS_COUNT < IndexLimit);
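+
+  // Worked example (illustrative): PropertyIndex(Kind::FixedSlot, 5) encodes
+  // as (uint32_t(Kind::FixedSlot) << KindShift) | 5 == 0x40000005, so kind()
+  // recovers FixedSlot from the top two bits and index() recovers 5 from the
+  // low 30 bits.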
+
+ private:
+ uint32_t encode(Kind kind, uint32_t index) {
+ MOZ_ASSERT(index < IndexLimit);
+ return (uint32_t(kind) << KindShift) | index;
+ }
+
+ public:
+ Kind kind() const { return Kind(asBits_ >> KindShift); }
+ uint32_t index() const { return asBits_ & IndexMask; }
+};
+
+using PropertyIndexVector = js::Vector<PropertyIndex, 8, js::TempAllocPolicy>;
+
+struct NativeIterator;
+
+class NativeIteratorListNode {
+ protected:
+ // While in compartment->enumerators, these form a doubly linked list.
+ NativeIteratorListNode* prev_ = nullptr;
+ NativeIteratorListNode* next_ = nullptr;
+
+ public:
+ NativeIteratorListNode* prev() { return prev_; }
+ NativeIteratorListNode* next() { return next_; }
+
+ void setPrev(NativeIteratorListNode* prev) { prev_ = prev; }
+ void setNext(NativeIteratorListNode* next) { next_ = next; }
+
+ static constexpr size_t offsetOfNext() {
+ return offsetof(NativeIteratorListNode, next_);
+ }
+
+ static constexpr size_t offsetOfPrev() {
+ return offsetof(NativeIteratorListNode, prev_);
+ }
+
+ private:
+ NativeIterator* asNativeIterator() {
+ return reinterpret_cast<NativeIterator*>(this);
+ }
+
+ friend class NativeIteratorListIter;
+};
+
+class NativeIteratorListHead : public NativeIteratorListNode {
+ private:
+ // Initialize a |Compartment::enumerators| sentinel.
+ NativeIteratorListHead() { prev_ = next_ = this; }
+ friend class JS::Compartment;
+};
+
+class NativeIteratorListIter {
+ private:
+ NativeIteratorListHead* head_;
+ NativeIteratorListNode* curr_;
+
+ public:
+ explicit NativeIteratorListIter(NativeIteratorListHead* head)
+ : head_(head), curr_(head->next()) {}
+
+ bool done() const { return curr_ == head_; }
+
+ NativeIterator* next() {
+ MOZ_ASSERT(!done());
+ NativeIterator* result = curr_->asNativeIterator();
+ curr_ = curr_->next();
+ return result;
+ }
+};
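+
+// Typical traversal, as in AssertDenseElementsNotIterated and
+// SuppressDeletedPropertyHelper in Iteration.cpp:
+//
+//   NativeIteratorListIter iter(obj->compartment()->enumeratorsAddr());
+//   while (!iter.done()) {
+//     NativeIterator* ni = iter.next();
+//     // ... examine |ni| ...
+//   }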
+
+// If an object only has own data properties, we can store a list of
+// PropertyIndex that can be used in Ion to more efficiently access those
+// properties in cases like `for (var key in obj) { ...obj[key]... }`.
+enum class NativeIteratorIndices : uint32_t {
+ // The object being iterated does not support indices.
+ Unavailable = 0,
+
+ // The object being iterated supports indices, but none have been
+ // allocated, because it has not yet been iterated by Ion code that
+ // can use indices-based access.
+ AvailableOnRequest = 1,
+
+ // The object being iterated had indices allocated, but they were
+ // disabled due to a deleted property.
+ Disabled = 2,
+
+ // The object being iterated had indices allocated, and they are
+ // still valid.
+ Valid = 3
+};
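+
+// Informal summary of the transitions: AvailableOnRequest becomes Valid once
+// indices are allocated (see GetIteratorWithIndices), and Valid becomes
+// Disabled when a property is deleted (see NativeIterator::disableIndices).
+// Unavailable never changes.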
+
+struct NativeIterator : public NativeIteratorListNode {
+ private:
+ // Object being iterated. Non-null except in NativeIterator sentinels,
+ // the empty iterator singleton (for iterating |null| or |undefined|), and
+ // inactive iterators.
+ GCPtr<JSObject*> objectBeingIterated_ = {};
+
+ // Internal iterator object.
+ const GCPtr<JSObject*> iterObj_ = {};
+
+ // The end of GCPtr<Shape*>s that appear directly after |this|, as part of an
+ // overall allocation that stores |*this|, shapes, iterated strings, and maybe
+ // indices. Once this has been fully initialized, it also equals the start of
+ // iterated strings.
+ GCPtr<Shape*>* shapesEnd_; // initialized by constructor
+
+ // The next property, pointing into an array of strings directly after any
+ // GCPtr<Shape*>s that appear directly after |*this|, as part of an overall
+ // allocation that stores |*this|, shapes, iterated strings, and maybe
+ // indices.
+ GCPtr<JSLinearString*>* propertyCursor_; // initialized by constructor
+
+ // The limit/end of properties to iterate. Once |this| has been fully
+ // initialized, it also equals the start of indices, if indices are present,
+ // or the end of the full allocation storing |*this|, shapes, and strings, if
+ // indices are not present. Beware! This value may change as properties are
+ // deleted from the observed object.
+ GCPtr<JSLinearString*>* propertiesEnd_; // initialized by constructor
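+  // Thus the full allocation is laid out as follows (a sketch; see the field
+  // comments above for the caveats):
+  //
+  //   | NativeIterator | Shape*s        | JSLinearString*s  | PropertyIndex |
+  //                    ^shapesBegin()   ^shapesEnd_ ==      ^propertiesEnd_ ==
+  //                                      propertiesBegin()   indicesBegin()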
+
+ HashNumber shapesHash_; // initialized by constructor
+
+ public:
+  // Flags describing the state of a cacheable native iterator (for example,
+  // whether it is currently active). Not serialized by XDR.
+ struct Flags {
+ // This flag is set when all shapes and properties associated with this
+ // NativeIterator have been initialized, such that |shapesEnd_|, in
+ // addition to being the end of shapes, is also the beginning of
+ // properties.
+ //
+ // This flag is only *not* set when a NativeIterator is in the process
+ // of being constructed. At such time |shapesEnd_| accounts only for
+ // shapes that have been initialized -- potentially none of them.
+ // Instead, |propertyCursor_| is initialized to the ultimate/actual
+ // start of properties and must be used instead of |propertiesBegin()|,
+ // which asserts that this flag is present to guard against misuse.
+ static constexpr uint32_t Initialized = 0x1;
+
+ // This flag indicates that this NativeIterator is currently being used
+ // to enumerate an object's properties and has not yet been closed.
+ static constexpr uint32_t Active = 0x2;
+
+ // This flag indicates that the object being enumerated by this
+ // |NativeIterator| had a property deleted from it before it was
+ // visited, forcing the properties array in this to be mutated to
+ // remove it.
+ static constexpr uint32_t HasUnvisitedPropertyDeletion = 0x4;
+
+ // Whether this is the shared empty iterator object used for iterating over
+ // null/undefined.
+ static constexpr uint32_t IsEmptyIteratorSingleton = 0x8;
+
+ // If any of these bits are set on a |NativeIterator|, it isn't
+ // currently reusable. (An active |NativeIterator| can't be stolen
+ // *right now*; a |NativeIterator| that's had its properties mutated
+ // can never be reused, because it would give incorrect results.)
+ static constexpr uint32_t NotReusable =
+ Active | HasUnvisitedPropertyDeletion;
+ };
+
+ private:
+ static constexpr uint32_t FlagsBits = 4;
+ static constexpr uint32_t IndicesBits = 2;
+
+ static constexpr uint32_t FlagsMask = (1 << FlagsBits) - 1;
+
+ static constexpr uint32_t PropCountShift = IndicesBits + FlagsBits;
+ static constexpr uint32_t PropCountBits = 32 - PropCountShift;
+
+ public:
+ static constexpr uint32_t IndicesShift = FlagsBits;
+ static constexpr uint32_t IndicesMask = ((1 << IndicesBits) - 1)
+ << IndicesShift;
+
+ static constexpr uint32_t PropCountLimit = 1 << PropCountBits;
+
+ private:
+ // Stores Flags bits and indices state in the lower bits and the initial
+ // property count above them.
+ uint32_t flagsAndCount_ = 0;
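+
+  // Worked example (illustrative): an Initialized, Active iterator over three
+  // properties with Valid indices stores
+  //   (3 << PropCountShift) | (uint32_t(NativeIteratorIndices::Valid)
+  //     << IndicesShift) | Flags::Initialized | Flags::Active
+  // == (3 << 6) | (3 << 4) | 0x1 | 0x2 == 0xF3 in |flagsAndCount_|.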
+
+#ifdef DEBUG
+ // If true, this iterator may contain indexed properties that came from
+ // objects on the prototype chain. This is used by certain debug assertions.
+ bool maybeHasIndexedPropertiesFromProto_ = false;
+#endif
+
+ // END OF PROPERTIES
+
+ // No further fields appear after here *in NativeIterator*, but this class is
+ // always allocated with space tacked on immediately after |this| to store
+  // shapes up to |shapesEnd_|, iterated property names after that up to
+  // |propertiesEnd_|, and maybe PropertyIndex values up to |indicesEnd()|.
+
+ public:
+ /**
+ * Initialize a NativeIterator properly allocated for |props.length()|
+ * properties and |numShapes| shapes. If |indices| is non-null, also
+ * allocates room for |indices.length()| PropertyIndex values. In this case,
+ * |indices.length()| must equal |props.length()|.
+ *
+ * Despite being a constructor, THIS FUNCTION CAN REPORT ERRORS. Users
+ * MUST set |*hadError = false| on entry and consider |*hadError| on return
+ * to mean this function failed.
+ */
+ NativeIterator(JSContext* cx, Handle<PropertyIteratorObject*> propIter,
+ Handle<JSObject*> objBeingIterated, HandleIdVector props,
+ bool supportsIndices, PropertyIndexVector* indices,
+ uint32_t numShapes, bool* hadError);
+
+ JSObject* objectBeingIterated() const { return objectBeingIterated_; }
+
+ void initObjectBeingIterated(JSObject& obj) {
+ MOZ_ASSERT(!objectBeingIterated_);
+ objectBeingIterated_.init(&obj);
+ }
+ void clearObjectBeingIterated() {
+ MOZ_ASSERT(objectBeingIterated_);
+ objectBeingIterated_ = nullptr;
+ }
+
+ GCPtr<Shape*>* shapesBegin() const {
+ static_assert(
+ alignof(GCPtr<Shape*>) <= alignof(NativeIterator),
+ "NativeIterator must be aligned to begin storing "
+ "GCPtr<Shape*>s immediately after it with no required padding");
+ const NativeIterator* immediatelyAfter = this + 1;
+ auto* afterNonConst = const_cast<NativeIterator*>(immediatelyAfter);
+ return reinterpret_cast<GCPtr<Shape*>*>(afterNonConst);
+ }
+
+ GCPtr<Shape*>* shapesEnd() const { return shapesEnd_; }
+
+ uint32_t shapeCount() const {
+ return mozilla::PointerRangeSize(shapesBegin(), shapesEnd());
+ }
+
+ GCPtr<JSLinearString*>* propertiesBegin() const {
+ static_assert(
+ alignof(GCPtr<Shape*>) >= alignof(GCPtr<JSLinearString*>),
+ "GCPtr<JSLinearString*>s for properties must be able to appear "
+ "directly after any GCPtr<Shape*>s after this NativeIterator, "
+ "with no padding space required for correct alignment");
+ static_assert(
+ alignof(NativeIterator) >= alignof(GCPtr<JSLinearString*>),
+ "GCPtr<JSLinearString*>s for properties must be able to appear "
+ "directly after this NativeIterator when no GCPtr<Shape*>s are "
+ "present, with no padding space required for correct "
+ "alignment");
+
+ // We *could* just check the assertion below if we wanted, but the
+ // incompletely-initialized NativeIterator case matters for so little
+ // code that we prefer not imposing the condition-check on every single
+ // user.
+ MOZ_ASSERT(isInitialized(),
+ "NativeIterator must be initialized, or else |shapesEnd_| "
+ "isn't necessarily the start of properties and instead "
+ "|propertyCursor_| is");
+
+ return reinterpret_cast<GCPtr<JSLinearString*>*>(shapesEnd_);
+ }
+
+ GCPtr<JSLinearString*>* propertiesEnd() const { return propertiesEnd_; }
+
+ GCPtr<JSLinearString*>* nextProperty() const { return propertyCursor_; }
+
+ PropertyIndex* indicesBegin() const {
+    // PropertyIndex must be able to appear directly after the properties
+    // array, with no padding required for correct alignment.
+ static_assert(alignof(GCPtr<JSLinearString*>) >= alignof(PropertyIndex));
+ return reinterpret_cast<PropertyIndex*>(propertiesEnd_);
+ }
+
+ PropertyIndex* indicesEnd() const {
+ MOZ_ASSERT(indicesState() == NativeIteratorIndices::Valid);
+    // One PropertyIndex per property key; pointer arithmetic already scales
+    // by sizeof(PropertyIndex).
+    return indicesBegin() + numKeys();
+ }
+
+ MOZ_ALWAYS_INLINE JS::Value nextIteratedValueAndAdvance() {
+ if (propertyCursor_ >= propertiesEnd_) {
+ MOZ_ASSERT(propertyCursor_ == propertiesEnd_);
+ return JS::MagicValue(JS_NO_ITER_VALUE);
+ }
+
+ JSLinearString* str = *propertyCursor_;
+ incCursor();
+ return JS::StringValue(str);
+ }
+
+ void resetPropertyCursorForReuse() {
+ MOZ_ASSERT(isInitialized());
+
+    // This function is called unconditionally on IteratorClose, and
+    // unvisited properties might have been deleted, so we can't assert that
+    // this NativeIterator is reusable. (Should we not bother resetting
+    // the cursor in that case?)
+
+ // Note: JIT code inlines |propertyCursor_| resetting when an iterator
+ // ends: see |CodeGenerator::visitIteratorEnd|.
+ propertyCursor_ = propertiesBegin();
+ }
+
+ bool previousPropertyWas(JS::Handle<JSLinearString*> str) {
+ MOZ_ASSERT(isInitialized());
+ return propertyCursor_ > propertiesBegin() && propertyCursor_[-1] == str;
+ }
+
+ size_t numKeys() const {
+ return mozilla::PointerRangeSize(propertiesBegin(), propertiesEnd());
+ }
+
+ void trimLastProperty() {
+ MOZ_ASSERT(isInitialized());
+ propertiesEnd_--;
+
+ // This invokes the pre barrier on this property, since it's no longer
+ // going to be marked, and it ensures that any existing remembered set
+ // entry will be dropped.
+ *propertiesEnd_ = nullptr;
+
+ // Indices are no longer valid.
+ disableIndices();
+ }
+
+ JSObject* iterObj() const { return iterObj_; }
+ GCPtr<JSLinearString*>* currentProperty() const {
+ MOZ_ASSERT(propertyCursor_ < propertiesEnd());
+ return propertyCursor_;
+ }
+
+ void incCursor() {
+ MOZ_ASSERT(isInitialized());
+ propertyCursor_++;
+ }
+
+ HashNumber shapesHash() const { return shapesHash_; }
+
+ bool isInitialized() const { return flags() & Flags::Initialized; }
+
+ size_t allocationSize() const;
+
+#ifdef DEBUG
+ void setMaybeHasIndexedPropertiesFromProto() {
+ maybeHasIndexedPropertiesFromProto_ = true;
+ }
+ bool maybeHasIndexedPropertiesFromProto() const {
+ return maybeHasIndexedPropertiesFromProto_;
+ }
+#endif
+
+ private:
+ uint32_t flags() const { return flagsAndCount_ & FlagsMask; }
+
+ NativeIteratorIndices indicesState() const {
+ return NativeIteratorIndices((flagsAndCount_ & IndicesMask) >>
+ IndicesShift);
+ }
+
+ uint32_t initialPropertyCount() const {
+ return flagsAndCount_ >> PropCountShift;
+ }
+
+ static uint32_t initialFlagsAndCount(uint32_t count) {
+ // No flags are initially set.
+ MOZ_ASSERT(count < PropCountLimit);
+ return count << PropCountShift;
+ }
+
+ void setFlags(uint32_t flags) {
+ MOZ_ASSERT((flags & ~FlagsMask) == 0);
+ flagsAndCount_ = (flagsAndCount_ & ~FlagsMask) | flags;
+ }
+
+ void setIndicesState(NativeIteratorIndices indices) {
+ uint32_t indicesBits = uint32_t(indices) << IndicesShift;
+ flagsAndCount_ = (flagsAndCount_ & ~IndicesMask) | indicesBits;
+ }
+
+ bool indicesAllocated() const {
+ return indicesState() >= NativeIteratorIndices::Disabled;
+ }
+
+ void markInitialized() {
+ MOZ_ASSERT(flags() == 0);
+ setFlags(Flags::Initialized);
+ }
+
+ bool isUnlinked() const { return !prev_ && !next_; }
+
+ public:
+ // Whether this is the shared empty iterator object used for iterating over
+ // null/undefined.
+ bool isEmptyIteratorSingleton() const {
+ // Note: equivalent code is inlined in MacroAssembler::iteratorClose.
+ bool res = flags() & Flags::IsEmptyIteratorSingleton;
+ MOZ_ASSERT_IF(
+ res, flags() == (Flags::Initialized | Flags::IsEmptyIteratorSingleton));
+ MOZ_ASSERT_IF(res, !objectBeingIterated_);
+ MOZ_ASSERT_IF(res, initialPropertyCount() == 0);
+ MOZ_ASSERT_IF(res, shapeCount() == 0);
+ MOZ_ASSERT_IF(res, isUnlinked());
+ return res;
+ }
+ void markEmptyIteratorSingleton() {
+ flagsAndCount_ |= Flags::IsEmptyIteratorSingleton;
+
+ // isEmptyIteratorSingleton() has various debug assertions.
+ MOZ_ASSERT(isEmptyIteratorSingleton());
+ }
+
+ bool isActive() const {
+ MOZ_ASSERT(isInitialized());
+
+ return flags() & Flags::Active;
+ }
+
+ void markActive() {
+ MOZ_ASSERT(isInitialized());
+ MOZ_ASSERT(!isEmptyIteratorSingleton());
+
+ flagsAndCount_ |= Flags::Active;
+ }
+
+ void markInactive() {
+ MOZ_ASSERT(isInitialized());
+ MOZ_ASSERT(!isEmptyIteratorSingleton());
+
+ flagsAndCount_ &= ~Flags::Active;
+ }
+
+ bool isReusable() const {
+ MOZ_ASSERT(isInitialized());
+
+ // Cached NativeIterators are reusable if they're not currently active
+ // and their properties array hasn't been mutated, i.e. if only
+ // |Flags::Initialized| is set. Using |Flags::NotReusable| to test
+ // would also work, but this formulation is safer against memory
+ // corruption.
+ return flags() == Flags::Initialized;
+ }
+
+ void markHasUnvisitedPropertyDeletion() {
+ MOZ_ASSERT(isInitialized());
+ MOZ_ASSERT(!isEmptyIteratorSingleton());
+
+ flagsAndCount_ |= Flags::HasUnvisitedPropertyDeletion;
+ }
+
+ bool hasValidIndices() const {
+ return indicesState() == NativeIteratorIndices::Valid;
+ }
+
+ bool indicesAvailableOnRequest() const {
+ return indicesState() == NativeIteratorIndices::AvailableOnRequest;
+ }
+
+ void disableIndices() {
+ // If we have allocated indices, set the state to Disabled.
+ // This will ensure that we don't use them, but we still
+ // free them correctly.
+ if (indicesState() == NativeIteratorIndices::Valid) {
+ setIndicesState(NativeIteratorIndices::Disabled);
+ }
+ }
+
+ void link(NativeIteratorListNode* other) {
+ MOZ_ASSERT(isInitialized());
+
+ // The shared iterator used for for-in with null/undefined is immutable and
+ // shouldn't be linked.
+ MOZ_ASSERT(!isEmptyIteratorSingleton());
+
+ // A NativeIterator cannot appear in the enumerator list twice.
+ MOZ_ASSERT(isUnlinked());
+
+ setNext(other);
+ setPrev(other->prev());
+
+ other->prev()->setNext(this);
+ other->setPrev(this);
+ }
+ void unlink() {
+ MOZ_ASSERT(isInitialized());
+ MOZ_ASSERT(!isEmptyIteratorSingleton());
+
+ next()->setPrev(prev());
+ prev()->setNext(next());
+ setNext(nullptr);
+ setPrev(nullptr);
+ }
+
+ void trace(JSTracer* trc);
+
+ static constexpr size_t offsetOfObjectBeingIterated() {
+ return offsetof(NativeIterator, objectBeingIterated_);
+ }
+
+ static constexpr size_t offsetOfShapesEnd() {
+ return offsetof(NativeIterator, shapesEnd_);
+ }
+
+ static constexpr size_t offsetOfPropertyCursor() {
+ return offsetof(NativeIterator, propertyCursor_);
+ }
+
+ static constexpr size_t offsetOfPropertiesEnd() {
+ return offsetof(NativeIterator, propertiesEnd_);
+ }
+
+ static constexpr size_t offsetOfFlagsAndCount() {
+ return offsetof(NativeIterator, flagsAndCount_);
+ }
+
+ static constexpr size_t offsetOfFirstShape() {
+ // Shapes are stored directly after |this|.
+ return sizeof(NativeIterator);
+ }
+};
+
+class PropertyIteratorObject : public NativeObject {
+ static const JSClassOps classOps_;
+
+ enum { IteratorSlot, SlotCount };
+
+ public:
+ static const JSClass class_;
+
+ NativeIterator* getNativeIterator() const {
+ return maybePtrFromReservedSlot<NativeIterator>(IteratorSlot);
+ }
+ void initNativeIterator(js::NativeIterator* ni) {
+ initReservedSlot(IteratorSlot, PrivateValue(ni));
+ }
+
+ size_t sizeOfMisc(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ static size_t offsetOfIteratorSlot() {
+ return getFixedSlotOffset(IteratorSlot);
+ }
+
+ private:
+ static void trace(JSTracer* trc, JSObject* obj);
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+};
+
+class ArrayIteratorObject : public NativeObject {
+ public:
+ static const JSClass class_;
+};
+
+ArrayIteratorObject* NewArrayIteratorTemplate(JSContext* cx);
+ArrayIteratorObject* NewArrayIterator(JSContext* cx);
+
+class StringIteratorObject : public NativeObject {
+ public:
+ static const JSClass class_;
+};
+
+StringIteratorObject* NewStringIteratorTemplate(JSContext* cx);
+StringIteratorObject* NewStringIterator(JSContext* cx);
+
+class RegExpStringIteratorObject : public NativeObject {
+ public:
+ static const JSClass class_;
+};
+
+RegExpStringIteratorObject* NewRegExpStringIteratorTemplate(JSContext* cx);
+RegExpStringIteratorObject* NewRegExpStringIterator(JSContext* cx);
+
+[[nodiscard]] bool EnumerateProperties(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector props);
+
+PropertyIteratorObject* LookupInIteratorCache(JSContext* cx, HandleObject obj);
+
+PropertyIteratorObject* GetIterator(JSContext* cx, HandleObject obj);
+PropertyIteratorObject* GetIteratorWithIndices(JSContext* cx, HandleObject obj);
+
+PropertyIteratorObject* ValueToIterator(JSContext* cx, HandleValue vp);
+
+void CloseIterator(JSObject* obj);
+
+bool IteratorCloseForException(JSContext* cx, HandleObject obj);
+
+void UnwindIteratorForUncatchableException(JSObject* obj);
+
+extern bool SuppressDeletedProperty(JSContext* cx, HandleObject obj, jsid id);
+
+extern bool SuppressDeletedElement(JSContext* cx, HandleObject obj,
+ uint32_t index);
+
+#ifdef DEBUG
+extern void AssertDenseElementsNotIterated(NativeObject* obj);
+#else
+inline void AssertDenseElementsNotIterated(NativeObject* obj) {}
+#endif
+
+/*
+ * IteratorMore() returns the next iteration value. If no value is available,
+ * MagicValue(JS_NO_ITER_VALUE) is returned.
+ */
+inline Value IteratorMore(JSObject* iterobj) {
+ NativeIterator* ni =
+ iterobj->as<PropertyIteratorObject>().getNativeIterator();
+ return ni->nextIteratedValueAndAdvance();
+}
+
+/*
+ * Create an object of the form { value: VALUE, done: DONE }.
+ * ES 2017 draft 7.4.7.
+ */
+extern PlainObject* CreateIterResultObject(JSContext* cx, HandleValue value,
+ bool done);
+
+/*
+ * Global Iterator constructor.
+ * Iterator Helpers proposal 2.1.3.
+ */
+class IteratorObject : public NativeObject {
+ public:
+ static const JSClass class_;
+ static const JSClass protoClass_;
+};
+
+/*
+ * Wrapper for iterators created via Iterator.from.
+ * Iterator Helpers proposal 2.1.3.3.1.1.
+ */
+class WrapForValidIteratorObject : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ enum { IteratedSlot, SlotCount };
+
+ static_assert(
+ IteratedSlot == ITERATED_SLOT,
+ "IteratedSlot must match self-hosting define for iterated object slot.");
+};
+
+WrapForValidIteratorObject* NewWrapForValidIterator(JSContext* cx);
+
+/*
+ * Generator-esque object returned by Iterator Helper methods.
+ */
+class IteratorHelperObject : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ enum {
+ // The implementation (an instance of one of the generators in
+ // builtin/Iterator.js).
+ // Never null.
+ GeneratorSlot,
+
+ SlotCount,
+ };
+
+ static_assert(GeneratorSlot == ITERATOR_HELPER_GENERATOR_SLOT,
+ "GeneratorSlot must match self-hosting define for generator "
+ "object slot.");
+};
+
+IteratorHelperObject* NewIteratorHelper(JSContext* cx);
+
+bool IterableToArray(JSContext* cx, HandleValue iterable,
+ MutableHandle<ArrayObject*> array);
+
+} /* namespace js */
+
+#endif /* vm_Iteration_h */
diff --git a/js/src/vm/JSAtom-inl.h b/js/src/vm/JSAtom-inl.h
new file mode 100644
index 0000000000..5e13b6e058
--- /dev/null
+++ b/js/src/vm/JSAtom-inl.h
@@ -0,0 +1,157 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSAtom_inl_h
+#define vm_JSAtom_inl_h
+
+#include "vm/JSAtom.h"
+
+#include "mozilla/RangedPtr.h"
+
+#include "jsnum.h"
+
+#include "gc/MaybeRooted.h"
+#include "vm/JSAtomState.h"
+#include "vm/JSContext.h"
+#include "vm/StringType.h"
+
+namespace js {
+
+MOZ_ALWAYS_INLINE jsid AtomToId(JSAtom* atom) {
+ static_assert(JS::PropertyKey::IntMin == 0);
+
+ uint32_t index;
+ if (atom->isIndex(&index) && index <= JS::PropertyKey::IntMax) {
+ return JS::PropertyKey::Int(int32_t(index));
+ }
+
+ return JS::PropertyKey::NonIntAtom(atom);
+}
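+
+// For example, AtomToId on the atom "42" produces an int-keyed PropertyKey
+// (since 42 <= IntMax), while the atom "foo" produces a non-integer atom key.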
+
+// Use the NameToId method instead!
+inline jsid AtomToId(PropertyName* name) = delete;
+
+template <AllowGC allowGC>
+extern bool PrimitiveValueToIdSlow(
+ JSContext* cx, typename MaybeRooted<JS::Value, allowGC>::HandleType v,
+ typename MaybeRooted<jsid, allowGC>::MutableHandleType idp);
+
+template <AllowGC allowGC>
+inline bool PrimitiveValueToId(
+ JSContext* cx, typename MaybeRooted<Value, allowGC>::HandleType v,
+ typename MaybeRooted<jsid, allowGC>::MutableHandleType idp) {
+ // Non-primitive values should call ToPropertyKey.
+ MOZ_ASSERT(v.isPrimitive());
+
+ if (v.isString()) {
+ JSAtom* atom;
+ if (v.toString()->isAtom()) {
+ atom = &v.toString()->asAtom();
+ } else {
+ atom = AtomizeString(cx, v.toString());
+ if (!atom) {
+ if constexpr (!allowGC) {
+ cx->recoverFromOutOfMemory();
+ }
+ return false;
+ }
+ }
+ idp.set(AtomToId(atom));
+ return true;
+ }
+
+ if (v.isInt32()) {
+ if (PropertyKey::fitsInInt(v.toInt32())) {
+ idp.set(PropertyKey::Int(v.toInt32()));
+ return true;
+ }
+ } else if (v.isSymbol()) {
+ idp.set(PropertyKey::Symbol(v.toSymbol()));
+ return true;
+ }
+
+ return PrimitiveValueToIdSlow<allowGC>(cx, v, idp);
+}
+
+/*
+ * Write out the characters representing |index| to the memory just before
+ * |end|. Thus |*end| is not touched, but |end[-1]| and earlier are modified
+ * as appropriate. There must be at least js::UINT32_CHAR_BUFFER_LENGTH
+ * elements before |end| to avoid buffer underflow. The start of the
+ * characters written is returned and is necessarily before |end|.
+ */
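+// For example, BackfillIndexInCharBuffer(57, end) writes '5' to end[-2] and
+// '7' to end[-1], then returns end - 2.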
+template <typename T>
+inline mozilla::RangedPtr<T> BackfillIndexInCharBuffer(
+ uint32_t index, mozilla::RangedPtr<T> end) {
+#ifdef DEBUG
+ /*
+ * Assert that the buffer we're filling will hold as many characters as we
+ * could write out, by dereferencing the index that would hold the most
+ * significant digit.
+ */
+ (void)*(end - UINT32_CHAR_BUFFER_LENGTH);
+#endif
+
+ do {
+ uint32_t next = index / 10, digit = index % 10;
+ *--end = '0' + digit;
+ index = next;
+ } while (index > 0);
+
+ return end;
+}
+
+bool IndexToIdSlow(JSContext* cx, uint32_t index, MutableHandleId idp);
+
+inline bool IndexToId(JSContext* cx, uint32_t index, MutableHandleId idp) {
+ if (index <= PropertyKey::IntMax) {
+ idp.set(PropertyKey::Int(index));
+ return true;
+ }
+
+ return IndexToIdSlow(cx, index, idp);
+}
+
+static MOZ_ALWAYS_INLINE JSLinearString* IdToString(JSContext* cx, jsid id) {
+ if (id.isString()) {
+ return id.toAtom();
+ }
+
+ if (MOZ_LIKELY(id.isInt())) {
+ return Int32ToString<CanGC>(cx, id.toInt());
+ }
+
+ RootedValue idv(cx, IdToValue(id));
+ JSString* str = ToStringSlow<CanGC>(cx, idv);
+ if (!str) {
+ return nullptr;
+ }
+
+ return str->ensureLinear(cx);
+}
+
+inline Handle<PropertyName*> TypeName(JSType type, const JSAtomState& names) {
+ MOZ_ASSERT(type < JSTYPE_LIMIT);
+ static_assert(offsetof(JSAtomState, undefined) +
+ JSTYPE_LIMIT * sizeof(ImmutableTenuredPtr<PropertyName*>) <=
+ sizeof(JSAtomState));
+ static_assert(JSTYPE_UNDEFINED == 0);
+ return (&names.undefined)[type];
+}
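+
+// For example, TypeName(JSTYPE_UNDEFINED, names) returns names.undefined;
+// the static_asserts above guarantee that the JSType enumerators index the
+// consecutive name fields of JSAtomState in declaration order.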
+
+inline Handle<PropertyName*> ClassName(JSProtoKey key, JSAtomState& atomState) {
+ MOZ_ASSERT(key < JSProto_LIMIT);
+ static_assert(offsetof(JSAtomState, Null) +
+ JSProto_LIMIT *
+ sizeof(ImmutableTenuredPtr<PropertyName*>) <=
+ sizeof(JSAtomState));
+ static_assert(JSProto_Null == 0);
+ return (&atomState.Null)[key];
+}
+
+} // namespace js
+
+#endif /* vm_JSAtom_inl_h */
diff --git a/js/src/vm/JSAtom.cpp b/js/src/vm/JSAtom.cpp
new file mode 100644
index 0000000000..6e4a2e080a
--- /dev/null
+++ b/js/src/vm/JSAtom.cpp
@@ -0,0 +1,1148 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS atom table.
+ */
+
+#include "vm/JSAtom-inl.h"
+
+#include "mozilla/HashFunctions.h" // mozilla::HashStringKnownLength
+#include "mozilla/RangedPtr.h"
+
+#include <iterator>
+#include <string.h>
+
+#include "jstypes.h"
+
+#include "frontend/CompilationStencil.h"
+#include "gc/GC.h"
+#include "gc/Marking.h"
+#include "gc/MaybeRooted.h"
+#include "js/CharacterEncoding.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Symbol.h"
+#include "util/Text.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/StaticStrings.h"
+#include "vm/StringType.h"
+#include "vm/SymbolType.h"
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordType.h"
+# include "vm/TupleType.h"
+#endif
+
+#include "gc/AtomMarking-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/Realm-inl.h"
+#include "vm/StringType-inl.h"
+
+using namespace js;
+
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::RangedPtr;
+
+template <typename CharT>
+extern void InflateUTF8CharsToBuffer(const JS::UTF8Chars src, CharT* dst,
+ size_t dstLen,
+ JS::SmallestEncoding encoding);
+
+template <typename CharT>
+extern bool UTF8EqualsChars(const JS::UTF8Chars utf8, const CharT* chars);
+
+extern bool GetUTF8AtomizationData(JSContext* cx, const JS::UTF8Chars utf8,
+ size_t* outlen,
+ JS::SmallestEncoding* encoding,
+ HashNumber* hashNum);
+
+struct js::AtomHasher::Lookup {
+ union {
+ const JS::Latin1Char* latin1Chars;
+ const char16_t* twoByteChars;
+ const char* utf8Bytes;
+ };
+ enum { TwoByteChar, Latin1, UTF8 } type;
+ size_t length;
+ size_t byteLength;
+ const JSAtom* atom; /* Optional. */
+ JS::AutoCheckCannotGC nogc;
+
+ HashNumber hash;
+
+ MOZ_ALWAYS_INLINE Lookup(const char* utf8Bytes, size_t byteLen, size_t length,
+ HashNumber hash)
+ : utf8Bytes(utf8Bytes),
+ type(UTF8),
+ length(length),
+ byteLength(byteLen),
+ atom(nullptr),
+ hash(hash) {}
+
+ MOZ_ALWAYS_INLINE Lookup(const char16_t* chars, size_t length)
+ : twoByteChars(chars),
+ type(TwoByteChar),
+ length(length),
+ atom(nullptr),
+ hash(mozilla::HashString(chars, length)) {}
+
+ MOZ_ALWAYS_INLINE Lookup(const JS::Latin1Char* chars, size_t length)
+ : latin1Chars(chars),
+ type(Latin1),
+ length(length),
+ atom(nullptr),
+ hash(mozilla::HashString(chars, length)) {}
+
+ MOZ_ALWAYS_INLINE Lookup(HashNumber hash, const char16_t* chars,
+ size_t length)
+ : twoByteChars(chars),
+ type(TwoByteChar),
+ length(length),
+ atom(nullptr),
+ hash(hash) {
+ MOZ_ASSERT(hash == mozilla::HashString(chars, length));
+ }
+
+ MOZ_ALWAYS_INLINE Lookup(HashNumber hash, const JS::Latin1Char* chars,
+ size_t length)
+ : latin1Chars(chars),
+ type(Latin1),
+ length(length),
+ atom(nullptr),
+ hash(hash) {
+ MOZ_ASSERT(hash == mozilla::HashString(chars, length));
+ }
+
+ inline explicit Lookup(const JSAtom* atom)
+ : type(atom->hasLatin1Chars() ? Latin1 : TwoByteChar),
+ length(atom->length()),
+ atom(atom),
+ hash(atom->hash()) {
+ if (type == Latin1) {
+ latin1Chars = atom->latin1Chars(nogc);
+ MOZ_ASSERT(mozilla::HashString(latin1Chars, length) == hash);
+ } else {
+ MOZ_ASSERT(type == TwoByteChar);
+ twoByteChars = atom->twoByteChars(nogc);
+ MOZ_ASSERT(mozilla::HashString(twoByteChars, length) == hash);
+ }
+ }
+};
+
+inline HashNumber js::AtomHasher::hash(const Lookup& l) { return l.hash; }
+
+MOZ_ALWAYS_INLINE bool js::AtomHasher::match(const WeakHeapPtr<JSAtom*>& entry,
+ const Lookup& lookup) {
+ JSAtom* key = entry.unbarrieredGet();
+ if (lookup.atom) {
+ return lookup.atom == key;
+ }
+ if (key->length() != lookup.length || key->hash() != lookup.hash) {
+ return false;
+ }
+
+ if (key->hasLatin1Chars()) {
+ const Latin1Char* keyChars = key->latin1Chars(lookup.nogc);
+ switch (lookup.type) {
+ case Lookup::Latin1:
+ return EqualChars(keyChars, lookup.latin1Chars, lookup.length);
+ case Lookup::TwoByteChar:
+ return EqualChars(keyChars, lookup.twoByteChars, lookup.length);
+ case Lookup::UTF8: {
+ JS::UTF8Chars utf8(lookup.utf8Bytes, lookup.byteLength);
+ return UTF8EqualsChars(utf8, keyChars);
+ }
+ }
+ }
+
+ const char16_t* keyChars = key->twoByteChars(lookup.nogc);
+ switch (lookup.type) {
+ case Lookup::Latin1:
+ return EqualChars(lookup.latin1Chars, keyChars, lookup.length);
+ case Lookup::TwoByteChar:
+ return EqualChars(keyChars, lookup.twoByteChars, lookup.length);
+ case Lookup::UTF8: {
+ JS::UTF8Chars utf8(lookup.utf8Bytes, lookup.byteLength);
+ return UTF8EqualsChars(utf8, keyChars);
+ }
+ }
+
+ MOZ_ASSERT_UNREACHABLE("AtomHasher::match unknown type");
+ return false;
+}
+
+UniqueChars js::AtomToPrintableString(JSContext* cx, JSAtom* atom) {
+ return QuoteString(cx, atom);
+}
+
+// Use a low initial capacity for the permanent atoms table to avoid penalizing
+// runtimes that create a small number of atoms.
+static const uint32_t JS_PERMANENT_ATOM_SIZE = 64;
+
+MOZ_ALWAYS_INLINE AtomSet::Ptr js::FrozenAtomSet::readonlyThreadsafeLookup(
+ const AtomSet::Lookup& l) const {
+ return mSet->readonlyThreadsafeLookup(l);
+}
+
+static JSAtom* PermanentlyAtomizeCharsValidLength(JSContext* cx,
+                                                  AtomSet& atomSet,
+                                                  mozilla::HashNumber hash,
+                                                  const Latin1Char* chars,
+                                                  size_t length);
+
+// Also forward-declared here so initializeAtoms below can atomize the
+// well-known symbol descriptions; defined later in this file.
+static JSAtom* PermanentlyAtomizeCharsNonStaticValidLength(
+    JSContext* cx, AtomSet& atomSet, mozilla::HashNumber hash,
+    const Latin1Char* chars, size_t length);
+
+bool JSRuntime::initializeAtoms(JSContext* cx) {
+ JS::AutoAssertNoGC nogc;
+
+ MOZ_ASSERT(!atoms_);
+ MOZ_ASSERT(!permanentAtoms_);
+
+ if (parentRuntime) {
+ permanentAtoms_ = parentRuntime->permanentAtoms_;
+
+ staticStrings = parentRuntime->staticStrings;
+ commonNames = parentRuntime->commonNames;
+ emptyString = parentRuntime->emptyString;
+ wellKnownSymbols = parentRuntime->wellKnownSymbols;
+
+ atoms_ = js_new<AtomsTable>();
+ return bool(atoms_);
+ }
+
+  // NOTE: There's no GC, but `gc.freezeSharedAtomsZone` below contains
+  // a function call that's marked as "Can GC".
+ Rooted<UniquePtr<AtomSet>> atomSet(cx,
+ cx->new_<AtomSet>(JS_PERMANENT_ATOM_SIZE));
+ if (!atomSet) {
+ return false;
+ }
+
+ staticStrings = js_new<StaticStrings>();
+ if (!staticStrings || !staticStrings->init(cx)) {
+ return false;
+ }
+
+ // The bare symbol names are already part of the well-known set, but their
+ // descriptions are not, so enumerate them here and add them to the initial
+ // permanent atoms set below.
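+  // For example, the entry generated below for |iterator| is
+  // {15, mozilla::HashStringKnownLength("Symbol.iterator", 15),
+  //  "Symbol.iterator"}.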
+ static const WellKnownAtomInfo symbolDescInfo[] = {
+#define COMMON_NAME_INFO(NAME) \
+ {uint32_t(sizeof("Symbol." #NAME) - 1), \
+ mozilla::HashStringKnownLength("Symbol." #NAME, \
+ sizeof("Symbol." #NAME) - 1), \
+ "Symbol." #NAME},
+ JS_FOR_EACH_WELL_KNOWN_SYMBOL(COMMON_NAME_INFO)
+#undef COMMON_NAME_INFO
+ };
+
+ commonNames = js_new<JSAtomState>();
+ if (!commonNames) {
+ return false;
+ }
+
+ ImmutableTenuredPtr<PropertyName*>* names =
+ reinterpret_cast<ImmutableTenuredPtr<PropertyName*>*>(commonNames.ref());
+ for (size_t i = 0; i < uint32_t(WellKnownAtomId::Limit); i++) {
+ const auto& info = wellKnownAtomInfos[i];
+ JSAtom* atom = PermanentlyAtomizeCharsValidLength(
+ cx, *atomSet, info.hash,
+ reinterpret_cast<const Latin1Char*>(info.content), info.length);
+ if (!atom) {
+ return false;
+ }
+ names->init(atom->asPropertyName());
+ names++;
+ }
+
+ for (const auto& info : symbolDescInfo) {
+ JSAtom* atom = PermanentlyAtomizeCharsNonStaticValidLength(
+ cx, *atomSet, info.hash,
+ reinterpret_cast<const Latin1Char*>(info.content), info.length);
+ if (!atom) {
+ return false;
+ }
+ names->init(atom->asPropertyName());
+ names++;
+ }
+ MOZ_ASSERT(uintptr_t(names) == uintptr_t(commonNames + 1));
+
+ emptyString = commonNames->empty;
+
+ // The self-hosted atoms are those that exist in a self-hosted JS source file,
+ // but are not defined in any of the well-known atom collections.
+ if (!cx->runtime()->selfHostStencil_->instantiateSelfHostedAtoms(
+ cx, *atomSet, cx->runtime()->selfHostStencilInput_->atomCache)) {
+ return false;
+ }
+
+ // Create the well-known symbols.
+ auto wks = js_new<WellKnownSymbols>();
+ if (!wks) {
+ return false;
+ }
+
+ {
+ // Prevent GC until we have fully initialized the well known symbols table.
+ // Faster than zeroing the array and null checking during every GC.
+ gc::AutoSuppressGC nogc(cx);
+
+ ImmutableTenuredPtr<PropertyName*>* descriptions =
+ commonNames->wellKnownSymbolDescriptions();
+ ImmutableTenuredPtr<JS::Symbol*>* symbols =
+ reinterpret_cast<ImmutableTenuredPtr<JS::Symbol*>*>(wks);
+ for (size_t i = 0; i < JS::WellKnownSymbolLimit; i++) {
+ JS::Symbol* symbol =
+ JS::Symbol::newWellKnown(cx, JS::SymbolCode(i), descriptions[i]);
+ if (!symbol) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ symbols[i].init(symbol);
+ }
+
+ wellKnownSymbols = wks;
+ }
+
+ if (!gc.freezeSharedAtomsZone()) {
+ return false;
+ }
+
+ // The permanent atoms table has now been populated.
+ permanentAtoms_ =
+ js_new<FrozenAtomSet>(atomSet.release()); // Takes ownership.
+ if (!permanentAtoms_) {
+ return false;
+ }
+
+ // Initialize the main atoms table.
+ atoms_ = js_new<AtomsTable>();
+ if (!atoms_) {
+ return false;
+ }
+
+ return true;
+}
+
+void JSRuntime::finishAtoms() {
+ js_delete(atoms_.ref());
+
+ if (!parentRuntime) {
+ js_delete(permanentAtoms_.ref());
+ js_delete(staticStrings.ref());
+ js_delete(commonNames.ref());
+ js_delete(wellKnownSymbols.ref());
+ }
+
+ atoms_ = nullptr;
+ permanentAtoms_ = nullptr;
+ staticStrings = nullptr;
+ commonNames = nullptr;
+ wellKnownSymbols = nullptr;
+ emptyString = nullptr;
+}
+
+AtomsTable::AtomsTable()
+ : atoms(InitialTableSize), atomsAddedWhileSweeping(nullptr) {}
+
+AtomsTable::~AtomsTable() { MOZ_ASSERT(!atomsAddedWhileSweeping); }
+
+void AtomsTable::tracePinnedAtoms(JSTracer* trc) {
+ for (JSAtom* atom : pinnedAtoms) {
+ TraceRoot(trc, &atom, "pinned atom");
+ }
+}
+
+void js::TraceAtoms(JSTracer* trc) {
+ JSRuntime* rt = trc->runtime();
+ if (rt->permanentAtomsPopulated()) {
+ rt->atoms().tracePinnedAtoms(trc);
+ }
+}
+
+void AtomsTable::traceWeak(JSTracer* trc) {
+ for (AtomSet::Enum e(atoms); !e.empty(); e.popFront()) {
+ JSAtom* atom = e.front().unbarrieredGet();
+ MOZ_DIAGNOSTIC_ASSERT(atom);
+ if (!TraceManuallyBarrieredWeakEdge(trc, &atom, "AtomsTable::atoms")) {
+ e.removeFront();
+ } else {
+ MOZ_ASSERT(atom == e.front().unbarrieredGet());
+ }
+ }
+}
+
+bool AtomsTable::startIncrementalSweep(Maybe<SweepIterator>& atomsToSweepOut) {
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+ MOZ_ASSERT(atomsToSweepOut.isNothing());
+ MOZ_ASSERT(!atomsAddedWhileSweeping);
+
+ atomsAddedWhileSweeping = js_new<AtomSet>();
+ if (!atomsAddedWhileSweeping) {
+ return false;
+ }
+
+ atomsToSweepOut.emplace(atoms);
+
+ return true;
+}
+
+void AtomsTable::mergeAtomsAddedWhileSweeping() {
+ // Add atoms that were added to the secondary table while we were sweeping
+ // the main table.
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ auto newAtoms = atomsAddedWhileSweeping;
+ atomsAddedWhileSweeping = nullptr;
+
+ for (auto r = newAtoms->all(); !r.empty(); r.popFront()) {
+ if (!atoms.putNew(AtomHasher::Lookup(r.front().unbarrieredGet()),
+ r.front())) {
+ oomUnsafe.crash("Adding atom from secondary table after sweep");
+ }
+ }
+
+ js_delete(newAtoms);
+}
+
+bool AtomsTable::sweepIncrementally(SweepIterator& atomsToSweep,
+ SliceBudget& budget) {
+ // Sweep the table incrementally until we run out of work or budget.
+ while (!atomsToSweep.empty()) {
+ budget.step();
+ if (budget.isOverBudget()) {
+ return false;
+ }
+
+ JSAtom* atom = atomsToSweep.front().unbarrieredGet();
+ MOZ_DIAGNOSTIC_ASSERT(atom);
+ if (IsAboutToBeFinalizedUnbarriered(atom)) {
+ MOZ_ASSERT(!atom->isPinned());
+ atomsToSweep.removeFront();
+ } else {
+ MOZ_ASSERT(atom == atomsToSweep.front().unbarrieredGet());
+ }
+ atomsToSweep.popFront();
+ }
+
+ mergeAtomsAddedWhileSweeping();
+ return true;
+}
+
+size_t AtomsTable::sizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t size = sizeof(AtomsTable);
+ size += atoms.shallowSizeOfExcludingThis(mallocSizeOf);
+ if (atomsAddedWhileSweeping) {
+ size += atomsAddedWhileSweeping->shallowSizeOfExcludingThis(mallocSizeOf);
+ }
+ size += pinnedAtoms.sizeOfExcludingThis(mallocSizeOf);
+ return size;
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom*
+AtomizeAndCopyCharsNonStaticValidLengthFromLookup(
+ JSContext* cx, const CharT* chars, size_t length,
+ const AtomHasher::Lookup& lookup, const Maybe<uint32_t>& indexValue) {
+ // Try the per-Zone cache first. If we find the atom there we can avoid the
+ // markAtom call, and the multiple HashSet lookups below.
+ Zone* zone = cx->zone();
+ MOZ_ASSERT(zone);
+ AtomSet::AddPtr zonePtr = zone->atomCache().lookupForAdd(lookup);
+ if (zonePtr) {
+ // The cache is purged on GC so if we're in the middle of an
+ // incremental GC we should have barriered the atom when we put
+ // it in the cache.
+ JSAtom* atom = zonePtr->unbarrieredGet();
+ MOZ_ASSERT(AtomIsMarked(zone, atom));
+ return atom;
+ }
+
+ MOZ_ASSERT(cx->permanentAtomsPopulated());
+
+ AtomSet::Ptr pp = cx->permanentAtoms().readonlyThreadsafeLookup(lookup);
+ if (pp) {
+ JSAtom* atom = pp->get();
+ if (MOZ_UNLIKELY(!zone->atomCache().add(zonePtr, atom))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return atom;
+ }
+
+ JSAtom* atom = cx->atoms().atomizeAndCopyCharsNonStaticValidLength(
+ cx, chars, length, indexValue, lookup);
+ if (!atom) {
+ return nullptr;
+ }
+
+ if (MOZ_UNLIKELY(!cx->atomMarking().inlinedMarkAtomFallible(cx, atom))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ if (MOZ_UNLIKELY(!zone->atomCache().add(zonePtr, atom))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return atom;
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* AllocateNewAtomNonStaticValidLength(
+ JSContext* cx, const CharT* chars, size_t length,
+ const Maybe<uint32_t>& indexValue, const AtomHasher::Lookup& lookup);
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* AllocateNewPermanentAtomNonStaticValidLength(
+ JSContext* cx, const CharT* chars, size_t length,
+ const AtomHasher::Lookup& lookup);
+
+template <typename CharT>
+MOZ_ALWAYS_INLINE JSAtom* AtomsTable::atomizeAndCopyCharsNonStaticValidLength(
+ JSContext* cx, const CharT* chars, size_t length,
+ const Maybe<uint32_t>& indexValue, const AtomHasher::Lookup& lookup) {
+ AtomSet::AddPtr p;
+
+ if (!atomsAddedWhileSweeping) {
+ p = atoms.lookupForAdd(lookup);
+ } else {
+ // We're currently sweeping the main atoms table and all new atoms will
+ // be added to a secondary table. Check this first.
+ p = atomsAddedWhileSweeping->lookupForAdd(lookup);
+
+ // If that fails check the main table but check if any atom found there
+ // is dead.
+ if (!p) {
+ if (AtomSet::AddPtr p2 = atoms.lookupForAdd(lookup)) {
+ JSAtom* atom = p2->unbarrieredGet();
+ if (!IsAboutToBeFinalizedUnbarriered(atom)) {
+ p = p2;
+ }
+ }
+ }
+ }
+
+ if (p) {
+ return p->get();
+ }
+
+ JSAtom* atom = AllocateNewAtomNonStaticValidLength(cx, chars, length,
+ indexValue, lookup);
+ if (!atom) {
+ return nullptr;
+ }
+
+ // The operations above can't GC; therefore the atoms table has not been
+ // modified and p is still valid.
+ AtomSet* addSet = atomsAddedWhileSweeping ? atomsAddedWhileSweeping : &atoms;
+ if (MOZ_UNLIKELY(!addSet->add(p, atom))) {
+ ReportOutOfMemory(cx); /* SystemAllocPolicy does not report OOM. */
+ return nullptr;
+ }
+
+ return atom;
+}
+
+/* |chars| must not point into an inline or short string. */
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* AtomizeAndCopyChars(
+ JSContext* cx, const CharT* chars, size_t length,
+ const Maybe<uint32_t>& indexValue, const Maybe<js::HashNumber>& hash) {
+ if (JSAtom* s = cx->staticStrings().lookup(chars, length)) {
+ return s;
+ }
+
+ if (MOZ_UNLIKELY(!JSString::validateLength(cx, length))) {
+ return nullptr;
+ }
+
+ if (hash.isSome()) {
+ AtomHasher::Lookup lookup(hash.value(), chars, length);
+ return AtomizeAndCopyCharsNonStaticValidLengthFromLookup(
+ cx, chars, length, lookup, indexValue);
+ }
+
+ AtomHasher::Lookup lookup(chars, length);
+ return AtomizeAndCopyCharsNonStaticValidLengthFromLookup(cx, chars, length,
+ lookup, indexValue);
+}
+
+template <typename CharT>
+static MOZ_NEVER_INLINE JSAtom*
+PermanentlyAtomizeAndCopyCharsNonStaticValidLength(
+ JSContext* cx, AtomSet& atomSet, const CharT* chars, size_t length,
+ const AtomHasher::Lookup& lookup) {
+ MOZ_ASSERT(!cx->permanentAtomsPopulated());
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+
+ AtomSet::AddPtr p = atomSet.lookupForAdd(lookup);
+ if (p) {
+ return p->get();
+ }
+
+ JSAtom* atom =
+ AllocateNewPermanentAtomNonStaticValidLength(cx, chars, length, lookup);
+ if (!atom) {
+ return nullptr;
+ }
+
+ // We are single threaded at this point, and the operations we've done since
+ // then can't GC; therefore the atoms table has not been modified and p is
+ // still valid.
+ if (!atomSet.add(p, atom)) {
+ ReportOutOfMemory(cx); /* SystemAllocPolicy does not report OOM. */
+ return nullptr;
+ }
+
+ return atom;
+}
+
+struct AtomizeUTF8CharsWrapper {
+ JS::UTF8Chars utf8;
+ JS::SmallestEncoding encoding;
+
+ AtomizeUTF8CharsWrapper(const JS::UTF8Chars& chars,
+ JS::SmallestEncoding minEncode)
+ : utf8(chars), encoding(minEncode) {}
+};
+
+// NewAtomNonStaticValidLength has 3 variants.
+// This is used by Latin1Char and char16_t.
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* NewAtomNonStaticValidLength(
+ JSContext* cx, const CharT* chars, size_t length, js::HashNumber hash) {
+ return NewAtomCopyNMaybeDeflateValidLength(cx, chars, length, hash);
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* MakeUTF8AtomHelperNonStaticValidLength(
+ JSContext* cx, const AtomizeUTF8CharsWrapper* chars, size_t length,
+ js::HashNumber hash) {
+ if (JSInlineString::lengthFits<CharT>(length)) {
+ CharT* storage;
+ JSAtom* str = AllocateInlineAtom(cx, length, &storage, hash);
+ if (!str) {
+ return nullptr;
+ }
+
+ InflateUTF8CharsToBuffer(chars->utf8, storage, length, chars->encoding);
+ return str;
+ }
+
+  // This helper is called from deep in the atomization path, which expects
+  // functions to fail gracefully with nullptr on OOM, without throwing.
+ UniquePtr<CharT[], JS::FreePolicy> newStr(
+ js_pod_arena_malloc<CharT>(js::StringBufferArena, length));
+ if (!newStr) {
+ return nullptr;
+ }
+
+ InflateUTF8CharsToBuffer(chars->utf8, newStr.get(), length, chars->encoding);
+
+ return JSAtom::newValidLength(cx, std::move(newStr), length, hash);
+}
+
+// Another variant of NewAtomNonStaticValidLength.
+static MOZ_ALWAYS_INLINE JSAtom* NewAtomNonStaticValidLength(
+ JSContext* cx, const AtomizeUTF8CharsWrapper* chars, size_t length,
+ js::HashNumber hash) {
+ if (length == 0) {
+ return cx->emptyString();
+ }
+
+ if (chars->encoding == JS::SmallestEncoding::UTF16) {
+ return MakeUTF8AtomHelperNonStaticValidLength<char16_t>(cx, chars, length,
+ hash);
+ }
+ return MakeUTF8AtomHelperNonStaticValidLength<JS::Latin1Char>(cx, chars,
+ length, hash);
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* AllocateNewAtomNonStaticValidLength(
+ JSContext* cx, const CharT* chars, size_t length,
+ const Maybe<uint32_t>& indexValue, const AtomHasher::Lookup& lookup) {
+ AutoAllocInAtomsZone ac(cx);
+
+ JSAtom* atom = NewAtomNonStaticValidLength(cx, chars, length, lookup.hash);
+ if (!atom) {
+ // Grudgingly forgo last-ditch GC. The alternative would be to manually GC
+ // here, and retry from the top.
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ MOZ_ASSERT(atom->hash() == lookup.hash);
+
+ if (indexValue) {
+ atom->setIsIndex(*indexValue);
+ } else {
+ // We need to call isIndexSlow directly to avoid the flag check in isIndex,
+ // because we still have to initialize that flag.
+ uint32_t index;
+ if (atom->isIndexSlow(&index)) {
+ atom->setIsIndex(index);
+ }
+ }
+
+ return atom;
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* AllocateNewPermanentAtomNonStaticValidLength(
+ JSContext* cx, const CharT* chars, size_t length,
+ const AtomHasher::Lookup& lookup) {
+ AutoAllocInAtomsZone ac(cx);
+
+#ifdef DEBUG
+ if constexpr (std::is_same_v<CharT, char16_t>) {
+ // Can call DontDeflate variant.
+ MOZ_ASSERT(!CanStoreCharsAsLatin1(chars, length));
+ }
+#endif
+
+ JSAtom* atom =
+ NewAtomCopyNDontDeflateValidLength(cx, chars, length, lookup.hash);
+ if (!atom) {
+ // Do not bother with a last-ditch GC here since we are very early in
+ // startup and there is no potential garbage to collect.
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ atom->makePermanent();
+
+ MOZ_ASSERT(atom->hash() == lookup.hash);
+
+ uint32_t index;
+ if (atom->isIndexSlow(&index)) {
+ atom->setIsIndex(index);
+ }
+
+ return atom;
+}
+
+JSAtom* js::AtomizeString(JSContext* cx, JSString* str) {
+ MOZ_ASSERT(cx->isMainThreadContext());
+
+ if (str->isAtom()) {
+ return &str->asAtom();
+ }
+
+ if (JSAtom* atom = cx->caches().stringToAtomCache.lookup(str)) {
+ return atom;
+ }
+
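+  // Fast path for short Latin-1 ropes: copy their segments into a stack
+  // buffer and probe the atom cache by character contents, so a cache hit
+  // avoids flattening the rope.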
+ JS::Latin1Char flattenRope[StringToAtomCache::MinStringLength];
+ mozilla::Maybe<StringToAtomCache::AtomTableKey> key;
+ size_t length = str->length();
+ if (str->isRope() && length < StringToAtomCache::MinStringLength &&
+ str->hasLatin1Chars()) {
+ StringSegmentRange<StringToAtomCache::MinStringLength> iter(cx);
+ if (iter.init(str)) {
+ size_t index = 0;
+ do {
+ const JSLinearString* s = iter.front();
+ CopyChars(flattenRope + index, *s);
+ index += s->length();
+ } while (iter.popFront() && !iter.empty());
+
+ if (JSAtom* atom = cx->caches().stringToAtomCache.lookupWithRopeChars(
+ flattenRope, length, key)) {
+        // Since this cache lookup is based on a string comparison, not
+        // object identity, we need to mark the atom explicitly here. This is
+        // not done in lookup() itself because #including JSContext.h there
+        // causes non-trivial #include ordering issues.
+ cx->markAtom(atom);
+ return atom;
+ }
+ }
+ }
+
+ Maybe<uint32_t> indexValue;
+ if (str->hasIndexValue()) {
+ indexValue.emplace(str->getIndexValue());
+ }
+
+ JSAtom* atom = nullptr;
+ if (key.isSome()) {
+ atom = AtomizeAndCopyChars(cx, key.value().string_, key.value().length_,
+ indexValue, mozilla::Some(key.value().hash_));
+ } else {
+ JSLinearString* linear = str->ensureLinear(cx);
+ if (!linear) {
+ return nullptr;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ atom = linear->hasLatin1Chars()
+ ? AtomizeAndCopyChars(cx, linear->latin1Chars(nogc),
+ linear->length(), indexValue, Nothing())
+ : AtomizeAndCopyChars(cx, linear->twoByteChars(nogc),
+ linear->length(), indexValue, Nothing());
+ }
+
+ if (!atom) {
+ return nullptr;
+ }
+
+ cx->caches().stringToAtomCache.maybePut(str, atom, key);
+
+ return atom;
+}
+
+bool js::AtomIsPinned(JSContext* cx, JSAtom* atom) { return atom->isPinned(); }
+
+bool js::PinAtom(JSContext* cx, JSAtom* atom) {
+ JS::AutoCheckCannotGC nogc;
+ return cx->runtime()->atoms().maybePinExistingAtom(cx, atom);
+}
+
+bool AtomsTable::maybePinExistingAtom(JSContext* cx, JSAtom* atom) {
+ MOZ_ASSERT(atom);
+
+ if (atom->isPinned()) {
+ return true;
+ }
+
+ if (!pinnedAtoms.append(atom)) {
+ return false;
+ }
+
+ atom->setPinned();
+ return true;
+}
+
+JSAtom* js::Atomize(JSContext* cx, const char* bytes, size_t length,
+ const Maybe<uint32_t>& indexValue) {
+ const Latin1Char* chars = reinterpret_cast<const Latin1Char*>(bytes);
+ return AtomizeAndCopyChars(cx, chars, length, indexValue, Nothing());
+}
+
+template <typename CharT>
+JSAtom* js::AtomizeChars(JSContext* cx, const CharT* chars, size_t length) {
+ return AtomizeAndCopyChars(cx, chars, length, Nothing(), Nothing());
+}
+
+template JSAtom* js::AtomizeChars(JSContext* cx, const Latin1Char* chars,
+ size_t length);
+
+template JSAtom* js::AtomizeChars(JSContext* cx, const char16_t* chars,
+ size_t length);
+
+JSAtom* js::AtomizeWithoutActiveZone(JSContext* cx, const char* bytes,
+ size_t length) {
+ // This is used to implement JS_AtomizeAndPinString{N} when called without an
+ // active zone. This simplifies the normal atomization code because it can
+ // assume a non-null cx->zone().
+
+ MOZ_ASSERT(!cx->zone());
+ MOZ_ASSERT(cx->permanentAtomsPopulated());
+
+ const Latin1Char* chars = reinterpret_cast<const Latin1Char*>(bytes);
+
+ if (JSAtom* s = cx->staticStrings().lookup(chars, length)) {
+ return s;
+ }
+
+ if (MOZ_UNLIKELY(!JSString::validateLength(cx, length))) {
+ return nullptr;
+ }
+
+ AtomHasher::Lookup lookup(chars, length);
+ if (AtomSet::Ptr pp = cx->permanentAtoms().readonlyThreadsafeLookup(lookup)) {
+ return pp->get();
+ }
+
+ return cx->atoms().atomizeAndCopyCharsNonStaticValidLength(cx, chars, length,
+ Nothing(), lookup);
+}
+
+/* |chars| must not point into an inline or short string. */
+template <typename CharT>
+JSAtom* js::AtomizeCharsNonStaticValidLength(JSContext* cx, HashNumber hash,
+ const CharT* chars,
+ size_t length) {
+ MOZ_ASSERT(!cx->staticStrings().lookup(chars, length));
+
+ AtomHasher::Lookup lookup(hash, chars, length);
+ return AtomizeAndCopyCharsNonStaticValidLengthFromLookup(cx, chars, length,
+ lookup, Nothing());
+}
+
+template JSAtom* js::AtomizeCharsNonStaticValidLength(JSContext* cx,
+ HashNumber hash,
+ const Latin1Char* chars,
+ size_t length);
+
+template JSAtom* js::AtomizeCharsNonStaticValidLength(JSContext* cx,
+ HashNumber hash,
+ const char16_t* chars,
+ size_t length);
+
+static JSAtom* PermanentlyAtomizeCharsValidLength(JSContext* cx,
+ AtomSet& atomSet,
+ HashNumber hash,
+ const Latin1Char* chars,
+ size_t length) {
+ if (JSAtom* s = cx->staticStrings().lookup(chars, length)) {
+ return s;
+ }
+
+ return PermanentlyAtomizeCharsNonStaticValidLength(cx, atomSet, hash, chars,
+ length);
+}
+
+JSAtom* js::PermanentlyAtomizeCharsNonStaticValidLength(JSContext* cx,
+ AtomSet& atomSet,
+ HashNumber hash,
+ const Latin1Char* chars,
+ size_t length) {
+ MOZ_ASSERT(!cx->staticStrings().lookup(chars, length));
+ MOZ_ASSERT(length <= JSString::MAX_LENGTH);
+
+ AtomHasher::Lookup lookup(hash, chars, length);
+ return PermanentlyAtomizeAndCopyCharsNonStaticValidLength(cx, atomSet, chars,
+ length, lookup);
+}
+
+JSAtom* js::AtomizeUTF8Chars(JSContext* cx, const char* utf8Chars,
+ size_t utf8ByteLength) {
+ {
+ StaticStrings& statics = cx->staticStrings();
+
+ // Handle all pure-ASCII UTF-8 static strings.
+ if (JSAtom* s = statics.lookup(utf8Chars, utf8ByteLength)) {
+ return s;
+ }
+
+    // The only non-ASCII static strings are the single-code-point strings
+ // U+0080 through U+00FF, encoded as
+ //
+ // 0b1100'00xx 0b10xx'xxxx
+ //
+    // where the encoded code point is the concatenation of the 'x' bits --
+    // and where the highest 'x' bit is necessarily 1 (because U+0080 through
+    // U+00FF all have the 0x80 bit set).
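+    //
+    // For example, the unit pair 0xC3 0xA9 passes both tests below and
+    // decodes to U+00E9: (0xC3 << 6) truncated to uint8_t is 0xC0,
+    // 0xA9 & 0b0011'1111 is 0x29, and 0xC0 | 0x29 == 0xE9.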
+ if (utf8ByteLength == 2) {
+ auto first = static_cast<uint8_t>(utf8Chars[0]);
+ if ((first & 0b1111'1110) == 0b1100'0010) {
+ auto second = static_cast<uint8_t>(utf8Chars[1]);
+ if (mozilla::IsTrailingUnit(mozilla::Utf8Unit(second))) {
+ uint8_t unit =
+ static_cast<uint8_t>(first << 6) | (second & 0b0011'1111);
+
+ MOZ_ASSERT(StaticStrings::hasUnit(unit));
+ return statics.getUnit(unit);
+ }
+ }
+
+ // Fallthrough code handles the cases where the two units aren't a Latin-1
+ // code point or are invalid.
+ }
+ }
+
+ size_t length;
+ HashNumber hash;
+ JS::SmallestEncoding forCopy;
+ JS::UTF8Chars utf8(utf8Chars, utf8ByteLength);
+ if (!GetUTF8AtomizationData(cx, utf8, &length, &forCopy, &hash)) {
+ return nullptr;
+ }
+
+ if (MOZ_UNLIKELY(!JSString::validateLength(cx, length))) {
+ return nullptr;
+ }
+
+ AtomizeUTF8CharsWrapper chars(utf8, forCopy);
+ AtomHasher::Lookup lookup(utf8Chars, utf8ByteLength, length, hash);
+ return AtomizeAndCopyCharsNonStaticValidLengthFromLookup(cx, &chars, length,
+ lookup, Nothing());
+}
+
+bool js::IndexToIdSlow(JSContext* cx, uint32_t index, MutableHandleId idp) {
+ MOZ_ASSERT(index > JS::PropertyKey::IntMax);
+
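+  // BackfillIndexInCharBuffer writes the decimal digits of |index| backwards
+  // from |end| and returns a pointer to the most significant digit.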
+ char16_t buf[UINT32_CHAR_BUFFER_LENGTH];
+ RangedPtr<char16_t> end(std::end(buf), buf, std::end(buf));
+ RangedPtr<char16_t> start = BackfillIndexInCharBuffer(index, end);
+
+ JSAtom* atom = AtomizeChars(cx, start.get(), end - start);
+ if (!atom) {
+ return false;
+ }
+
+ idp.set(JS::PropertyKey::NonIntAtom(atom));
+ return true;
+}
+
+template <AllowGC allowGC>
+static MOZ_ALWAYS_INLINE JSAtom* PrimitiveToAtom(JSContext* cx,
+ const Value& v) {
+ MOZ_ASSERT(v.isPrimitive());
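+  // Under NoGC, a failed atomization must not leave a pending OOM exception
+  // behind; each fallible case below clears it and returns nullptr.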
+ switch (v.type()) {
+ case ValueType::String: {
+ JSAtom* atom = AtomizeString(cx, v.toString());
+ if (!allowGC && !atom) {
+ cx->recoverFromOutOfMemory();
+ }
+ return atom;
+ }
+ case ValueType::Int32: {
+ JSAtom* atom = Int32ToAtom(cx, v.toInt32());
+ if (!allowGC && !atom) {
+ cx->recoverFromOutOfMemory();
+ }
+ return atom;
+ }
+ case ValueType::Double: {
+ JSAtom* atom = NumberToAtom(cx, v.toDouble());
+ if (!allowGC && !atom) {
+ cx->recoverFromOutOfMemory();
+ }
+ return atom;
+ }
+ case ValueType::Boolean:
+ return v.toBoolean() ? cx->names().true_ : cx->names().false_;
+ case ValueType::Null:
+ return cx->names().null;
+ case ValueType::Undefined:
+ return cx->names().undefined;
+ case ValueType::Symbol:
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ if constexpr (allowGC) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SYMBOL_TO_STRING);
+ }
+ return nullptr;
+ case ValueType::BigInt: {
+ RootedBigInt i(cx, v.toBigInt());
+ return BigIntToAtom<allowGC>(cx, i);
+ }
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+#endif
+ case ValueType::Object:
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ break;
+ }
+ MOZ_CRASH("Unexpected type");
+}
+
+template <AllowGC allowGC>
+static JSAtom* ToAtomSlow(
+ JSContext* cx, typename MaybeRooted<Value, allowGC>::HandleType arg) {
+ MOZ_ASSERT(!arg.isString());
+
+ Value v = arg;
+ if (!v.isPrimitive()) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ if (!allowGC) {
+ return nullptr;
+ }
+ RootedValue v2(cx, v);
+ if (!ToPrimitive(cx, JSTYPE_STRING, &v2)) {
+ return nullptr;
+ }
+ v = v2;
+ }
+
+ return PrimitiveToAtom<allowGC>(cx, v);
+}
+
+template <AllowGC allowGC>
+JSAtom* js::ToAtom(JSContext* cx,
+ typename MaybeRooted<Value, allowGC>::HandleType v) {
+ if (!v.isString()) {
+ return ToAtomSlow<allowGC>(cx, v);
+ }
+
+ JSString* str = v.toString();
+ if (str->isAtom()) {
+ return &str->asAtom();
+ }
+
+ JSAtom* atom = AtomizeString(cx, str);
+ if (!atom && !allowGC) {
+ MOZ_ASSERT_IF(!cx->isHelperThreadContext(), cx->isThrowingOutOfMemory());
+ cx->recoverFromOutOfMemory();
+ }
+ return atom;
+}
+
+template JSAtom* js::ToAtom<CanGC>(JSContext* cx, HandleValue v);
+template JSAtom* js::ToAtom<NoGC>(JSContext* cx, const Value& v);
+
+template <AllowGC allowGC>
+bool js::PrimitiveValueToIdSlow(
+ JSContext* cx, typename MaybeRooted<Value, allowGC>::HandleType v,
+ typename MaybeRooted<jsid, allowGC>::MutableHandleType idp) {
+ MOZ_ASSERT(v.isPrimitive());
+ MOZ_ASSERT(!v.isString());
+ MOZ_ASSERT(!v.isSymbol());
+ MOZ_ASSERT_IF(v.isInt32(), !PropertyKey::fitsInInt(v.toInt32()));
+
+ int32_t i;
+ if (v.isDouble() && mozilla::NumberEqualsInt32(v.toDouble(), &i) &&
+ PropertyKey::fitsInInt(i)) {
+ idp.set(PropertyKey::Int(i));
+ return true;
+ }
+
+ JSAtom* atom = PrimitiveToAtom<allowGC>(cx, v);
+ if (!atom) {
+ return false;
+ }
+
+ idp.set(AtomToId(atom));
+ return true;
+}
+
+template bool js::PrimitiveValueToIdSlow<CanGC>(JSContext* cx, HandleValue v,
+ MutableHandleId idp);
+template bool js::PrimitiveValueToIdSlow<NoGC>(JSContext* cx, const Value& v,
+ FakeMutableHandle<jsid> idp);
+
+#ifdef ENABLE_RECORD_TUPLE
+bool js::EnsureAtomized(JSContext* cx, MutableHandleValue v, bool* updated) {
+ if (v.isString()) {
+ if (v.toString()->isAtom()) {
+ *updated = false;
+ return true;
+ }
+
+ JSAtom* atom = AtomizeString(cx, v.toString());
+ if (!atom) {
+ return false;
+ }
+ v.setString(atom);
+ *updated = true;
+ return true;
+ }
+
+ *updated = false;
+
+ if (v.isExtendedPrimitive()) {
+ JSObject& obj = v.toExtendedPrimitive();
+ if (obj.is<RecordType>()) {
+ return obj.as<RecordType>().ensureAtomized(cx);
+ }
+ MOZ_ASSERT(obj.is<TupleType>());
+ return obj.as<TupleType>().ensureAtomized(cx);
+ }
+ return true;
+}
+#endif
+
+Handle<PropertyName*> js::ClassName(JSProtoKey key, JSContext* cx) {
+ return ClassName(key, cx->names());
+}
diff --git a/js/src/vm/JSAtom.h b/js/src/vm/JSAtom.h
new file mode 100644
index 0000000000..6c06025485
--- /dev/null
+++ b/js/src/vm/JSAtom.h
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSAtom_h
+#define vm_JSAtom_h
+
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Maybe.h"
+
+#include "NamespaceImports.h"
+
+#include "gc/MaybeRooted.h"
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+
+namespace js {
+
+class AtomSet;
+
+/*
+ * Return a printable, lossless char[] representation of a string-type atom.
+ * The returned string is guaranteed to contain only ASCII characters.
+ */
+extern UniqueChars AtomToPrintableString(JSContext* cx, JSAtom* atom);
+
+class PropertyName;
+
+} /* namespace js */
+
+namespace js {
+
+/*
+ * Atom tracing and garbage collection hooks.
+ */
+void TraceAtoms(JSTracer* trc);
+
+extern JSAtom* Atomize(
+ JSContext* cx, const char* bytes, size_t length,
+ const mozilla::Maybe<uint32_t>& indexValue = mozilla::Nothing());
+
+extern JSAtom* AtomizeWithoutActiveZone(JSContext* cx, const char* bytes,
+ size_t length);
+
+template <typename CharT>
+extern JSAtom* AtomizeChars(JSContext* cx, const CharT* chars, size_t length);
+
+/*
+ * Optimized entry points for atomization.
+ *
+ * The meaning of each suffix:
+ * * "NonStatic": characters don't match StaticStrings
+ * * "ValidLength": length fits JSString::MAX_LENGTH
+ */
+
+/* Atomize characters when the value of HashString is already known. */
+template <typename CharT>
+extern JSAtom* AtomizeCharsNonStaticValidLength(JSContext* cx,
+ mozilla::HashNumber hash,
+ const CharT* chars,
+ size_t length);
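+
+/*
+ * A minimal caller sketch (illustrative, not a prescribed usage): the
+ * preconditions can be established with the ordinary entry points, and the
+ * expected hash is the mozilla::HashString value of the characters.
+ *
+ *   if (!cx->staticStrings().lookup(chars, length) &&
+ *       JSString::validateLength(cx, length)) {
+ *     JSAtom* atom = AtomizeCharsNonStaticValidLength(
+ *         cx, mozilla::HashString(chars, length), chars, length);
+ *   }
+ */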
+
+/**
+ * Permanently atomize characters.
+ *
+ * `chars` must not match any StaticStrings entry.
+ * `length` must already have been validated by JSString::validateLength.
+ */
+extern JSAtom* PermanentlyAtomizeCharsNonStaticValidLength(
+ JSContext* cx, AtomSet& atomSet, mozilla::HashNumber hash,
+ const Latin1Char* chars, size_t length);
+
+/**
+ * Create an atom whose contents are those of the |utf8ByteLength| code units
+ * starting at |utf8Chars|, interpreted as UTF-8.
+ *
+ * Throws if the code units do not contain valid UTF-8.
+ */
+extern JSAtom* AtomizeUTF8Chars(JSContext* cx, const char* utf8Chars,
+ size_t utf8ByteLength);
+
+extern JSAtom* AtomizeString(JSContext* cx, JSString* str);
+
+template <AllowGC allowGC>
+extern JSAtom* ToAtom(JSContext* cx,
+ typename MaybeRooted<JS::Value, allowGC>::HandleType v);
+
+/*
+ * Pin an atom so that it is never collected. Avoid using this if possible.
+ *
+ * This function does not GC.
+ */
+extern bool PinAtom(JSContext* cx, JSAtom* atom);
+
+#ifdef ENABLE_RECORD_TUPLE
+extern bool EnsureAtomized(JSContext* cx, MutableHandleValue v, bool* updated);
+#endif
+
+extern JS::Handle<PropertyName*> ClassName(JSProtoKey key, JSContext* cx);
+
+#ifdef DEBUG
+
+bool AtomIsMarked(JS::Zone* zone, JSAtom* atom);
+bool AtomIsMarked(JS::Zone* zone, jsid id);
+bool AtomIsMarked(JS::Zone* zone, const JS::Value& value);
+
+#endif // DEBUG
+
+} /* namespace js */
+
+#endif /* vm_JSAtom_h */
diff --git a/js/src/vm/JSAtomState.h b/js/src/vm/JSAtomState.h
new file mode 100644
index 0000000000..890e69da16
--- /dev/null
+++ b/js/src/vm/JSAtomState.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSAtomState_h
+#define vm_JSAtomState_h
+
+#include "gc/Barrier.h"
+#include "js/ProtoKey.h"
+#include "js/Symbol.h"
+#include "vm/CommonPropertyNames.h"
+
+namespace js {
+class PropertyName;
+} // namespace js
+
+/* Various built-in or commonly-used names pinned on the first context. */
+struct JSAtomState {
+#define PROPERTYNAME_FIELD(idpart, id, text) \
+ js::ImmutableTenuredPtr<js::PropertyName*> id;
+ FOR_EACH_COMMON_PROPERTYNAME(PROPERTYNAME_FIELD)
+#undef PROPERTYNAME_FIELD
+#define PROPERTYNAME_FIELD(name, clasp) \
+ js::ImmutableTenuredPtr<js::PropertyName*> name;
+ JS_FOR_EACH_PROTOTYPE(PROPERTYNAME_FIELD)
+#undef PROPERTYNAME_FIELD
+#define PROPERTYNAME_FIELD(name) \
+ js::ImmutableTenuredPtr<js::PropertyName*> name;
+ JS_FOR_EACH_WELL_KNOWN_SYMBOL(PROPERTYNAME_FIELD)
+#undef PROPERTYNAME_FIELD
+#define PROPERTYNAME_FIELD(name) \
+ js::ImmutableTenuredPtr<js::PropertyName*> Symbol_##name;
+ JS_FOR_EACH_WELL_KNOWN_SYMBOL(PROPERTYNAME_FIELD)
+#undef PROPERTYNAME_FIELD
+
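+  // Each JS_FOR_EACH_WELL_KNOWN_SYMBOL expansion below emits a `return`
+  // statement per symbol; only the first executes, yielding a pointer to the
+  // first field, which serves as the base of the contiguous run of
+  // well-known symbol name fields.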
+ js::ImmutableTenuredPtr<js::PropertyName*>* wellKnownSymbolNames() {
+#define FIRST_PROPERTYNAME_FIELD(name) return &name;
+ JS_FOR_EACH_WELL_KNOWN_SYMBOL(FIRST_PROPERTYNAME_FIELD)
+#undef FIRST_PROPERTYNAME_FIELD
+ }
+
+ js::ImmutableTenuredPtr<js::PropertyName*>* wellKnownSymbolDescriptions() {
+#define FIRST_PROPERTYNAME_FIELD(name) return &Symbol_##name;
+ JS_FOR_EACH_WELL_KNOWN_SYMBOL(FIRST_PROPERTYNAME_FIELD)
+#undef FIRST_PROPERTYNAME_FIELD
+ }
+};
+
+namespace js {
+
+#define NAME_OFFSET(name) offsetof(JSAtomState, name)
+
+inline Handle<PropertyName*> AtomStateOffsetToName(const JSAtomState& atomState,
+ size_t offset) {
+ return *reinterpret_cast<js::ImmutableTenuredPtr<js::PropertyName*>*>(
+ (char*)&atomState + offset);
+}
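+
+/*
+ * Illustrative (hypothetical) call site, using the common property name
+ * `length` as the example field:
+ *
+ *   Handle<PropertyName*> name =
+ *       AtomStateOffsetToName(cx->names(), NAME_OFFSET(length));
+ */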
+
+} /* namespace js */
+
+#endif /* vm_JSAtomState_h */
diff --git a/js/src/vm/JSContext-inl.h b/js/src/vm/JSContext-inl.h
new file mode 100644
index 0000000000..fe6d1f303f
--- /dev/null
+++ b/js/src/vm/JSContext-inl.h
@@ -0,0 +1,407 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSContext_inl_h
+#define vm_JSContext_inl_h
+
+#include "vm/JSContext.h"
+
+#include <type_traits>
+
+#include "gc/Marking.h"
+#include "gc/Zone.h"
+#include "jit/JitFrames.h"
+#include "util/DiagnosticAssertions.h"
+#include "vm/BigIntType.h"
+#include "vm/GlobalObject.h"
+#include "vm/Realm.h"
+
+#include "vm/Activation-inl.h" // js::Activation::hasWasmExitFP
+
+namespace js {
+
+class ContextChecks {
+ JSContext* cx;
+
+ JS::Realm* realm() const { return cx->realm(); }
+ JS::Compartment* compartment() const { return cx->compartment(); }
+ JS::Zone* zone() const { return cx->zone(); }
+
+ public:
+ explicit ContextChecks(JSContext* cx) : cx(cx) {
+#ifdef DEBUG
+ if (realm()) {
+ GlobalObject* global = realm()->unsafeUnbarrieredMaybeGlobal();
+ if (global) {
+ checkObject(global);
+ }
+ }
+#endif
+ }
+
+ /*
+ * Set a breakpoint here (break js::ContextChecks::fail) to debug
+ * realm/compartment/zone mismatches.
+ */
+ static void fail(JS::Realm* r1, JS::Realm* r2, int argIndex) {
+ MOZ_CRASH_UNSAFE_PRINTF("*** Realm mismatch %p vs. %p at argument %d", r1,
+ r2, argIndex);
+ }
+ static void fail(JS::Compartment* c1, JS::Compartment* c2, int argIndex) {
+ MOZ_CRASH_UNSAFE_PRINTF("*** Compartment mismatch %p vs. %p at argument %d",
+ c1, c2, argIndex);
+ }
+ static void fail(JS::Zone* z1, JS::Zone* z2, int argIndex) {
+ MOZ_CRASH_UNSAFE_PRINTF("*** Zone mismatch %p vs. %p at argument %d", z1,
+ z2, argIndex);
+ }
+
+ void check(JS::Realm* r, int argIndex) {
+ if (r && r != realm()) {
+ fail(realm(), r, argIndex);
+ }
+ }
+
+ void check(JS::Compartment* c, int argIndex) {
+ if (c && c != compartment()) {
+ fail(compartment(), c, argIndex);
+ }
+ }
+
+ void check(JS::Zone* z, int argIndex) {
+ if (zone() && z != zone()) {
+ fail(zone(), z, argIndex);
+ }
+ }
+
+ void check(JSObject* obj, int argIndex) {
+ if (obj) {
+ checkObject(obj);
+ check(obj->compartment(), argIndex);
+ }
+ }
+
+ void checkObject(JSObject* obj) {
+ JS::AssertObjectIsNotGray(obj);
+ MOZ_ASSERT(!js::gc::IsAboutToBeFinalizedUnbarriered(obj));
+ }
+
+ template <typename T>
+ void checkAtom(T* thing, int argIndex) {
+ static_assert(std::is_same_v<T, JSAtom> || std::is_same_v<T, JS::Symbol>,
+ "Should only be called with JSAtom* or JS::Symbol* argument");
+
+#ifdef DEBUG
+    // Atoms which move across zone boundaries need to be marked in the new
+    // zone; see JS_MarkCrossZoneId.
+ if (zone()) {
+ if (!cx->runtime()->gc.atomMarking.atomIsMarked(zone(), thing)) {
+ MOZ_CRASH_UNSAFE_PRINTF(
+ "*** Atom not marked for zone %p at argument %d", zone(), argIndex);
+ }
+ }
+#endif
+ }
+
+ void check(JSString* str, int argIndex) {
+ JS::AssertCellIsNotGray(str);
+ if (str->isAtom()) {
+ checkAtom(&str->asAtom(), argIndex);
+ } else {
+ check(str->zone(), argIndex);
+ }
+ }
+
+ void check(JS::Symbol* symbol, int argIndex) { checkAtom(symbol, argIndex); }
+
+ void check(JS::BigInt* bi, int argIndex) { check(bi->zone(), argIndex); }
+
+ void check(const js::Value& v, int argIndex) {
+ if (v.isObject()) {
+ check(&v.toObject(), argIndex);
+ } else if (v.isString()) {
+ check(v.toString(), argIndex);
+ } else if (v.isSymbol()) {
+ check(v.toSymbol(), argIndex);
+ } else if (v.isBigInt()) {
+ check(v.toBigInt(), argIndex);
+ }
+ }
+
+  // Check the contents of any container class that supports the C++
+  // iteration protocol, e.g. GCVector<jsid>.
+ template <typename Container>
+ std::enable_if_t<std::is_same_v<decltype(std::declval<Container>().begin()),
+ decltype(std::declval<Container>().end())>>
+ check(const Container& container, int argIndex) {
+ for (auto i : container) {
+ check(i, argIndex);
+ }
+ }
+
+ void check(const JS::HandleValueArray& arr, int argIndex) {
+ for (size_t i = 0; i < arr.length(); i++) {
+ check(arr[i], argIndex);
+ }
+ }
+
+ void check(const CallArgs& args, int argIndex) {
+ for (Value* p = args.base(); p != args.end(); ++p) {
+ check(*p, argIndex);
+ }
+ }
+
+ void check(jsid id, int argIndex) {
+ if (id.isAtom()) {
+ checkAtom(id.toAtom(), argIndex);
+ } else if (id.isSymbol()) {
+ checkAtom(id.toSymbol(), argIndex);
+ } else {
+ MOZ_ASSERT(!id.isGCThing());
+ }
+ }
+
+ void check(JSScript* script, int argIndex) {
+ JS::AssertCellIsNotGray(script);
+ if (script) {
+ check(script->realm(), argIndex);
+ }
+ }
+
+ void check(AbstractFramePtr frame, int argIndex);
+
+ void check(const PropertyDescriptor& desc, int argIndex) {
+ if (desc.hasGetter()) {
+ check(desc.getter(), argIndex);
+ }
+ if (desc.hasSetter()) {
+ check(desc.setter(), argIndex);
+ }
+ if (desc.hasValue()) {
+ check(desc.value(), argIndex);
+ }
+ }
+
+ void check(Handle<mozilla::Maybe<Value>> maybe, int argIndex) {
+ if (maybe.get().isSome()) {
+ check(maybe.get().ref(), argIndex);
+ }
+ }
+
+ void check(Handle<mozilla::Maybe<PropertyDescriptor>> maybe, int argIndex) {
+ if (maybe.get().isSome()) {
+ check(maybe.get().ref(), argIndex);
+ }
+ }
+};
+
+} // namespace js
+
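+// JSContext::checkImpl expands a C++17 fold over the comma operator: one
+// ContextChecks::check(arg, argIndex) call per argument, left to right.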
+template <class... Args>
+inline void JSContext::checkImpl(const Args&... args) {
+ int argIndex = 0;
+ (..., js::ContextChecks(this).check(args, argIndex++));
+}
+
+template <class... Args>
+inline void JSContext::check(const Args&... args) {
+#ifdef JS_CRASH_DIAGNOSTICS
+ if (contextChecksEnabled()) {
+ checkImpl(args...);
+ }
+#endif
+}
+
+template <class... Args>
+inline void JSContext::releaseCheck(const Args&... args) {
+ if (contextChecksEnabled()) {
+ checkImpl(args...);
+ }
+}
+
+template <class... Args>
+MOZ_ALWAYS_INLINE void JSContext::debugOnlyCheck(const Args&... args) {
+#if defined(DEBUG) && defined(JS_CRASH_DIAGNOSTICS)
+ if (contextChecksEnabled()) {
+ checkImpl(args...);
+ }
+#endif
+}
+
+namespace js {
+
+STATIC_PRECONDITION_ASSUME(ubound(args.argv_) >= argc)
+MOZ_ALWAYS_INLINE bool CallNativeImpl(JSContext* cx, NativeImpl impl,
+ const CallArgs& args) {
+#ifdef DEBUG
+ bool alreadyThrowing = cx->isExceptionPending();
+#endif
+ cx->check(args);
+ bool ok = impl(cx, args);
+ if (ok) {
+ cx->check(args.rval());
+ MOZ_ASSERT_IF(!alreadyThrowing, !cx->isExceptionPending());
+ }
+ return ok;
+}
+
+MOZ_ALWAYS_INLINE bool CheckForInterrupt(JSContext* cx) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+ // Add an inline fast-path since we have to check for interrupts in some hot
+ // C++ loops of library builtins.
+ if (MOZ_UNLIKELY(cx->hasAnyPendingInterrupt())) {
+ return cx->handleInterrupt();
+ }
+
+ JS_INTERRUPT_POSSIBLY_FAIL();
+
+ return true;
+}
+
+} /* namespace js */
+
+inline js::Nursery& JSContext::nursery() { return runtime()->gc.nursery(); }
+
+inline void JSContext::minorGC(JS::GCReason reason) {
+ runtime()->gc.minorGC(reason);
+}
+
+inline bool JSContext::runningWithTrustedPrincipals() {
+ if (!realm()) {
+ return true;
+ }
+ if (!runtime()->trustedPrincipals()) {
+ return false;
+ }
+ return realm()->principals() == runtime()->trustedPrincipals();
+}
+
+inline void JSContext::enterRealm(JS::Realm* realm) {
+ // We should never enter a realm while in the atoms zone.
+ MOZ_ASSERT_IF(zone(), !zone()->isAtomsZone());
+
+ realm->enter();
+ setRealm(realm);
+}
+
+inline void JSContext::enterAtomsZone() {
+ realm_ = nullptr;
+ setZone(runtime_->unsafeAtomsZone());
+}
+
+inline void JSContext::setZone(js::Zone* zone) {
+ MOZ_ASSERT(!isHelperThreadContext());
+ zone_ = zone;
+}
+
+inline void JSContext::enterRealmOf(JSObject* target) {
+ JS::AssertCellIsNotGray(target);
+ enterRealm(target->nonCCWRealm());
+}
+
+inline void JSContext::enterRealmOf(JSScript* target) {
+ JS::AssertCellIsNotGray(target);
+ enterRealm(target->realm());
+}
+
+inline void JSContext::enterRealmOf(js::Shape* target) {
+ JS::AssertCellIsNotGray(target);
+ enterRealm(target->realm());
+}
+
+inline void JSContext::enterNullRealm() {
+ // We should never enter a realm while in the atoms zone.
+ MOZ_ASSERT_IF(zone(), !zone()->isAtomsZone());
+
+ setRealm(nullptr);
+}
+
+inline void JSContext::leaveRealm(JS::Realm* oldRealm) {
+ // Only call leave() after we've setRealm()-ed away from the current realm.
+ JS::Realm* startingRealm = realm_;
+
+ // The current realm should be marked as entered-from-C++ at this point.
+ MOZ_ASSERT_IF(startingRealm, startingRealm->hasBeenEnteredIgnoringJit());
+
+ setRealm(oldRealm);
+
+ if (startingRealm) {
+ startingRealm->leave();
+ }
+}
+
+inline void JSContext::leaveAtomsZone(JS::Realm* oldRealm) {
+ setRealm(oldRealm);
+}
+
+inline void JSContext::setRealm(JS::Realm* realm) {
+ realm_ = realm;
+ if (realm) {
+ // This thread must have exclusive access to the zone.
+ MOZ_ASSERT(CurrentThreadCanAccessZone(realm->zone()));
+ MOZ_ASSERT(!realm->zone()->isAtomsZone());
+ setZone(realm->zone());
+ } else {
+ setZone(nullptr);
+ }
+}
+
+inline void JSContext::setRealmForJitExceptionHandler(JS::Realm* realm) {
+ // JIT code enters (same-compartment) realms without calling realm->enter()
+ // so we don't call realm->leave() here.
+ MOZ_ASSERT(realm->compartment() == compartment());
+ realm_ = realm;
+}
+
+inline JSScript* JSContext::currentScript(
+ jsbytecode** ppc, AllowCrossRealm allowCrossRealm) const {
+ if (ppc) {
+ *ppc = nullptr;
+ }
+
+ js::Activation* act = activation();
+ if (!act) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(act->cx() == this);
+
+ // Cross-compartment implies cross-realm.
+ if (allowCrossRealm == AllowCrossRealm::DontAllow &&
+ act->compartment() != compartment()) {
+ return nullptr;
+ }
+
+ JSScript* script = nullptr;
+ jsbytecode* pc = nullptr;
+ if (act->isJit()) {
+ if (act->hasWasmExitFP()) {
+ return nullptr;
+ }
+ js::jit::GetPcScript(const_cast<JSContext*>(this), &script, &pc);
+ } else {
+ js::InterpreterFrame* fp = act->asInterpreter()->current();
+ MOZ_ASSERT(!fp->runningInJit());
+ script = fp->script();
+ pc = act->asInterpreter()->regs().pc;
+ }
+
+ MOZ_ASSERT(script->containsPC(pc));
+
+ if (allowCrossRealm == AllowCrossRealm::DontAllow &&
+ script->realm() != realm()) {
+ return nullptr;
+ }
+
+ if (ppc) {
+ *ppc = pc;
+ }
+ return script;
+}
+
+inline js::RuntimeCaches& JSContext::caches() { return runtime()->caches(); }
+
+#endif /* vm_JSContext_inl_h */
diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
new file mode 100644
index 0000000000..fd24c4d673
--- /dev/null
+++ b/js/src/vm/JSContext.cpp
@@ -0,0 +1,1386 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS execution context.
+ */
+
+#include "vm/JSContext-inl.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/Utf8.h" // mozilla::ConvertUtf16ToUtf8
+
+#include <string.h>
+#ifdef ANDROID
+# include <android/log.h>
+# include <fstream>
+# include <string>
+#endif // ANDROID
+#ifdef XP_WIN
+# include <processthreadsapi.h>
+#endif // XP_WIN
+
+#include "jsapi.h" // JS_SetNativeStackQuota
+#include "jsexn.h"
+#include "jstypes.h"
+
+#include "frontend/FrontendContext.h"
+#include "gc/GC.h"
+#include "irregexp/RegExpAPI.h"
+#include "jit/Simulator.h"
+#include "js/CallAndConstruct.h" // JS::Call
+#include "js/CharacterEncoding.h"
+#include "js/ContextOptions.h" // JS::ContextOptions
+#include "js/ErrorInterceptor.h" // JSErrorInterceptor
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::ReportOverRecursed
+#include "js/MemoryCallbacks.h"
+#include "js/Printf.h"
+#include "js/PropertyAndElement.h" // JS_GetProperty
+#include "js/Stack.h" // JS::NativeStackSize, JS::NativeStackLimit, JS::NativeStackLimitMin
+#include "util/DiagnosticAssertions.h"
+#include "util/DifferentialTesting.h"
+#include "util/DoubleToString.h"
+#include "util/NativeStack.h"
+#include "util/Text.h"
+#include "util/WindowsWrapper.h"
+#include "vm/BytecodeUtil.h" // JSDVG_IGNORE_STACK
+#include "vm/ErrorObject.h"
+#include "vm/ErrorReporting.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/Realm.h"
+#include "vm/StringType.h" // StringToNewUTF8CharsZ
+#include "vm/ToSource.h" // js::ValueToSource
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#include "vm/Compartment-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+
+#ifdef DEBUG
+JSContext* js::MaybeGetJSContext() {
+ if (!TlsContext.init()) {
+ return nullptr;
+ }
+ return TlsContext.get();
+}
+#endif
+
+bool js::AutoCycleDetector::init() {
+ MOZ_ASSERT(cyclic);
+
+ AutoCycleDetector::Vector& vector = cx->cycleDetectorVector();
+
+ for (JSObject* obj2 : vector) {
+ if (MOZ_UNLIKELY(obj == obj2)) {
+ return true;
+ }
+ }
+
+ if (!vector.append(obj)) {
+ return false;
+ }
+
+ cyclic = false;
+ return true;
+}
+
+js::AutoCycleDetector::~AutoCycleDetector() {
+ if (MOZ_LIKELY(!cyclic)) {
+ AutoCycleDetector::Vector& vec = cx->cycleDetectorVector();
+ MOZ_ASSERT(vec.back() == obj);
+ if (vec.length() > 1) {
+ vec.popBack();
+ } else {
+ // Avoid holding on to unused heap allocations.
+ vec.clearAndFree();
+ }
+ }
+}
+
+bool JSContext::init(ContextKind kind) {
+ // Skip most of the initialization if this thread will not be running JS.
+ if (kind == ContextKind::MainThread) {
+ TlsContext.set(this);
+ currentThread_ = ThreadId::ThisThreadId();
+ nativeStackBase_.emplace(GetNativeStackBase());
+
+ if (!fx.initInstance()) {
+ return false;
+ }
+
+#ifdef JS_SIMULATOR
+ simulator_ = jit::Simulator::Create();
+ if (!simulator_) {
+ return false;
+ }
+#endif
+ }
+
+ isolate = irregexp::CreateIsolate(this);
+ if (!isolate) {
+ return false;
+ }
+
+ // Set the ContextKind last, so that ProtectedData checks will allow us to
+ // initialize this context before it becomes the runtime's active context.
+ kind_ = kind;
+
+ return true;
+}
+
+static void InitDefaultStackQuota(JSContext* cx) {
+ // Initialize stack quota to a reasonable default. Embedders can override this
+ // by calling JS_SetNativeStackQuota.
+ //
+ // NOTE: Firefox overrides these values. For the main thread this happens in
+ // XPCJSContext::Initialize.
+
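+  // Numerically, 128 * sizeof(size_t) * 1024 is 512 KiB on 32-bit targets
+  // and 1 MiB on 64-bit targets; ASan and non-Windows debug builds double
+  // that.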
+#if defined(MOZ_ASAN) || (defined(DEBUG) && !defined(XP_WIN))
+ static constexpr JS::NativeStackSize MaxStackSize =
+ 2 * 128 * sizeof(size_t) * 1024;
+#else
+ static constexpr JS::NativeStackSize MaxStackSize =
+ 128 * sizeof(size_t) * 1024;
+#endif
+ JS_SetNativeStackQuota(cx, MaxStackSize);
+}
+
+JSContext* js::NewContext(uint32_t maxBytes, JSRuntime* parentRuntime) {
+ AutoNoteSingleThreadedRegion anstr;
+
+ MOZ_RELEASE_ASSERT(!TlsContext.get());
+
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ js::oom::SetThreadType(!parentRuntime ? js::THREAD_TYPE_MAIN
+ : js::THREAD_TYPE_WORKER);
+#endif
+
+ JSRuntime* runtime = js_new<JSRuntime>(parentRuntime);
+ if (!runtime) {
+ return nullptr;
+ }
+
+ JSContext* cx = js_new<JSContext>(runtime, JS::ContextOptions());
+ if (!cx) {
+ js_delete(runtime);
+ return nullptr;
+ }
+
+ if (!cx->init(ContextKind::MainThread)) {
+ js_delete(cx);
+ js_delete(runtime);
+ return nullptr;
+ }
+
+ if (!runtime->init(cx, maxBytes)) {
+ runtime->destroyRuntime();
+ js_delete(cx);
+ js_delete(runtime);
+ return nullptr;
+ }
+
+ // Initialize stack quota last because simulators rely on the JSRuntime having
+ // been initialized.
+ if (cx->isMainThreadContext()) {
+ InitDefaultStackQuota(cx);
+ }
+
+ return cx;
+}
+
+void js::DestroyContext(JSContext* cx) {
+ JS_AbortIfWrongThread(cx);
+
+ MOZ_ASSERT(!cx->realm(), "Shouldn't destroy context with active realm");
+ MOZ_ASSERT(!cx->activation(), "Shouldn't destroy context with activations");
+
+ cx->checkNoGCRooters();
+
+  // Cancel all off-thread Ion compiles. Completed Ion compiles may try to
+ // interrupt this context. See HelperThread::handleIonWorkload.
+ CancelOffThreadIonCompile(cx->runtime());
+
+ cx->jobQueue = nullptr;
+ cx->internalJobQueue = nullptr;
+ SetContextProfilingStack(cx, nullptr);
+
+ JSRuntime* rt = cx->runtime();
+
+ // Flush promise tasks executing in helper threads early, before any parts
+ // of the JSRuntime that might be visible to helper threads are torn down.
+ rt->offThreadPromiseState.ref().shutdown(cx);
+
+ // Destroy the runtime along with its last context.
+ js::AutoNoteSingleThreadedRegion nochecks;
+ rt->destroyRuntime();
+ js_delete_poison(cx);
+ js_delete_poison(rt);
+}
+
+void JS::RootingContext::checkNoGCRooters() {
+#ifdef DEBUG
+ for (auto const& stackRootPtr : stackRoots_) {
+ MOZ_ASSERT(stackRootPtr == nullptr);
+ }
+#endif
+}
+
+bool AutoResolving::alreadyStartedSlow() const {
+ MOZ_ASSERT(link);
+ AutoResolving* cursor = link;
+ do {
+ MOZ_ASSERT(this != cursor);
+ if (object.get() == cursor->object && id.get() == cursor->id &&
+ kind == cursor->kind) {
+ return true;
+ }
+ } while (!!(cursor = cursor->link));
+ return false;
+}
+
+static void MaybeReportOutOfMemoryForDifferentialTesting() {
+ /*
+ * OOMs are non-deterministic, especially across different execution modes
+ * (e.g. interpreter vs JIT). When doing differential testing, print to stderr
+ * so that the fuzzers can detect this.
+ */
+ if (js::SupportDifferentialTesting()) {
+ fprintf(stderr, "ReportOutOfMemory called\n");
+ }
+}
+
+/*
+ * Since memory has been exhausted, avoid the normal error-handling path which
+ * allocates an error object, report and callstack. Instead simply throw the
+ * static atom "out of memory".
+ *
+ * Furthermore, callers of ReportOutOfMemory (viz., malloc) assume a GC does
+ * not occur, so GC must be avoided or suppressed.
+ */
+void JSContext::onOutOfMemory() {
+ runtime()->hadOutOfMemory = true;
+ gc::AutoSuppressGC suppressGC(this);
+
+ /* Report the oom. */
+ if (JS::OutOfMemoryCallback oomCallback = runtime()->oomCallback) {
+ oomCallback(this, runtime()->oomCallbackData);
+ }
+
+  // If we OOM early in process startup, this may be unavailable, so just
+  // return instead of crashing unexpectedly.
+ if (MOZ_UNLIKELY(!runtime()->hasInitializedSelfHosting())) {
+ return;
+ }
+
+ RootedValue oomMessage(this, StringValue(names().outOfMemory));
+ setPendingException(oomMessage, nullptr);
+ MOZ_ASSERT(status == JS::ExceptionStatus::Throwing);
+ status = JS::ExceptionStatus::OutOfMemory;
+
+ reportResourceExhaustion();
+}
+
+JS_PUBLIC_API void js::ReportOutOfMemory(JSContext* cx) {
+ MaybeReportOutOfMemoryForDifferentialTesting();
+
+ MOZ_ASSERT(cx->isMainThreadContext());
+
+ cx->onOutOfMemory();
+}
+
+JS_PUBLIC_API void js::ReportOutOfMemory(FrontendContext* fc) {
+ MaybeReportOutOfMemoryForDifferentialTesting();
+
+ fc->onOutOfMemory();
+}
+
+static void MaybeReportOverRecursedForDifferentialTesting() {
+ /*
+ * We cannot make stack depth deterministic across different
+ * implementations (e.g. JIT vs. interpreter will differ in
+ * their maximum stack depth).
+ * However, we can detect externally when we hit the maximum
+ * stack depth which is useful for external testing programs
+ * like fuzzers.
+ */
+ if (js::SupportDifferentialTesting()) {
+ fprintf(stderr, "ReportOverRecursed called\n");
+ }
+}
+
+void JSContext::onOverRecursed() {
+ if (isHelperThreadContext()) {
+ addPendingOverRecursed();
+ } else {
+ // Try to construct an over-recursed error and then update the exception
+    // status to `OverRecursed`. Creating the error can fail, so check that a
+    // reasonable-looking exception is pending before updating the status.
+ JS_ReportErrorNumberASCII(this, GetErrorMessage, nullptr,
+ JSMSG_OVER_RECURSED);
+ if (isExceptionPending() && !isThrowingOutOfMemory()) {
+ MOZ_ASSERT(unwrappedException().isObject());
+ MOZ_ASSERT(status == JS::ExceptionStatus::Throwing);
+ status = JS::ExceptionStatus::OverRecursed;
+ }
+ }
+
+ reportResourceExhaustion();
+}
+
+JS_PUBLIC_API void js::ReportOverRecursed(JSContext* maybecx) {
+ MaybeReportOverRecursedForDifferentialTesting();
+
+ if (!maybecx) {
+ return;
+ }
+ MOZ_ASSERT(maybecx->isMainThreadContext());
+
+ maybecx->onOverRecursed();
+}
+
+JS_PUBLIC_API void js::ReportOverRecursed(FrontendContext* fc) {
+ MaybeReportOverRecursedForDifferentialTesting();
+
+ fc->onOverRecursed();
+}
+
+void js::ReportOversizedAllocation(JSContext* cx, const unsigned errorNumber) {
+ // The JIT may optimize away allocations if it determines that they aren't
+ // used. This can affect whether we throw an exception when the size of an
+  // allocation exceeds implementation-defined limits
+  // (e.g. JSString::MAX_LENGTH). These errors aren't interesting for the
+  // purposes of differential fuzzing.
+ // We print a message so that fuzzers can detect this case. To simplify
+ // tooling updates, we use the same message as ReportOutOfMemory.
+ if (js::SupportDifferentialTesting()) {
+ fprintf(stderr, "ReportOutOfMemory called\n");
+ }
+
+ gc::AutoSuppressGC suppressGC(cx);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, errorNumber);
+
+ cx->reportResourceExhaustion();
+}
+
+void js::ReportAllocationOverflow(JSContext* cx) {
+ if (js::SupportDifferentialTesting()) {
+ fprintf(stderr, "ReportAllocationOverflow called\n");
+ }
+
+ if (!cx) {
+ return;
+ }
+ MOZ_ASSERT(cx->isMainThreadContext());
+
+ cx->reportAllocationOverflow();
+}
+
+void js::ReportAllocationOverflow(FrontendContext* fc) {
+ fc->onAllocationOverflow();
+}
+
+/* |callee| requires a usage string provided by JS_DefineFunctionsWithHelp. */
+void js::ReportUsageErrorASCII(JSContext* cx, HandleObject callee,
+ const char* msg) {
+ RootedValue usage(cx);
+ if (!JS_GetProperty(cx, callee, "usage", &usage)) {
+ return;
+ }
+
+ if (!usage.isString()) {
+ JS_ReportErrorASCII(cx, "%s", msg);
+ } else {
+ RootedString usageStr(cx, usage.toString());
+ UniqueChars str = JS_EncodeStringToUTF8(cx, usageStr);
+ if (!str) {
+ return;
+ }
+ JS_ReportErrorUTF8(cx, "%s. Usage: %s", msg, str.get());
+ }
+}
+
+enum class PrintErrorKind { Error, Warning, Note };
+
+static void PrintErrorLine(FILE* file, const char* prefix,
+ JSErrorReport* report) {
+ if (const char16_t* linebuf = report->linebuf()) {
+ UniqueChars line;
+ size_t n;
+ {
+ size_t linebufLen = report->linebufLength();
+
+      // This function is only used for shell and command-line error
+      // reporting, where performance doesn't really matter, so just encode
+      // into a maximally-sized buffer.
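+      // (A single UTF-16 code unit encodes to at most 3 UTF-8 bytes; a
+      // surrogate pair is 2 units and at most 4 bytes, so 3x the unit count
+      // always suffices.)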
+ mozilla::CheckedInt<size_t> utf8Len(linebufLen);
+ utf8Len *= 3;
+ if (utf8Len.isValid()) {
+ line = UniqueChars(js_pod_malloc<char>(utf8Len.value()));
+ if (line) {
+ n = mozilla::ConvertUtf16toUtf8({linebuf, linebufLen},
+ {line.get(), utf8Len.value()});
+ }
+ }
+ }
+
+ const char* utf8buf;
+ if (line) {
+ utf8buf = line.get();
+ } else {
+ static const char unavailableStr[] = "<context unavailable>";
+ utf8buf = unavailableStr;
+ n = js_strlen(unavailableStr);
+ }
+
+ fputs(":\n", file);
+ if (prefix) {
+ fputs(prefix, file);
+ }
+
+ for (size_t i = 0; i < n; i++) {
+ fputc(utf8buf[i], file);
+ }
+
+ // linebuf/utf8buf usually ends with a newline. If not, add one here.
+ if (n == 0 || utf8buf[n - 1] != '\n') {
+ fputc('\n', file);
+ }
+
+ if (prefix) {
+ fputs(prefix, file);
+ }
+
+ n = report->tokenOffset();
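+    // Print one alignment character per column up to the token, expanding
+    // tabs to 8-column tab stops ((j + 8) & ~7 is the next multiple of 8
+    // past j).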
+ for (size_t i = 0, j = 0; i < n; i++) {
+ if (utf8buf[i] == '\t') {
+ for (size_t k = (j + 8) & ~7; j < k; j++) {
+ fputc('.', file);
+ }
+ continue;
+ }
+ fputc('.', file);
+ j++;
+ }
+ fputc('^', file);
+ }
+}
+
+static void PrintErrorLine(FILE* file, const char* prefix,
+ JSErrorNotes::Note* note) {}
+
+template <typename T>
+static void PrintSingleError(FILE* file, JS::ConstUTF8CharsZ toStringResult,
+ T* report, PrintErrorKind kind) {
+ UniqueChars prefix;
+ if (report->filename) {
+ prefix = JS_smprintf("%s:", report->filename);
+ }
+
+ if (report->lineno) {
+ prefix = JS_smprintf("%s%u:%u ", prefix ? prefix.get() : "", report->lineno,
+ report->column);
+ }
+
+ if (kind != PrintErrorKind::Error) {
+ const char* kindPrefix = nullptr;
+ switch (kind) {
+ case PrintErrorKind::Error:
+ MOZ_CRASH("unreachable");
+ case PrintErrorKind::Warning:
+ kindPrefix = "warning";
+ break;
+ case PrintErrorKind::Note:
+ kindPrefix = "note";
+ break;
+ }
+
+ prefix = JS_smprintf("%s%s: ", prefix ? prefix.get() : "", kindPrefix);
+ }
+
+ const char* message =
+ toStringResult ? toStringResult.c_str() : report->message().c_str();
+
+ /* embedded newlines -- argh! */
+ const char* ctmp;
+ while ((ctmp = strchr(message, '\n')) != 0) {
+ ctmp++;
+ if (prefix) {
+ fputs(prefix.get(), file);
+ }
+ (void)fwrite(message, 1, ctmp - message, file);
+ message = ctmp;
+ }
+
+  /* If there was no filename or lineno, the prefix might be empty. */
+ if (prefix) {
+ fputs(prefix.get(), file);
+ }
+ fputs(message, file);
+
+ PrintErrorLine(file, prefix.get(), report);
+ fputc('\n', file);
+
+ fflush(file);
+}
+
+static void PrintErrorImpl(FILE* file, JS::ConstUTF8CharsZ toStringResult,
+ JSErrorReport* report, bool reportWarnings) {
+ MOZ_ASSERT(report);
+
+ /* Conditionally ignore reported warnings. */
+ if (report->isWarning() && !reportWarnings) {
+ return;
+ }
+
+ PrintErrorKind kind = PrintErrorKind::Error;
+ if (report->isWarning()) {
+ kind = PrintErrorKind::Warning;
+ }
+ PrintSingleError(file, toStringResult, report, kind);
+
+ if (report->notes) {
+ for (auto&& note : *report->notes) {
+ PrintSingleError(file, JS::ConstUTF8CharsZ(), note.get(),
+ PrintErrorKind::Note);
+ }
+ }
+}
+
+JS_PUBLIC_API void JS::PrintError(FILE* file, JSErrorReport* report,
+ bool reportWarnings) {
+ PrintErrorImpl(file, JS::ConstUTF8CharsZ(), report, reportWarnings);
+}
+
+JS_PUBLIC_API void JS::PrintError(FILE* file,
+ const JS::ErrorReportBuilder& builder,
+ bool reportWarnings) {
+ PrintErrorImpl(file, builder.toStringResult(), builder.report(),
+ reportWarnings);
+}
+
+void js::ReportIsNotDefined(JSContext* cx, HandleId id) {
+ if (UniqueChars printable =
+ IdToPrintableUTF8(cx, id, IdToPrintableBehavior::IdIsIdentifier)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_NOT_DEFINED,
+ printable.get());
+ }
+}
+
+void js::ReportIsNotDefined(JSContext* cx, Handle<PropertyName*> name) {
+ RootedId id(cx, NameToId(name));
+ ReportIsNotDefined(cx, id);
+}
+
+const char* NullOrUndefinedToCharZ(HandleValue v) {
+ MOZ_ASSERT(v.isNullOrUndefined());
+ return v.isNull() ? js_null_str : js_undefined_str;
+}
+
+void js::ReportIsNullOrUndefinedForPropertyAccess(JSContext* cx, HandleValue v,
+ int vIndex) {
+ MOZ_ASSERT(v.isNullOrUndefined());
+
+ if (vIndex == JSDVG_IGNORE_STACK) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_CONVERT_TO, NullOrUndefinedToCharZ(v),
+ "object");
+ return;
+ }
+
+ UniqueChars bytes = DecompileValueGenerator(cx, vIndex, v, nullptr);
+ if (!bytes) {
+ return;
+ }
+
+ if (strcmp(bytes.get(), js_undefined_str) == 0 ||
+ strcmp(bytes.get(), js_null_str) == 0) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_NO_PROPERTIES,
+ bytes.get());
+ } else {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_UNEXPECTED_TYPE, bytes.get(),
+ NullOrUndefinedToCharZ(v));
+ }
+}
+
+void js::ReportIsNullOrUndefinedForPropertyAccess(JSContext* cx, HandleValue v,
+ int vIndex, HandleId key) {
+ MOZ_ASSERT(v.isNullOrUndefined());
+
+ if (!cx->realm()->creationOptions().getPropertyErrorMessageFixEnabled()) {
+ ReportIsNullOrUndefinedForPropertyAccess(cx, v, vIndex);
+ return;
+ }
+
+ RootedValue idVal(cx, IdToValue(key));
+ RootedString idStr(cx, ValueToSource(cx, idVal));
+ if (!idStr) {
+ return;
+ }
+
+ UniqueChars keyStr = StringToNewUTF8CharsZ(cx, *idStr);
+ if (!keyStr) {
+ return;
+ }
+
+ if (vIndex == JSDVG_IGNORE_STACK) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_PROPERTY_FAIL,
+ keyStr.get(), NullOrUndefinedToCharZ(v));
+ return;
+ }
+
+ UniqueChars bytes = DecompileValueGenerator(cx, vIndex, v, nullptr);
+ if (!bytes) {
+ return;
+ }
+
+ if (strcmp(bytes.get(), js_undefined_str) == 0 ||
+ strcmp(bytes.get(), js_null_str) == 0) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_PROPERTY_FAIL,
+ keyStr.get(), bytes.get());
+ return;
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_PROPERTY_FAIL_EXPR, keyStr.get(), bytes.get(),
+ NullOrUndefinedToCharZ(v));
+}
+
+bool js::ReportValueError(JSContext* cx, const unsigned errorNumber,
+ int spindex, HandleValue v, HandleString fallback,
+ const char* arg1, const char* arg2) {
+ MOZ_ASSERT(js_ErrorFormatString[errorNumber].argCount >= 1);
+ MOZ_ASSERT(js_ErrorFormatString[errorNumber].argCount <= 3);
+ UniqueChars bytes = DecompileValueGenerator(cx, spindex, v, fallback);
+ if (!bytes) {
+ return false;
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber,
+ bytes.get(), arg1, arg2);
+ return false;
+}
+
+JSObject* js::CreateErrorNotesArray(JSContext* cx, JSErrorReport* report) {
+ Rooted<ArrayObject*> notesArray(cx, NewDenseEmptyArray(cx));
+ if (!notesArray) {
+ return nullptr;
+ }
+
+ if (!report->notes) {
+ return notesArray;
+ }
+
+ for (auto&& note : *report->notes) {
+ Rooted<PlainObject*> noteObj(cx, NewPlainObject(cx));
+ if (!noteObj) {
+ return nullptr;
+ }
+
+ RootedString messageStr(cx, note->newMessageString(cx));
+ if (!messageStr) {
+ return nullptr;
+ }
+ RootedValue messageVal(cx, StringValue(messageStr));
+ if (!DefineDataProperty(cx, noteObj, cx->names().message, messageVal)) {
+ return nullptr;
+ }
+
+ RootedValue filenameVal(cx);
+ if (const char* filename = note->filename) {
+ JS::UTF8Chars utf8chars(filename, strlen(filename));
+ Rooted<JSString*> filenameStr(cx, NewStringCopyUTF8N(cx, utf8chars));
+ if (!filenameStr) {
+ return nullptr;
+ }
+ filenameVal = StringValue(filenameStr);
+ }
+ if (!DefineDataProperty(cx, noteObj, cx->names().fileName, filenameVal)) {
+ return nullptr;
+ }
+
+ RootedValue linenoVal(cx, Int32Value(note->lineno));
+ if (!DefineDataProperty(cx, noteObj, cx->names().lineNumber, linenoVal)) {
+ return nullptr;
+ }
+ RootedValue columnVal(cx, Int32Value(note->column));
+ if (!DefineDataProperty(cx, noteObj, cx->names().columnNumber, columnVal)) {
+ return nullptr;
+ }
+
+ if (!NewbornArrayPush(cx, notesArray, ObjectValue(*noteObj))) {
+ return nullptr;
+ }
+ }
+
+ return notesArray;
+}
+
+void JSContext::recoverFromOutOfMemory() {
+ if (isHelperThreadContext()) {
+ // Keep in sync with addPendingOutOfMemory.
+ if (FrontendErrors* errors = frontendErrors()) {
+ errors->outOfMemory = false;
+ }
+ } else {
+ if (isExceptionPending()) {
+ MOZ_ASSERT(isThrowingOutOfMemory());
+ clearPendingException();
+ }
+ }
+}
+
+void JSContext::reportAllocationOverflow() {
+ if (isHelperThreadContext()) {
+ return;
+ }
+
+ gc::AutoSuppressGC suppressGC(this);
+ JS_ReportErrorNumberASCII(this, GetErrorMessage, nullptr,
+ JSMSG_ALLOC_OVERFLOW);
+}
+
+JS::StackKind JSContext::stackKindForCurrentPrincipal() {
+ return runningWithTrustedPrincipals() ? JS::StackForTrustedScript
+ : JS::StackForUntrustedScript;
+}
+
+JS::NativeStackLimit JSContext::stackLimitForCurrentPrincipal() {
+ return stackLimit(stackKindForCurrentPrincipal());
+}
+
+JS_PUBLIC_API bool js::UseInternalJobQueues(JSContext* cx) {
+ // Internal job queue handling must be set up very early. Self-hosting
+ // initialization is as good a marker for that as any.
+ MOZ_RELEASE_ASSERT(
+ !cx->runtime()->hasInitializedSelfHosting(),
+ "js::UseInternalJobQueues must be called early during runtime startup.");
+ MOZ_ASSERT(!cx->jobQueue);
+ auto queue = MakeUnique<InternalJobQueue>(cx);
+ if (!queue) {
+ return false;
+ }
+
+ cx->internalJobQueue = std::move(queue);
+ cx->jobQueue = cx->internalJobQueue.ref().get();
+
+ cx->runtime()->offThreadPromiseState.ref().initInternalDispatchQueue();
+ MOZ_ASSERT(cx->runtime()->offThreadPromiseState.ref().initialized());
+
+ return true;
+}
+
+#ifdef DEBUG
+JSObject* InternalJobQueue::copyJobs(JSContext* cx) {
+ Rooted<ArrayObject*> jobs(cx, NewDenseEmptyArray(cx));
+ if (!jobs) {
+ return nullptr;
+ }
+
+ for (const JSObject* unwrappedJob : queue.get()) {
+ RootedObject job(cx, const_cast<JSObject*>(unwrappedJob));
+ if (!cx->compartment()->wrap(cx, &job)) {
+ return nullptr;
+ }
+
+ if (!NewbornArrayPush(cx, jobs, ObjectValue(*job))) {
+ return nullptr;
+ }
+ }
+
+ return jobs;
+}
+
+JS_PUBLIC_API JSObject* js::GetJobsInInternalJobQueue(JSContext* cx) {
+ MOZ_ASSERT(cx->internalJobQueue.ref());
+ return cx->internalJobQueue->copyJobs(cx);
+}
+#endif
+
+JS_PUBLIC_API bool js::EnqueueJob(JSContext* cx, JS::HandleObject job) {
+ MOZ_ASSERT(cx->jobQueue);
+ return cx->jobQueue->enqueuePromiseJob(cx, nullptr, job, nullptr, nullptr);
+}
+
+JS_PUBLIC_API void js::StopDrainingJobQueue(JSContext* cx) {
+ MOZ_ASSERT(cx->internalJobQueue.ref());
+ cx->internalJobQueue->interrupt();
+}
+
+JS_PUBLIC_API void js::RunJobs(JSContext* cx) {
+ MOZ_ASSERT(cx->jobQueue);
+ cx->jobQueue->runJobs(cx);
+ JS::ClearKeptObjects(cx);
+}
+
+JSObject* InternalJobQueue::getIncumbentGlobal(JSContext* cx) {
+ if (!cx->compartment()) {
+ return nullptr;
+ }
+ return cx->global();
+}
+
+bool InternalJobQueue::enqueuePromiseJob(JSContext* cx,
+ JS::HandleObject promise,
+ JS::HandleObject job,
+ JS::HandleObject allocationSite,
+ JS::HandleObject incumbentGlobal) {
+ MOZ_ASSERT(job);
+ if (!queue.pushBack(job)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ JS::JobQueueMayNotBeEmpty(cx);
+ return true;
+}
+
+void InternalJobQueue::runJobs(JSContext* cx) {
+ if (draining_ || interrupted_) {
+ return;
+ }
+
+ while (true) {
+ cx->runtime()->offThreadPromiseState.ref().internalDrain(cx);
+
+ // It doesn't make sense for job queue draining to be reentrant. At the
+ // same time we don't want to assert against it, because that'd make
+ // drainJobQueue unsafe for fuzzers. We do want fuzzers to test this,
+ // so we simply ignore nested calls of drainJobQueue.
+ draining_ = true;
+
+ RootedObject job(cx);
+ JS::HandleValueArray args(JS::HandleValueArray::empty());
+ RootedValue rval(cx);
+
+ // Execute jobs in a loop until we've reached the end of the queue.
+ while (!queue.empty()) {
+ // A previous job might have set this flag. E.g., the js shell
+ // sets it if the `quit` builtin function is called.
+ if (interrupted_) {
+ break;
+ }
+
+ job = queue.front();
+ queue.popFront();
+
+ // If the next job is the last job in the job queue, allow
+ // skipping the standard job queuing behavior.
+ if (queue.empty()) {
+ JS::JobQueueIsEmpty(cx);
+ }
+
+ AutoRealm ar(cx, &job->as<JSFunction>());
+ {
+ if (!JS::Call(cx, UndefinedHandleValue, job, args, &rval)) {
+ // Nothing we can do about uncatchable exceptions.
+ if (!cx->isExceptionPending()) {
+ continue;
+ }
+ RootedValue exn(cx);
+ if (cx->getPendingException(&exn)) {
+ /*
+ * Clear the exception, because
+ * PrepareScriptEnvironmentAndInvoke will assert that we don't
+ * have one.
+ */
+ cx->clearPendingException();
+ js::ReportExceptionClosure reportExn(exn);
+ PrepareScriptEnvironmentAndInvoke(cx, cx->global(), reportExn);
+ }
+ }
+ }
+ }
+
+ draining_ = false;
+
+ if (interrupted_) {
+ interrupted_ = false;
+ break;
+ }
+
+ queue.clear();
+
+ // It's possible a job added a new off-thread promise task.
+ if (!cx->runtime()->offThreadPromiseState.ref().internalHasPending()) {
+ break;
+ }
+ }
+}
+
+bool InternalJobQueue::empty() const { return queue.empty(); }
+
+JSObject* InternalJobQueue::maybeFront() const {
+ if (queue.empty()) {
+ return nullptr;
+ }
+
+ return queue.get().front();
+}
+
+class js::InternalJobQueue::SavedQueue : public JobQueue::SavedJobQueue {
+ public:
+ SavedQueue(JSContext* cx, Queue&& saved, bool draining)
+ : cx(cx), saved(cx, std::move(saved)), draining_(draining) {
+ MOZ_ASSERT(cx->internalJobQueue.ref());
+ }
+
+ ~SavedQueue() {
+ MOZ_ASSERT(cx->internalJobQueue.ref());
+ cx->internalJobQueue->queue = std::move(saved.get());
+ cx->internalJobQueue->draining_ = draining_;
+ }
+
+ private:
+ JSContext* cx;
+ PersistentRooted<Queue> saved;
+ bool draining_;
+};
+
+js::UniquePtr<JS::JobQueue::SavedJobQueue> InternalJobQueue::saveJobQueue(
+ JSContext* cx) {
+ auto saved =
+ js::MakeUnique<SavedQueue>(cx, std::move(queue.get()), draining_);
+ if (!saved) {
+ // When MakeUnique's allocation fails, the SavedQueue constructor is never
+ // called, so this->queue is still initialized. (The move doesn't occur
+ // until the constructor gets called.)
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ queue = Queue(SystemAllocPolicy());
+ draining_ = false;
+ return saved;
+}
+
+mozilla::GenericErrorResult<OOM> JSContext::alreadyReportedOOM() {
+#ifdef DEBUG
+ if (isHelperThreadContext()) {
+ // Keep in sync with addPendingOutOfMemory.
+ if (FrontendErrors* errors = frontendErrors()) {
+ MOZ_ASSERT(errors->outOfMemory);
+ }
+ } else {
+ MOZ_ASSERT(isThrowingOutOfMemory());
+ }
+#endif
+ return mozilla::Err(JS::OOM());
+}
+
+mozilla::GenericErrorResult<JS::Error> JSContext::alreadyReportedError() {
+ return mozilla::Err(JS::Error());
+}
+
+JSContext::JSContext(JSRuntime* runtime, const JS::ContextOptions& options)
+ : runtime_(runtime),
+ kind_(ContextKind::Uninitialized),
+ options_(this, options),
+ freeUnusedMemory(false),
+ measuringExecutionTime_(this, false),
+ jitActivation(this, nullptr),
+ isolate(this, nullptr),
+ activation_(this, nullptr),
+ profilingActivation_(nullptr),
+ entryMonitor(this, nullptr),
+ noExecuteDebuggerTop(this, nullptr),
+#ifdef DEBUG
+ inUnsafeCallWithABI(this, false),
+ hasAutoUnsafeCallWithABI(this, false),
+#endif
+#ifdef JS_SIMULATOR
+ simulator_(this, nullptr),
+#endif
+ dtoaState(this, nullptr),
+ suppressGC(this, 0),
+#ifdef FUZZING_JS_FUZZILLI
+ executionHash(1),
+ executionHashInputs(0),
+#endif
+#ifdef DEBUG
+ noNurseryAllocationCheck(this, 0),
+ disableStrictProxyCheckingCount(this, 0),
+#endif
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ runningOOMTest(this, false),
+#endif
+ inUnsafeRegion(this, 0),
+ generationalDisabled(this, 0),
+ compactingDisabledCount(this, 0),
+ frontendCollectionPool_(this),
+ suppressProfilerSampling(false),
+ tempLifoAlloc_(this, (size_t)TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
+ debuggerMutations(this, 0),
+ ionPcScriptCache(this, nullptr),
+ status(this, JS::ExceptionStatus::None),
+ unwrappedException_(this),
+ unwrappedExceptionStack_(this),
+#ifdef DEBUG
+ hadResourceExhaustion_(this, false),
+#endif
+ reportGranularity(this, JS_DEFAULT_JITREPORT_GRANULARITY),
+ resolvingList(this, nullptr),
+#ifdef DEBUG
+ enteredPolicy(this, nullptr),
+#endif
+ generatingError(this, false),
+ cycleDetectorVector_(this, this),
+ data(nullptr),
+ asyncStackForNewActivations_(this),
+ asyncCauseForNewActivations(this, nullptr),
+ asyncCallIsExplicit(this, false),
+ interruptCallbacks_(this),
+ interruptCallbackDisabled(this, false),
+ interruptBits_(0),
+ inlinedICScript_(this, nullptr),
+ jitStackLimit(JS::NativeStackLimitMin),
+ jitStackLimitNoInterrupt(this, JS::NativeStackLimitMin),
+ jobQueue(this, nullptr),
+ internalJobQueue(this),
+ canSkipEnqueuingJobs(this, false),
+ promiseRejectionTrackerCallback(this, nullptr),
+ promiseRejectionTrackerCallbackData(this, nullptr),
+#ifdef JS_STRUCTURED_SPEW
+ structuredSpewer_(),
+#endif
+ insideDebuggerEvaluationWithOnNativeCallHook(this, nullptr) {
+ MOZ_ASSERT(static_cast<JS::RootingContext*>(this) ==
+ JS::RootingContext::get(this));
+}
+
+JSContext::~JSContext() {
+ // Clear the ContextKind first, so that ProtectedData checks will allow us to
+ // destroy this context even if the runtime is already gone.
+ kind_ = ContextKind::Uninitialized;
+
+ /* Free the stuff hanging off of cx. */
+ MOZ_ASSERT(!resolvingList);
+
+ if (dtoaState) {
+ DestroyDtoaState(dtoaState);
+ }
+
+ fx.destroyInstance();
+
+#ifdef JS_SIMULATOR
+ js::jit::Simulator::Destroy(simulator_);
+#endif
+
+ if (isolate) {
+ irregexp::DestroyIsolate(isolate.ref());
+ }
+
+ TlsContext.set(nullptr);
+}
+
+void JSContext::setHelperThread(const JS::ContextOptions& options,
+ const AutoLockHelperThreadState& locked) {
+ MOZ_ASSERT(isHelperThreadContext());
+ MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), !TlsContext.get());
+ MOZ_ASSERT(currentThread_ == ThreadId());
+
+ TlsContext.set(this);
+ currentThread_ = ThreadId::ThisThreadId();
+ options_ = options;
+}
+
+void JSContext::clearHelperThread(const AutoLockHelperThreadState& locked) {
+ MOZ_ASSERT(isHelperThreadContext());
+ MOZ_ASSERT(TlsContext.get() == this);
+ MOZ_ASSERT(currentThread_ == ThreadId::ThisThreadId());
+
+ currentThread_ = ThreadId();
+ options_ = JS::ContextOptions();
+ TlsContext.set(nullptr);
+}
+
+void JSContext::setRuntime(JSRuntime* rt) {
+ MOZ_ASSERT(!resolvingList);
+ MOZ_ASSERT(!compartment());
+ MOZ_ASSERT(!activation());
+ MOZ_ASSERT(!unwrappedException_.ref().initialized());
+ MOZ_ASSERT(!unwrappedExceptionStack_.ref().initialized());
+ MOZ_ASSERT(!asyncStackForNewActivations_.ref().initialized());
+
+ runtime_ = rt;
+}
+
+#if defined(NIGHTLY_BUILD)
+static bool IsOutOfMemoryException(JSContext* cx, const Value& v) {
+ return v == StringValue(cx->names().outOfMemory);
+}
+#endif
+
+void JSContext::setPendingException(HandleValue v, Handle<SavedFrame*> stack) {
+#if defined(NIGHTLY_BUILD)
+ do {
+ // Do not intercept exceptions if we are already
+ // in the exception interceptor. That would lead
+ // to infinite recursion.
+ if (this->runtime()->errorInterception.isExecuting) {
+ break;
+ }
+
+ // Check whether we have an interceptor at all.
+ if (!this->runtime()->errorInterception.interceptor) {
+ break;
+ }
+
+ // Don't report OOM exceptions. The interceptor isn't interested in those
+ // and they can confuse the interceptor because OOM can be thrown when we
+ // are not in a realm (atom allocation, for example).
+ if (IsOutOfMemoryException(this, v)) {
+ break;
+ }
+
+ // Make sure that we do not call the interceptor from within
+ // the interceptor.
+ this->runtime()->errorInterception.isExecuting = true;
+
+ // The interceptor must be infallible.
+ const mozilla::DebugOnly<bool> wasExceptionPending =
+ this->isExceptionPending();
+ this->runtime()->errorInterception.interceptor->interceptError(this, v);
+ MOZ_ASSERT(wasExceptionPending == this->isExceptionPending());
+
+ this->runtime()->errorInterception.isExecuting = false;
+ } while (false);
+#endif // defined(NIGHTLY_BUILD)
+
+  // The OverRecursed status is set after the fact by ReportOverRecursed.
+ this->status = JS::ExceptionStatus::Throwing;
+ this->unwrappedException() = v;
+ this->unwrappedExceptionStack() = stack;
+}
+
+void JSContext::setPendingException(HandleValue value,
+ ShouldCaptureStack captureStack) {
+ Rooted<SavedFrame*> nstack(this);
+ if (captureStack == ShouldCaptureStack::Always ||
+ realm()->shouldCaptureStackForThrow()) {
+ RootedObject stack(this);
+ if (!CaptureStack(this, &stack)) {
+ clearPendingException();
+ }
+ if (stack) {
+ nstack = &stack->as<SavedFrame>();
+ }
+ }
+ setPendingException(value, nstack);
+}
+
+bool JSContext::getPendingException(MutableHandleValue rval) {
+ MOZ_ASSERT(isExceptionPending());
+
+ RootedValue exception(this, unwrappedException());
+ if (zone()->isAtomsZone()) {
+ rval.set(exception);
+ return true;
+ }
+
+ Rooted<SavedFrame*> stack(this, unwrappedExceptionStack());
+ JS::ExceptionStatus prevStatus = status;
+ clearPendingException();
+ if (!compartment()->wrap(this, &exception)) {
+ return false;
+ }
+ this->check(exception);
+ setPendingException(exception, stack);
+ status = prevStatus;
+
+ rval.set(exception);
+ return true;
+}
+
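+// An illustrative sketch of the embedding-level counterpart (public jsapi.h
+// names; error reporting elided):
+//
+//   JS::RootedValue exn(cx);
+//   if (JS_IsExceptionPending(cx) && JS_GetPendingException(cx, &exn)) {
+//     JS_ClearPendingException(cx);
+//     // ... convert |exn| to a report and display it ...
+//   }
+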
+SavedFrame* JSContext::getPendingExceptionStack() {
+ return unwrappedExceptionStack();
+}
+
+bool JSContext::isClosingGenerator() {
+ return isExceptionPending() &&
+ unwrappedException().isMagic(JS_GENERATOR_CLOSING);
+}
+
+bool JSContext::isThrowingDebuggeeWouldRun() {
+ return isExceptionPending() && unwrappedException().isObject() &&
+ unwrappedException().toObject().is<ErrorObject>() &&
+ unwrappedException().toObject().as<ErrorObject>().type() ==
+ JSEXN_DEBUGGEEWOULDRUN;
+}
+
+bool JSContext::isRuntimeCodeGenEnabled(JS::RuntimeCode kind,
+ HandleString code) {
+ // Make sure that the CSP callback is installed and that it permits runtime
+ // code generation.
+ if (JSCSPEvalChecker allows =
+ runtime()->securityCallbacks->contentSecurityPolicyAllows) {
+ return allows(this, kind, code);
+ }
+
+ return true;
+}
+
+size_t JSContext::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ /*
+ * There are other JSContext members that could be measured; the following
+ * ones have been found by DMD to be worth measuring. More stuff may be
+ * added later.
+ */
+ return cycleDetectorVector().sizeOfExcludingThis(mallocSizeOf) +
+ irregexp::IsolateSizeOfIncludingThis(isolate, mallocSizeOf);
+}
+
+size_t JSContext::sizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+}
+
+#ifdef DEBUG
+bool JSContext::inAtomsZone() const { return zone_->isAtomsZone(); }
+#endif
+
+void JSContext::trace(JSTracer* trc) {
+ cycleDetectorVector().trace(trc);
+ geckoProfiler().trace(trc);
+ if (isolate) {
+ irregexp::TraceIsolate(trc, isolate.ref());
+ }
+}
+
+JS::NativeStackLimit JSContext::stackLimitForJitCode(JS::StackKind kind) {
+ MOZ_ASSERT(isMainThreadContext());
+#ifdef JS_SIMULATOR
+ return simulator()->stackLimit();
+#else
+ return stackLimit(kind);
+#endif
+}
+
+void JSContext::resetJitStackLimit() {
+ MOZ_ASSERT(isMainThreadContext());
+
+ // Note that, for now, we use the untrusted limit for ion. This is fine,
+ // because it's the most conservative limit, and if we hit it, we'll bail
+ // out of ion into the interpreter, which will do a proper recursion check.
+#ifdef JS_SIMULATOR
+ jitStackLimit = jit::Simulator::StackLimit();
+#else
+ jitStackLimit = nativeStackLimit[JS::StackForUntrustedScript];
+#endif
+ jitStackLimitNoInterrupt = jitStackLimit;
+}
+
+void JSContext::initJitStackLimit() { resetJitStackLimit(); }
+
+#ifdef JS_CRASH_DIAGNOSTICS
+void ContextChecks::check(AbstractFramePtr frame, int argIndex) {
+ if (frame) {
+ check(frame.realm(), argIndex);
+ }
+}
+#endif
+
+void AutoEnterOOMUnsafeRegion::crash(const char* reason) {
+ char msgbuf[1024];
+ js::NoteIntentionalCrash();
+ SprintfLiteral(msgbuf, "[unhandlable oom] %s", reason);
+#ifndef DEBUG
+ // In non-DEBUG builds MOZ_CRASH normally doesn't print to stderr so we have
+ // to do this explicitly (the jit-test allow-unhandlable-oom annotation and
+ // fuzzers depend on it).
+ fprintf(stderr, "Hit MOZ_CRASH(%s) at %s:%d\n", msgbuf, __FILE__, __LINE__);
+#endif
+ MOZ_CRASH_UNSAFE(msgbuf);
+}
+
+mozilla::Atomic<AutoEnterOOMUnsafeRegion::AnnotateOOMAllocationSizeCallback,
+ mozilla::Relaxed>
+ AutoEnterOOMUnsafeRegion::annotateOOMSizeCallback(nullptr);
+
+void AutoEnterOOMUnsafeRegion::crash(size_t size, const char* reason) {
+ {
+ JS::AutoSuppressGCAnalysis suppress;
+ if (annotateOOMSizeCallback) {
+ annotateOOMSizeCallback(size);
+ }
+ }
+ crash(reason);
+}
+
+void ExternalValueArray::trace(JSTracer* trc) {
+ if (Value* vp = begin()) {
+ TraceRootRange(trc, length(), vp, "js::ExternalValueArray");
+ }
+}
+
+#ifdef DEBUG
+AutoUnsafeCallWithABI::AutoUnsafeCallWithABI(UnsafeABIStrictness strictness)
+ : cx_(TlsContext.get()),
+ nested_(cx_ ? cx_->hasAutoUnsafeCallWithABI : false),
+ nogc(cx_) {
+ if (!cx_) {
+ // This is a helper thread doing Ion or Wasm compilation - nothing to do.
+ return;
+ }
+ switch (strictness) {
+ case UnsafeABIStrictness::NoExceptions:
+ MOZ_ASSERT(!JS_IsExceptionPending(cx_));
+ checkForPendingException_ = true;
+ break;
+ case UnsafeABIStrictness::AllowPendingExceptions:
+ checkForPendingException_ = !JS_IsExceptionPending(cx_);
+ break;
+ case UnsafeABIStrictness::AllowThrownExceptions:
+ checkForPendingException_ = false;
+ break;
+ }
+
+ cx_->hasAutoUnsafeCallWithABI = true;
+}
+
+AutoUnsafeCallWithABI::~AutoUnsafeCallWithABI() {
+ if (!cx_) {
+ return;
+ }
+ MOZ_ASSERT(cx_->hasAutoUnsafeCallWithABI);
+ if (!nested_) {
+ cx_->hasAutoUnsafeCallWithABI = false;
+ cx_->inUnsafeCallWithABI = false;
+ }
+ MOZ_ASSERT_IF(checkForPendingException_, !JS_IsExceptionPending(cx_));
+}
+#endif
+
+#ifdef __wasi__
+JS_PUBLIC_API void js::IncWasiRecursionDepth(JSContext* cx) {
+ ++JS::RootingContext::get(cx)->wasiRecursionDepth;
+}
+
+JS_PUBLIC_API void js::DecWasiRecursionDepth(JSContext* cx) {
+ MOZ_ASSERT(JS::RootingContext::get(cx)->wasiRecursionDepth > 0);
+ --JS::RootingContext::get(cx)->wasiRecursionDepth;
+}
+
+JS_PUBLIC_API bool js::CheckWasiRecursionLimit(JSContext* cx) {
+ // WASI has two limits:
+ // 1) The stack pointer in linear memory that grows to zero. See
+ // --stack-first in js/src/shell/moz.build.
+  // 2) JS::RootingContext::wasiRecursionDepth, which counts recursion depth.
+  // We check both here.
+ if (JS::RootingContext::get(cx)->wasiRecursionDepth >=
+ JS::RootingContext::wasiRecursionDepthLimit) {
+ return false;
+ }
+ return true;
+}
+#endif // __wasi__
diff --git a/js/src/vm/JSContext.h b/js/src/vm/JSContext.h
new file mode 100644
index 0000000000..450b4ac2c4
--- /dev/null
+++ b/js/src/vm/JSContext.h
@@ -0,0 +1,1139 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS execution context. */
+
+#ifndef vm_JSContext_h
+#define vm_JSContext_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "builtin/AtomicsObject.h"
+#include "ds/TraceableFifo.h"
+#include "frontend/NameCollections.h"
+#include "gc/Memory.h"
+#include "irregexp/RegExpTypes.h"
+#include "jit/PcScriptCache.h"
+#include "js/ContextOptions.h" // JS::ContextOptions
+#include "js/Exception.h"
+#include "js/GCVector.h"
+#include "js/Interrupt.h"
+#include "js/Promise.h"
+#include "js/Result.h"
+#include "js/Stack.h" // JS::NativeStackBase, JS::NativeStackLimit
+#include "js/Utility.h"
+#include "js/Vector.h"
+#include "threading/ProtectedData.h"
+#include "util/StructuredSpewer.h"
+#include "vm/Activation.h" // js::Activation
+#include "vm/MallocProvider.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmContext.h"
+
+struct JS_PUBLIC_API JSContext;
+
+struct DtoaState;
+
+namespace js {
+
+class AutoAllocInAtomsZone;
+class AutoMaybeLeaveAtomsZone;
+class AutoRealm;
+
+namespace jit {
+class ICScript;
+class JitActivation;
+class JitContext;
+class DebugModeOSRVolatileJitFrameIter;
+} // namespace jit
+
+/* Detects cycles when traversing an object graph. */
+class MOZ_RAII AutoCycleDetector {
+ public:
+ using Vector = GCVector<JSObject*, 8>;
+
+ AutoCycleDetector(JSContext* cx, HandleObject objArg)
+ : cx(cx), obj(cx, objArg), cyclic(true) {}
+
+ ~AutoCycleDetector();
+
+ bool init();
+
+ bool foundCycle() { return cyclic; }
+
+ private:
+ JSContext* cx;
+ RootedObject obj;
+ bool cyclic;
+};
+
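+// An illustrative sketch of the intended pattern (SerializeObject and
+// EmitCycleMarker are hypothetical):
+//
+//   static bool SerializeObject(JSContext* cx, HandleObject obj) {
+//     AutoCycleDetector detector(cx, obj);
+//     if (!detector.init()) {
+//       return false;  // OOM while registering |obj|
+//     }
+//     if (detector.foundCycle()) {
+//       return EmitCycleMarker(cx);  // |obj| is already being traversed
+//     }
+//     // ... recurse into obj's properties ...
+//     return true;
+//   }
+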
+struct AutoResolving;
+
+struct FrontendErrors; // vm/HelperThreadState.h
+
+class InternalJobQueue : public JS::JobQueue {
+ public:
+ explicit InternalJobQueue(JSContext* cx)
+ : queue(cx, SystemAllocPolicy()), draining_(false), interrupted_(false) {}
+ ~InternalJobQueue() = default;
+
+ // JS::JobQueue methods.
+ JSObject* getIncumbentGlobal(JSContext* cx) override;
+ bool enqueuePromiseJob(JSContext* cx, JS::HandleObject promise,
+ JS::HandleObject job, JS::HandleObject allocationSite,
+ JS::HandleObject incumbentGlobal) override;
+ void runJobs(JSContext* cx) override;
+ bool empty() const override;
+
+ // If we are currently in a call to runJobs(), make that call stop processing
+ // jobs once the current one finishes, and return. If we are not currently in
+  // a call to runJobs(), make all future calls return immediately.
+ void interrupt() { interrupted_ = true; }
+
+ // Return the front element of the queue, or nullptr if the queue is empty.
+ // This is only used by shell testing functions.
+ JSObject* maybeFront() const;
+
+#ifdef DEBUG
+ JSObject* copyJobs(JSContext* cx);
+#endif
+
+ private:
+ using Queue = js::TraceableFifo<JSObject*, 0, SystemAllocPolicy>;
+
+ JS::PersistentRooted<Queue> queue;
+
+ // True if we are in the midst of draining jobs from this queue. We use this
+ // to avoid re-entry (nested calls simply return immediately).
+ bool draining_;
+
+ // True if we've been asked to interrupt draining jobs. Set by interrupt().
+ bool interrupted_;
+
+ class SavedQueue;
+ js::UniquePtr<JobQueue::SavedJobQueue> saveJobQueue(JSContext*) override;
+};
+
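+// A sketch of how interrupt() is reached in practice (assuming the public
+// wrapper js::StopDrainingJobQueue): a shell-style |quit| builtin stops the
+// drain loop after the current job finishes.
+//
+//   static bool Quit(JSContext* cx, unsigned argc, JS::Value* vp) {
+//     js::StopDrainingJobQueue(cx);  // calls InternalJobQueue::interrupt()
+//     // ... record the exit code, then return false to unwind ...
+//     return false;
+//   }
+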
+class AutoLockScriptData;
+
+/* Thread Local Storage slot for storing the context for a thread. */
+extern MOZ_THREAD_LOCAL(JSContext*) TlsContext;
+
+enum class ContextKind {
+ Uninitialized,
+
+ // Context for the main thread of a JSRuntime.
+ MainThread,
+
+ // Context for a helper thread.
+ HelperThread
+};
+
+#ifdef DEBUG
+JSContext* MaybeGetJSContext();
+bool CurrentThreadIsParseThread();
+#endif
+
+enum class InterruptReason : uint32_t {
+ GC = 1 << 0,
+ AttachIonCompilations = 1 << 1,
+ CallbackUrgent = 1 << 2,
+ CallbackCanWait = 1 << 3,
+};
+
+enum class ShouldCaptureStack { Maybe, Always };
+
+} /* namespace js */
+
+/*
+ * A JSContext encapsulates the thread local state used when using the JS
+ * runtime.
+ */
+struct JS_PUBLIC_API JSContext : public JS::RootingContext,
+ public js::MallocProvider<JSContext> {
+ JSContext(JSRuntime* runtime, const JS::ContextOptions& options);
+ ~JSContext();
+
+ bool init(js::ContextKind kind);
+
+ private:
+ js::UnprotectedData<JSRuntime*> runtime_;
+ js::WriteOnceData<js::ContextKind> kind_;
+
+ js::ContextData<JS::ContextOptions> options_;
+
+ // Thread that the JSContext is currently running on, if in use.
+ js::ThreadId currentThread_;
+
+ js::FrontendErrors* errors_;
+
+ // When a helper thread is using a context, it may need to periodically
+ // free unused memory.
+ mozilla::Atomic<bool, mozilla::ReleaseAcquire> freeUnusedMemory;
+
+ // Are we currently timing execution? This flag ensures that we do not
+ // double-count execution time in reentrant situations.
+ js::ContextData<bool> measuringExecutionTime_;
+
+  // This variable is used by helper thread scheduling to update the priority
+  // of tasks based on whether JavaScript is being executed on the main thread.
+ mozilla::Atomic<bool, mozilla::ReleaseAcquire> isExecuting_;
+
+ public:
+ // This is used by helper threads to change the runtime their context is
+ // currently operating on.
+ void setRuntime(JSRuntime* rt);
+
+ void setHelperThread(const JS::ContextOptions& options,
+ const js::AutoLockHelperThreadState& locked);
+ void clearHelperThread(const js::AutoLockHelperThreadState& locked);
+
+ bool contextAvailable(js::AutoLockHelperThreadState& locked) {
+ MOZ_ASSERT(kind_ == js::ContextKind::HelperThread);
+ return currentThread_ == js::ThreadId();
+ }
+
+ void setFreeUnusedMemory(bool shouldFree) { freeUnusedMemory = shouldFree; }
+
+ bool shouldFreeUnusedMemory() const {
+ return kind_ == js::ContextKind::HelperThread && freeUnusedMemory;
+ }
+
+ bool isMeasuringExecutionTime() const { return measuringExecutionTime_; }
+ void setIsMeasuringExecutionTime(bool value) {
+ measuringExecutionTime_ = value;
+ }
+
+ // While JSContexts are meant to be used on a single thread, this reference is
+  // meant to be shared with helper thread tasks. This is used by helper threads
+ // to change the priority of tasks based on whether JavaScript is executed on
+ // the main thread.
+ const mozilla::Atomic<bool, mozilla::ReleaseAcquire>& isExecutingRef() const {
+ return isExecuting_;
+ }
+ void setIsExecuting(bool value) { isExecuting_ = value; }
+
+#ifdef DEBUG
+ bool isInitialized() const { return kind_ != js::ContextKind::Uninitialized; }
+#endif
+
+ bool isMainThreadContext() const {
+ return kind_ == js::ContextKind::MainThread;
+ }
+
+ bool isHelperThreadContext() const {
+ return kind_ == js::ContextKind::HelperThread;
+ }
+
+ template <typename T>
+ bool isInsideCurrentZone(T thing) const {
+ return thing->zoneFromAnyThread() == zone_;
+ }
+
+ template <typename T>
+ inline bool isInsideCurrentCompartment(T thing) const {
+ return thing->compartment() == compartment();
+ }
+
+ void onOutOfMemory();
+ void* onOutOfMemory(js::AllocFunction allocFunc, arena_id_t arena,
+ size_t nbytes, void* reallocPtr = nullptr) {
+ if (isHelperThreadContext()) {
+ addPendingOutOfMemory();
+ return nullptr;
+ }
+ return runtime_->onOutOfMemory(allocFunc, arena, nbytes, reallocPtr, this);
+ }
+
+ void onOverRecursed();
+
+ // Allocate a GC thing.
+ template <typename T, js::AllowGC allowGC = js::CanGC, typename... Args>
+ T* newCell(Args&&... args) {
+ return js::gc::CellAllocator::template NewCell<T, allowGC>(
+ this, std::forward<Args>(args)...);
+ }
+
+ /* Clear the pending exception (if any) due to OOM. */
+ void recoverFromOutOfMemory();
+
+ void reportAllocationOverflow();
+
+ // Accessors for immutable runtime data.
+ JSAtomState& names() { return *runtime_->commonNames; }
+ js::StaticStrings& staticStrings() { return *runtime_->staticStrings; }
+ bool permanentAtomsPopulated() { return runtime_->permanentAtomsPopulated(); }
+ const js::FrozenAtomSet& permanentAtoms() {
+ return *runtime_->permanentAtoms();
+ }
+ js::WellKnownSymbols& wellKnownSymbols() {
+ return *runtime_->wellKnownSymbols;
+ }
+ js::PropertyName* emptyString() { return runtime_->emptyString; }
+ JS::GCContext* gcContext() { return runtime_->gcContext(); }
+ JS::StackKind stackKindForCurrentPrincipal();
+ JS::NativeStackLimit stackLimitForCurrentPrincipal();
+ JS::NativeStackLimit stackLimit(JS::StackKind kind) {
+ MOZ_ASSERT(isMainThreadContext());
+ return nativeStackLimit[kind];
+ }
+ JS::NativeStackLimit stackLimitForJitCode(JS::StackKind kind);
+ size_t gcSystemPageSize() { return js::gc::SystemPageSize(); }
+
+ /*
+ * "Entering" a realm changes cx->realm (which changes cx->global). Note
+ * that this does not push an Activation so it's possible for the caller's
+ * realm to be != cx->realm(). This is not a problem since, in general, most
+ * places in the VM cannot know that they were called from script (e.g.,
+ * they may have been called through the JSAPI via JS_CallFunction) and thus
+ * cannot expect there is a scripted caller.
+ *
+   * Realms should be entered/left in a LIFO fashion. To enter a realm, code
+ * should prefer using AutoRealm over JS::EnterRealm/JS::LeaveRealm.
+ *
+ * Also note that the JIT can enter (same-compartment) realms without going
+ * through these methods - it will update cx->realm_ directly.
+ */
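+  // An illustrative sketch of the preferred pattern (js::AutoRealm is the
+  // VM's RAII helper for this; |obj| is any object in the target realm):
+  //
+  //   {
+  //     js::AutoRealm ar(cx, obj);  // enter obj's realm (LIFO)
+  //     // cx->realm() and cx->global() now reflect obj's realm
+  //   }                             // leave on destruction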
+ private:
+ inline void setRealm(JS::Realm* realm);
+ inline void enterRealm(JS::Realm* realm);
+
+ inline void enterAtomsZone();
+ inline void leaveAtomsZone(JS::Realm* oldRealm);
+ inline void setZone(js::Zone* zone);
+
+ friend class js::AutoAllocInAtomsZone;
+ friend class js::AutoMaybeLeaveAtomsZone;
+ friend class js::AutoRealm;
+
+ public:
+ inline void enterRealmOf(JSObject* target);
+ inline void enterRealmOf(JSScript* target);
+ inline void enterRealmOf(js::Shape* target);
+ inline void enterNullRealm();
+
+ inline void setRealmForJitExceptionHandler(JS::Realm* realm);
+
+ inline void leaveRealm(JS::Realm* oldRealm);
+
+ void setFrontendErrors(js::FrontendErrors* errors) { errors_ = errors; }
+ js::FrontendErrors* frontendErrors() const { return errors_; }
+
+ // Threads may freely access any data in their realm, compartment and zone.
+ JS::Compartment* compartment() const {
+ return realm_ ? JS::GetCompartmentForRealm(realm_) : nullptr;
+ }
+
+ JS::Realm* realm() const { return realm_; }
+
+#ifdef DEBUG
+ bool inAtomsZone() const;
+#endif
+
+ JS::Zone* zone() const {
+ MOZ_ASSERT_IF(!realm() && zone_, inAtomsZone());
+ MOZ_ASSERT_IF(realm(), js::GetRealmZone(realm()) == zone_);
+ return zoneRaw();
+ }
+
+ // For use when the context's zone is being read by another thread and the
+ // compartment and zone pointers might not be in sync.
+ JS::Zone* zoneRaw() const { return zone_; }
+
+ // For JIT use.
+ static size_t offsetOfZone() { return offsetof(JSContext, zone_); }
+
+ // Current global. This is only safe to use within the scope of the
+ // AutoRealm from which it's called.
+ inline js::Handle<js::GlobalObject*> global() const;
+
+ js::AtomsTable& atoms() { return runtime_->atoms(); }
+
+ js::SymbolRegistry& symbolRegistry() { return runtime_->symbolRegistry(); }
+
+ // Methods to access other runtime data that checks locking internally.
+ js::gc::AtomMarkingRuntime& atomMarking() { return runtime_->gc.atomMarking; }
+ void markAtom(JSAtom* atom) { atomMarking().markAtom(this, atom); }
+ void markAtom(JS::Symbol* symbol) { atomMarking().markAtom(this, symbol); }
+ void markId(jsid id) { atomMarking().markId(this, id); }
+ void markAtomValue(const js::Value& value) {
+ atomMarking().markAtomValue(this, value);
+ }
+
+ // Interface for recording telemetry metrics.
+ js::Metrics metrics() { return js::Metrics(runtime_); }
+
+ // Methods specific to any HelperThread for the context.
+ void addPendingOverRecursed();
+ void addPendingOutOfMemory();
+
+ JSRuntime* runtime() { return runtime_; }
+ const JSRuntime* runtime() const { return runtime_; }
+
+ static size_t offsetOfRealm() { return offsetof(JSContext, realm_); }
+
+ friend class JS::AutoSaveExceptionState;
+ friend class js::jit::DebugModeOSRVolatileJitFrameIter;
+ friend void js::ReportOutOfMemory(JSContext*);
+ friend void js::ReportOverRecursed(JSContext*);
+ friend void js::ReportOversizedAllocation(JSContext*, const unsigned);
+
+ public:
+ inline JS::Result<> boolToResult(bool ok);
+
+ /**
+ * Intentionally awkward signpost method that is stationed on the
+ * boundary between Result-using and non-Result-using code.
+ */
+ template <typename V, typename E>
+ bool resultToBool(const JS::Result<V, E>& result) {
+ return result.isOk();
+ }
+
+ template <typename V, typename E>
+ V* resultToPtr(JS::Result<V*, E>& result) {
+ return result.isOk() ? result.unwrap() : nullptr;
+ }
+
+ mozilla::GenericErrorResult<JS::OOM> alreadyReportedOOM();
+ mozilla::GenericErrorResult<JS::Error> alreadyReportedError();
+
+ /*
+ * Points to the most recent JitActivation pushed on the thread.
+ * See JitActivation constructor in vm/Stack.cpp
+ */
+ js::ContextData<js::jit::JitActivation*> jitActivation;
+
+ // Shim for V8 interfaces used by irregexp code
+ js::ContextData<js::irregexp::Isolate*> isolate;
+
+ /*
+ * Points to the most recent activation running on the thread.
+ * See Activation comment in vm/Stack.h.
+ */
+ js::ContextData<js::Activation*> activation_;
+
+ /*
+ * Points to the most recent profiling activation running on the
+ * thread.
+ */
+ js::Activation* volatile profilingActivation_;
+
+ public:
+ js::Activation* activation() const { return activation_; }
+ static size_t offsetOfActivation() {
+ return offsetof(JSContext, activation_);
+ }
+
+ js::Activation* profilingActivation() const { return profilingActivation_; }
+ static size_t offsetOfProfilingActivation() {
+ return offsetof(JSContext, profilingActivation_);
+ }
+
+ static size_t offsetOfJitActivation() {
+ return offsetof(JSContext, jitActivation);
+ }
+
+#ifdef DEBUG
+ static size_t offsetOfInUnsafeCallWithABI() {
+ return offsetof(JSContext, inUnsafeCallWithABI);
+ }
+#endif
+
+ static size_t offsetOfInlinedICScript() {
+ return offsetof(JSContext, inlinedICScript_);
+ }
+
+ public:
+ js::InterpreterStack& interpreterStack() {
+ return runtime()->interpreterStack();
+ }
+
+ private:
+ // Base address of the native stack for the current thread.
+ mozilla::Maybe<JS::NativeStackBase> nativeStackBase_;
+
+ public:
+ JS::NativeStackBase nativeStackBase() const {
+ MOZ_ASSERT(isMainThreadContext());
+ return *nativeStackBase_;
+ }
+
+ public:
+ /* If non-null, report JavaScript entry points to this monitor. */
+ js::ContextData<JS::dbg::AutoEntryMonitor*> entryMonitor;
+
+ /*
+ * Stack of debuggers that currently disallow debuggee execution.
+ *
+ * When we check for NX we are inside the debuggee compartment, and thus a
+ * stack of Debuggers that have prevented execution need to be tracked to
+ * enter the correct Debugger compartment to report the error.
+ */
+ js::ContextData<js::EnterDebuggeeNoExecute*> noExecuteDebuggerTop;
+
+#ifdef DEBUG
+ js::ContextData<uint32_t> inUnsafeCallWithABI;
+ js::ContextData<bool> hasAutoUnsafeCallWithABI;
+#endif
+
+#ifdef JS_SIMULATOR
+ private:
+ js::ContextData<js::jit::Simulator*> simulator_;
+
+ public:
+ js::jit::Simulator* simulator() const;
+ JS::NativeStackLimit* addressOfSimulatorStackLimit();
+#endif
+
+ public:
+ // State used by util/DoubleToString.cpp.
+ js::ContextData<DtoaState*> dtoaState;
+
+ /*
+ * When this flag is non-zero, any attempt to GC will be skipped. See the
+   * AutoSuppressGC class for details.
+ */
+ js::ContextData<int32_t> suppressGC;
+
+#ifdef FUZZING_JS_FUZZILLI
+ uint32_t executionHash;
+ uint32_t executionHashInputs;
+#endif
+
+#ifdef DEBUG
+ js::ContextData<size_t> noNurseryAllocationCheck;
+
+ /*
+ * If this is 0, all cross-compartment proxies must be registered in the
+ * wrapper map. This checking must be disabled temporarily while creating
+ * new wrappers. When non-zero, this records the recursion depth of wrapper
+ * creation.
+ */
+ js::ContextData<uintptr_t> disableStrictProxyCheckingCount;
+
+ bool isNurseryAllocAllowed() { return noNurseryAllocationCheck == 0; }
+ void disallowNurseryAlloc() { ++noNurseryAllocationCheck; }
+ void allowNurseryAlloc() {
+ MOZ_ASSERT(!isNurseryAllocAllowed());
+ --noNurseryAllocationCheck;
+ }
+
+ bool isStrictProxyCheckingEnabled() {
+ return disableStrictProxyCheckingCount == 0;
+ }
+ void disableStrictProxyChecking() { ++disableStrictProxyCheckingCount; }
+ void enableStrictProxyChecking() {
+ MOZ_ASSERT(disableStrictProxyCheckingCount > 0);
+ --disableStrictProxyCheckingCount;
+ }
+#endif
+
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ // We are currently running a simulated OOM test.
+ js::ContextData<bool> runningOOMTest;
+#endif
+
+ /*
+ * Some regions of code are hard for the static rooting hazard analysis to
+ * understand. In those cases, we trade the static analysis for a dynamic
+ * analysis. When this is non-zero, we should assert if we trigger, or
+ * might trigger, a GC.
+ */
+ js::ContextData<int> inUnsafeRegion;
+
+ // Count of AutoDisableGenerationalGC instances on the thread's stack.
+ js::ContextData<unsigned> generationalDisabled;
+
+ // Some code cannot tolerate compacting GC so it can be disabled temporarily
+ // with AutoDisableCompactingGC which uses this counter.
+ js::ContextData<unsigned> compactingDisabledCount;
+
+ private:
+ // Pools used for recycling name maps and vectors when parsing and
+ // emitting bytecode. Purged on GC when there are no active script
+ // compilations.
+ js::ContextData<js::frontend::NameCollectionPool> frontendCollectionPool_;
+
+ public:
+ js::frontend::NameCollectionPool& frontendCollectionPool() {
+ return frontendCollectionPool_.ref();
+ }
+
+ void verifyIsSafeToGC() {
+ MOZ_DIAGNOSTIC_ASSERT(!inUnsafeRegion,
+ "[AutoAssertNoGC] possible GC in GC-unsafe region");
+ }
+
+ bool isInUnsafeRegion() const { return bool(inUnsafeRegion); }
+
+ // For JIT use.
+ void resetInUnsafeRegion() {
+ MOZ_ASSERT(inUnsafeRegion >= 0);
+ inUnsafeRegion = 0;
+ }
+
+ static constexpr size_t offsetOfInUnsafeRegion() {
+ return offsetof(JSContext, inUnsafeRegion);
+ }
+
+ /* Whether sampling should be enabled or not. */
+ private:
+ mozilla::Atomic<bool, mozilla::SequentiallyConsistent>
+ suppressProfilerSampling;
+
+ public:
+ bool isProfilerSamplingEnabled() const { return !suppressProfilerSampling; }
+ void disableProfilerSampling() { suppressProfilerSampling = true; }
+ void enableProfilerSampling() { suppressProfilerSampling = false; }
+
+ private:
+ js::wasm::Context wasm_;
+
+ public:
+ js::wasm::Context& wasm() { return wasm_; }
+
+ /* Temporary arena pool used while compiling and decompiling. */
+ static const size_t TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 4 * 1024;
+
+ private:
+ js::ContextData<js::LifoAlloc> tempLifoAlloc_;
+
+ public:
+ js::LifoAlloc& tempLifoAlloc() { return tempLifoAlloc_.ref(); }
+ const js::LifoAlloc& tempLifoAlloc() const { return tempLifoAlloc_.ref(); }
+ js::LifoAlloc& tempLifoAllocNoCheck() { return tempLifoAlloc_.refNoCheck(); }
+
+ js::ContextData<uint32_t> debuggerMutations;
+
+ // Cache for jit::GetPcScript().
+ js::ContextData<js::UniquePtr<js::jit::PcScriptCache>> ionPcScriptCache;
+
+ private:
+ // Indicates if an exception is pending and the reason for it.
+ js::ContextData<JS::ExceptionStatus> status;
+ js::ContextData<JS::PersistentRooted<JS::Value>>
+ unwrappedException_; /* most-recently-thrown exception */
+ js::ContextData<JS::PersistentRooted<js::SavedFrame*>>
+ unwrappedExceptionStack_; /* stack when the exception was thrown */
+
+ JS::Value& unwrappedException() {
+ if (!unwrappedException_.ref().initialized()) {
+ unwrappedException_.ref().init(this);
+ }
+ return unwrappedException_.ref().get();
+ }
+
+ js::SavedFrame*& unwrappedExceptionStack() {
+ if (!unwrappedExceptionStack_.ref().initialized()) {
+ unwrappedExceptionStack_.ref().init(this);
+ }
+ return unwrappedExceptionStack_.ref().get();
+ }
+
+#ifdef DEBUG
+ // True if this context has ever thrown an exception because of an exceeded
+ // limit: stack space (ReportOverRecursed), memory (ReportOutOfMemory), or
+ // some other self-imposed limit (eg ReportOversizedAllocation). Used when
+ // detecting bailout loops in WarpOracle: bailout loops involving resource
+ // exhaustion are generally not interesting.
+ js::ContextData<bool> hadResourceExhaustion_;
+
+ public:
+ bool hadResourceExhaustion() const {
+ return hadResourceExhaustion_ || js::oom::simulator.isThreadSimulatingAny();
+ }
+#endif
+
+ public:
+ void reportResourceExhaustion() {
+#ifdef DEBUG
+ hadResourceExhaustion_ = true;
+#endif
+ }
+
+ js::ContextData<int32_t> reportGranularity; /* see vm/Probes.h */
+
+ js::ContextData<js::AutoResolving*> resolvingList;
+
+#ifdef DEBUG
+ js::ContextData<js::AutoEnterPolicy*> enteredPolicy;
+#endif
+
+ /* True if generating an error, to prevent runaway recursion. */
+ js::ContextData<bool> generatingError;
+
+ private:
+ /* State for object and array toSource conversion. */
+ js::ContextData<js::AutoCycleDetector::Vector> cycleDetectorVector_;
+
+ public:
+ js::AutoCycleDetector::Vector& cycleDetectorVector() {
+ return cycleDetectorVector_.ref();
+ }
+ const js::AutoCycleDetector::Vector& cycleDetectorVector() const {
+ return cycleDetectorVector_.ref();
+ }
+
+ /* Client opaque pointer. */
+ js::UnprotectedData<void*> data;
+
+ void initJitStackLimit();
+ void resetJitStackLimit();
+
+ public:
+ JS::ContextOptions& options() { return options_.ref(); }
+
+ bool runtimeMatches(JSRuntime* rt) const { return runtime_ == rt; }
+
+ private:
+ /*
+ * Youngest frame of a saved stack that will be picked up as an async stack
+ * by any new Activation, and is nullptr when no async stack should be used.
+ *
+ * The JS::AutoSetAsyncStackForNewCalls class can be used to set this.
+ *
+ * New activations will reset this to nullptr on construction after getting
+ * the current value, and will restore the previous value on destruction.
+ */
+ js::ContextData<JS::PersistentRooted<js::SavedFrame*>>
+ asyncStackForNewActivations_;
+
+ public:
+ js::SavedFrame*& asyncStackForNewActivations() {
+ if (!asyncStackForNewActivations_.ref().initialized()) {
+ asyncStackForNewActivations_.ref().init(this);
+ }
+ return asyncStackForNewActivations_.ref().get();
+ }
+
+ /*
+ * Value of asyncCause to be attached to asyncStackForNewActivations.
+ */
+ js::ContextData<const char*> asyncCauseForNewActivations;
+
+ /*
+ * True if the async call was explicitly requested, e.g. via
+ * callFunctionWithAsyncStack.
+ */
+ js::ContextData<bool> asyncCallIsExplicit;
+
+ bool currentlyRunningInInterpreter() const {
+ return activation()->isInterpreter();
+ }
+ bool currentlyRunningInJit() const { return activation()->isJit(); }
+ js::InterpreterFrame* interpreterFrame() const {
+ return activation()->asInterpreter()->current();
+ }
+ js::InterpreterRegs& interpreterRegs() const {
+ return activation()->asInterpreter()->regs();
+ }
+
+ /*
+ * Get the topmost script and optional pc on the stack. By default, this
+ * function only returns a JSScript in the current realm, returning nullptr
+ * if the current script is in a different realm. This behavior can be
+ * overridden by passing AllowCrossRealm::Allow.
+ */
+ enum class AllowCrossRealm { DontAllow = false, Allow = true };
+ inline JSScript* currentScript(
+ jsbytecode** pc = nullptr,
+ AllowCrossRealm allowCrossRealm = AllowCrossRealm::DontAllow) const;
+
+ inline js::Nursery& nursery();
+ inline void minorGC(JS::GCReason reason);
+
+ public:
+ bool isExceptionPending() const {
+ return JS::IsCatchableExceptionStatus(status);
+ }
+
+ [[nodiscard]] bool getPendingException(JS::MutableHandleValue rval);
+
+ js::SavedFrame* getPendingExceptionStack();
+
+ bool isThrowingDebuggeeWouldRun();
+ bool isClosingGenerator();
+
+ void setPendingException(JS::HandleValue v,
+ JS::Handle<js::SavedFrame*> stack);
+ void setPendingException(JS::HandleValue v,
+ js::ShouldCaptureStack captureStack);
+
+ void clearPendingException() {
+ status = JS::ExceptionStatus::None;
+ unwrappedException().setUndefined();
+ unwrappedExceptionStack() = nullptr;
+ }
+
+ bool isThrowingOutOfMemory() const {
+ return status == JS::ExceptionStatus::OutOfMemory;
+ }
+ bool isThrowingOverRecursed() const {
+ return status == JS::ExceptionStatus::OverRecursed;
+ }
+ bool isPropagatingForcedReturn() const {
+ return status == JS::ExceptionStatus::ForcedReturn;
+ }
+ void setPropagatingForcedReturn() {
+ MOZ_ASSERT(status == JS::ExceptionStatus::None);
+ status = JS::ExceptionStatus::ForcedReturn;
+ }
+ void clearPropagatingForcedReturn() {
+ MOZ_ASSERT(status == JS::ExceptionStatus::ForcedReturn);
+ status = JS::ExceptionStatus::None;
+ }
+
+ /*
+ * See JS_SetTrustedPrincipals in jsapi.h.
+ * Note: !cx->realm() is treated as trusted.
+ */
+ inline bool runningWithTrustedPrincipals();
+
+  // Checks whether the page's Content-Security-Policy (CSP) allows runtime
+  // code generation: "unsafe-eval" for JS, or "wasm-unsafe-eval" for Wasm.
+ bool isRuntimeCodeGenEnabled(JS::RuntimeCode kind, js::HandleString code);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ void trace(JSTracer* trc);
+
+ inline js::RuntimeCaches& caches();
+
+ public:
+ using InterruptCallbackVector =
+ js::Vector<JSInterruptCallback, 2, js::SystemAllocPolicy>;
+
+ private:
+ js::ContextData<InterruptCallbackVector> interruptCallbacks_;
+
+ public:
+ InterruptCallbackVector& interruptCallbacks() {
+ return interruptCallbacks_.ref();
+ }
+
+ js::ContextData<bool> interruptCallbackDisabled;
+
+ // Bitfield storing InterruptReason values.
+ mozilla::Atomic<uint32_t, mozilla::Relaxed> interruptBits_;
+
+ // Any thread can call requestInterrupt() to request that this thread
+ // stop running. To stop this thread, requestInterrupt sets two fields:
+ // interruptBits_ (a bitset of InterruptReasons) and jitStackLimit (set to
+ // JS::NativeStackLimitMin). The JS engine must continually poll one of these
+ // fields and call handleInterrupt if either field has the interrupt value.
+ //
+ // The point of setting jitStackLimit to JS::NativeStackLimitMin is that JIT
+ // code already needs to guard on jitStackLimit in every function prologue to
+ // avoid stack overflow, so we avoid a second branch on interruptBits_ by
+  // setting jitStackLimit to a value that is guaranteed to fail the guard.
+ //
+  // Note that the writes to interruptBits_ and jitStackLimit use relaxed
+  // atomics, so, while the writes are guaranteed to eventually become visible
+  // to this thread, they may do so in any order. handleInterrupt calls the
+  // interrupt callback if either is set, so the order doesn't matter as long
+ // as the JS engine is continually polling at least one field. In corner
+ // cases, this relaxed ordering could lead to an interrupt handler being
+ // called twice in succession after a single requestInterrupt call, but
+ // that's fine.
+ void requestInterrupt(js::InterruptReason reason);
+ bool handleInterrupt();
+
+ MOZ_ALWAYS_INLINE bool hasAnyPendingInterrupt() const {
+ static_assert(sizeof(interruptBits_) == sizeof(uint32_t),
+ "Assumed by JIT callers");
+ return interruptBits_ != 0;
+ }
+ bool hasPendingInterrupt(js::InterruptReason reason) const {
+ return interruptBits_ & uint32_t(reason);
+ }
+
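+  // An illustrative sketch of the embedding-facing flow (public names from
+  // jsapi.h / js/Interrupt.h; gShouldCancel is hypothetical):
+  //
+  //   static bool OnInterrupt(JSContext* cx) {
+  //     return !gShouldCancel;  // returning false aborts running JS
+  //   }
+  //   ...
+  //   JS_AddInterruptCallback(cx, OnInterrupt);  // once, on the main thread
+  //   JS_RequestInterruptCallback(cx);           // later, from any thread
+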
+ // For JIT use. Points to the inlined ICScript for a baseline script
+ // being invoked as part of a trial inlining. Contains nullptr at
+ // all times except for the brief moment between being set in the
+ // caller and read in the callee's prologue.
+ js::ContextData<js::jit::ICScript*> inlinedICScript_;
+
+ public:
+ void* addressOfInterruptBits() { return &interruptBits_; }
+ void* addressOfJitStackLimit() {
+ MOZ_ASSERT(isMainThreadContext());
+ return &jitStackLimit;
+ }
+ void* addressOfJitStackLimitNoInterrupt() {
+ MOZ_ASSERT(isMainThreadContext());
+ return &jitStackLimitNoInterrupt;
+ }
+ void* addressOfZone() { return &zone_; }
+
+ const void* addressOfRealm() const { return &realm_; }
+
+ void* addressOfInlinedICScript() { return &inlinedICScript_; }
+
+ // Futex state, used by Atomics.wait() and Atomics.wake() on the Atomics
+ // object.
+ js::FutexThread fx;
+
+ mozilla::Atomic<JS::NativeStackLimit, mozilla::Relaxed> jitStackLimit;
+
+ // Like jitStackLimit, but not reset to trigger interrupts.
+ js::ContextData<JS::NativeStackLimit> jitStackLimitNoInterrupt;
+
+ // Queue of pending jobs as described in ES2016 section 8.4.
+ //
+ // This is a non-owning pointer to either:
+ // - a JobQueue implementation the embedding provided by calling
+ // JS::SetJobQueue, owned by the embedding, or
+ // - our internal JobQueue implementation, established by calling
+ // js::UseInternalJobQueues, owned by JSContext::internalJobQueue below.
+ js::ContextData<JS::JobQueue*> jobQueue;
+
+ // If the embedding has called js::UseInternalJobQueues, this is the owning
+ // pointer to our internal JobQueue implementation, which JSContext::jobQueue
+ // borrows.
+ js::ContextData<js::UniquePtr<js::InternalJobQueue>> internalJobQueue;
+
+ // True if jobQueue is empty, or we are running the last job in the queue.
+ // Such conditions permit optimizations around `await` expressions.
+ js::ContextData<bool> canSkipEnqueuingJobs;
+
+ js::ContextData<JS::PromiseRejectionTrackerCallback>
+ promiseRejectionTrackerCallback;
+ js::ContextData<void*> promiseRejectionTrackerCallbackData;
+
+ JSObject* getIncumbentGlobal(JSContext* cx);
+ bool enqueuePromiseJob(JSContext* cx, js::HandleFunction job,
+ js::HandleObject promise,
+ js::HandleObject incumbentGlobal);
+ void addUnhandledRejectedPromise(JSContext* cx, js::HandleObject promise);
+ void removeUnhandledRejectedPromise(JSContext* cx, js::HandleObject promise);
+
+ private:
+ template <class... Args>
+ inline void checkImpl(const Args&... args);
+
+ bool contextChecksEnabled() const {
+ // Don't perform these checks when called from a finalizer. The checking
+ // depends on other objects not having been swept yet.
+ return !RuntimeHeapIsCollecting(runtime()->heapState());
+ }
+
+ public:
+ // Assert the arguments are in this context's realm (for scripts),
+ // compartment (for objects) or zone (for strings, symbols).
+ template <class... Args>
+ inline void check(const Args&... args);
+ template <class... Args>
+ inline void releaseCheck(const Args&... args);
+ template <class... Args>
+ MOZ_ALWAYS_INLINE void debugOnlyCheck(const Args&... args);
+
+#ifdef JS_STRUCTURED_SPEW
+ private:
+ // Spewer for this thread
+ js::UnprotectedData<js::StructuredSpewer> structuredSpewer_;
+
+ public:
+ js::StructuredSpewer& spewer() { return structuredSpewer_.ref(); }
+#endif
+
+ // During debugger evaluations which need to observe native calls, JITs are
+  // completely disabled. This field indicates whether we are in this state
+  // and, if so, which debugger initiated the evaluation. That debugger has other
+ // references on the stack and does not need to be traced.
+ js::ContextData<js::Debugger*> insideDebuggerEvaluationWithOnNativeCallHook;
+
+}; /* struct JSContext */
+
+inline JS::Result<> JSContext::boolToResult(bool ok) {
+ if (MOZ_LIKELY(ok)) {
+ MOZ_ASSERT(!isExceptionPending());
+ MOZ_ASSERT(!isPropagatingForcedReturn());
+ return JS::Ok();
+ }
+ return JS::Result<>(JS::Error());
+}
+
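+// An illustrative sketch of the two styles these helpers bridge (DefineX and
+// DefineXBool are hypothetical):
+//
+//   JS::Result<> DefineX(JSContext* cx, JS::HandleObject obj) {
+//     bool ok = JS_DefineProperty(cx, obj, "x", 1, JSPROP_ENUMERATE);
+//     MOZ_TRY(cx->boolToResult(ok));  // propagate failure as JS::Error
+//     return JS::Ok();
+//   }
+//
+//   bool DefineXBool(JSContext* cx, JS::HandleObject obj) {
+//     return cx->resultToBool(DefineX(cx, obj));  // back to the bool world
+//   }
+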
+inline JSContext* JSRuntime::mainContextFromOwnThread() {
+ MOZ_ASSERT(mainContextFromAnyThread() == js::TlsContext.get());
+ return mainContextFromAnyThread();
+}
+
+namespace js {
+
+struct MOZ_RAII AutoResolving {
+ public:
+ enum Kind { LOOKUP, WATCH };
+
+ AutoResolving(JSContext* cx, HandleObject obj, HandleId id,
+ Kind kind = LOOKUP)
+ : context(cx), object(obj), id(id), kind(kind), link(cx->resolvingList) {
+ MOZ_ASSERT(obj);
+ cx->resolvingList = this;
+ }
+
+ ~AutoResolving() {
+ MOZ_ASSERT(context->resolvingList == this);
+ context->resolvingList = link;
+ }
+
+ bool alreadyStarted() const { return link && alreadyStartedSlow(); }
+
+ private:
+ bool alreadyStartedSlow() const;
+
+ JSContext* const context;
+ HandleObject object;
+ HandleId id;
+ Kind const kind;
+ AutoResolving* const link;
+};
+
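+// An illustrative sketch of the guard pattern (in the engine proper this
+// wraps calls to JSClass resolve hooks):
+//
+//   AutoResolving resolving(cx, obj, id);
+//   if (resolving.alreadyStarted()) {
+//     return true;  // obj/id is already being resolved higher on the stack
+//   }
+//   // ... invoke the resolve hook for obj/id ...
+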
+/*
+ * Create and destroy functions for JSContext, which is manually allocated
+ * and exclusively owned.
+ */
+extern JSContext* NewContext(uint32_t maxBytes, JSRuntime* parentRuntime);
+
+extern void DestroyContext(JSContext* cx);
+
+/* |callee| requires a usage string provided by JS_DefineFunctionsWithHelp. */
+extern void ReportUsageErrorASCII(JSContext* cx, HandleObject callee,
+ const char* msg);
+
+extern void ReportIsNotDefined(JSContext* cx, Handle<PropertyName*> name);
+
+extern void ReportIsNotDefined(JSContext* cx, HandleId id);
+
+/*
+ * Report an attempt to access the property of a null or undefined value (v).
+ */
+extern void ReportIsNullOrUndefinedForPropertyAccess(JSContext* cx,
+ HandleValue v, int vIndex);
+extern void ReportIsNullOrUndefinedForPropertyAccess(JSContext* cx,
+ HandleValue v, int vIndex,
+ HandleId key);
+
+/*
+ * Report error using js::DecompileValueGenerator(cx, spindex, v, fallback) as
+ * the first argument for the error message.
+ */
+extern bool ReportValueError(JSContext* cx, const unsigned errorNumber,
+ int spindex, HandleValue v, HandleString fallback,
+ const char* arg1 = nullptr,
+ const char* arg2 = nullptr);
+
+JSObject* CreateErrorNotesArray(JSContext* cx, JSErrorReport* report);
+
+/************************************************************************/
+
+/*
+ * Encapsulates an external array of values and adds a trace method, for use in
+ * Rooted.
+ */
+class MOZ_STACK_CLASS ExternalValueArray {
+ public:
+ ExternalValueArray(size_t len, Value* vec) : array_(vec), length_(len) {}
+
+ Value* begin() { return array_; }
+ size_t length() { return length_; }
+
+ void trace(JSTracer* trc);
+
+ private:
+ Value* array_;
+ size_t length_;
+};
+
+/* RootedExternalValueArray roots an external array of Values. */
+class MOZ_RAII RootedExternalValueArray
+ : public JS::Rooted<ExternalValueArray> {
+ public:
+ RootedExternalValueArray(JSContext* cx, size_t len, Value* vec)
+ : JS::Rooted<ExternalValueArray>(cx, ExternalValueArray(len, vec)) {}
+};
+
+class AutoAssertNoPendingException {
+#ifdef DEBUG
+ JSContext* cx_;
+
+ public:
+ explicit AutoAssertNoPendingException(JSContext* cxArg) : cx_(cxArg) {
+ MOZ_ASSERT(!JS_IsExceptionPending(cx_));
+ }
+
+ ~AutoAssertNoPendingException() { MOZ_ASSERT(!JS_IsExceptionPending(cx_)); }
+#else
+ public:
+ explicit AutoAssertNoPendingException(JSContext* cxArg) {}
+#endif
+};
+
+class MOZ_RAII AutoNoteDebuggerEvaluationWithOnNativeCallHook {
+ JSContext* cx;
+ Debugger* oldValue;
+
+ public:
+ AutoNoteDebuggerEvaluationWithOnNativeCallHook(JSContext* cx, Debugger* dbg)
+ : cx(cx), oldValue(cx->insideDebuggerEvaluationWithOnNativeCallHook) {
+ cx->insideDebuggerEvaluationWithOnNativeCallHook = dbg;
+ }
+
+ ~AutoNoteDebuggerEvaluationWithOnNativeCallHook() {
+ cx->insideDebuggerEvaluationWithOnNativeCallHook = oldValue;
+ }
+};
+
+enum UnsafeABIStrictness {
+ NoExceptions,
+ AllowPendingExceptions,
+ AllowThrownExceptions
+};
+
+// Should be used in functions called directly from JIT code (with
+// masm.callWithABI) to assert invariants in debug builds.
+// In debug mode, masm.callWithABI inserts code to verify that the
+// callee function uses AutoUnsafeCallWithABI.
+// While this object is live:
+// 1. cx->hasAutoUnsafeCallWithABI must be true.
+// 2. We can't GC.
+// 3. Exceptions should not be pending/thrown.
+//
+// Note that #3 is a precaution, not a requirement. By default, we
+// assert that the function is not called with a pending exception,
+// and that it does not throw an exception itself.
+class MOZ_RAII AutoUnsafeCallWithABI {
+#ifdef DEBUG
+ JSContext* cx_;
+ bool nested_;
+ bool checkForPendingException_;
+#endif
+ JS::AutoCheckCannotGC nogc;
+
+ public:
+#ifdef DEBUG
+ explicit AutoUnsafeCallWithABI(
+ UnsafeABIStrictness strictness = UnsafeABIStrictness::NoExceptions);
+ ~AutoUnsafeCallWithABI();
+#else
+ explicit AutoUnsafeCallWithABI(
+ UnsafeABIStrictness unused_ = UnsafeABIStrictness::NoExceptions) {}
+#endif
+};
+
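+// An illustrative sketch (HashValueForCache and ComputeHash are
+// hypothetical): a VM helper reached via masm.callWithABI declares the guard
+// on entry and must stay infallible while it is live.
+//
+//   int32_t HashValueForCache(JSContext* cx, const JS::Value* vp) {
+//     js::AutoUnsafeCallWithABI unsafe;  // no GC, no pending exceptions
+//     return ComputeHash(*vp);
+//   }
+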
+} /* namespace js */
+
+#define CHECK_THREAD(cx) \
+ MOZ_ASSERT_IF(cx, !cx->isHelperThreadContext() && \
+ js::CurrentThreadCanAccessRuntime(cx->runtime()))
+
+#endif /* vm_JSContext_h */
diff --git a/js/src/vm/JSFunction-inl.h b/js/src/vm/JSFunction-inl.h
new file mode 100644
index 0000000000..39563bbd67
--- /dev/null
+++ b/js/src/vm/JSFunction-inl.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSFunction_inl_h
+#define vm_JSFunction_inl_h
+
+#include "vm/JSFunction.h"
+
+#include "gc/Allocator.h"
+#include "gc/GCProbes.h"
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#include "gc/ObjectKind-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+namespace js {
+
+inline const char* GetFunctionNameBytes(JSContext* cx, JSFunction* fun,
+ UniqueChars* bytes) {
+ if (JSAtom* name = fun->explicitName()) {
+ *bytes = StringToNewUTF8CharsZ(cx, *name);
+ return bytes->get();
+ }
+ return js_anonymous_str;
+}
+
+} /* namespace js */
+
+/* static */
+inline JSFunction* JSFunction::create(JSContext* cx, js::gc::AllocKind kind,
+ js::gc::Heap heap,
+ js::Handle<js::SharedShape*> shape) {
+ MOZ_ASSERT(kind == js::gc::AllocKind::FUNCTION ||
+ kind == js::gc::AllocKind::FUNCTION_EXTENDED);
+
+ debugCheckNewObject(shape, kind, heap);
+
+ const JSClass* clasp = shape->getObjectClass();
+ MOZ_ASSERT(clasp->isNativeObject());
+ MOZ_ASSERT(clasp->isJSFunction());
+ MOZ_ASSERT_IF(kind == js::gc::AllocKind::FUNCTION,
+ clasp == js::FunctionClassPtr);
+ MOZ_ASSERT_IF(kind == js::gc::AllocKind::FUNCTION_EXTENDED,
+ clasp == js::FunctionExtendedClassPtr);
+
+ MOZ_ASSERT(calculateDynamicSlots(shape->numFixedSlots(), shape->slotSpan(),
+ clasp) == 0);
+
+ NativeObject* nobj = cx->newCell<NativeObject>(kind, heap, clasp);
+ if (!nobj) {
+ return nullptr;
+ }
+
+ nobj->initShape(shape);
+
+ nobj->initEmptyDynamicSlots();
+ nobj->setEmptyElements();
+
+ JSFunction* fun = static_cast<JSFunction*>(nobj);
+ fun->initFixedSlots(JSCLASS_RESERVED_SLOTS(clasp));
+ fun->initFlagsAndArgCount();
+ fun->initFixedSlot(NativeJitInfoOrInterpretedScriptSlot,
+ JS::PrivateValue(nullptr));
+
+ if (kind == js::gc::AllocKind::FUNCTION_EXTENDED) {
+ fun->setFlags(FunctionFlags::EXTENDED);
+ }
+
+ MOZ_ASSERT(!clasp->shouldDelayMetadataBuilder(),
+ "Function has no extra data hanging off it, that wouldn't be "
+ "allocated at this point, that would require delaying the "
+ "building of metadata for it");
+ if (MOZ_UNLIKELY(cx->realm()->hasAllocationMetadataBuilder())) {
+ fun = SetNewObjectMetadata(cx, fun);
+ }
+
+ js::gc::gcprobes::CreateObject(fun);
+
+ return fun;
+}
+
+/* static */
+inline bool JSFunction::getLength(JSContext* cx, js::HandleFunction fun,
+ uint16_t* length) {
+ if (fun->isNativeFun()) {
+ *length = fun->nargs();
+ return true;
+ }
+
+ JSScript* script = getOrCreateScript(cx, fun);
+ if (!script) {
+ return false;
+ }
+
+ *length = script->funLength();
+ return true;
+}
+
+/* static */
+inline bool JSFunction::getUnresolvedLength(JSContext* cx,
+ js::HandleFunction fun,
+ uint16_t* length) {
+ MOZ_ASSERT(!IsInternalFunctionObject(*fun));
+ MOZ_ASSERT(!fun->hasResolvedLength());
+
+ return JSFunction::getLength(cx, fun, length);
+}
+
+inline JSAtom* JSFunction::infallibleGetUnresolvedName(JSContext* cx) {
+ MOZ_ASSERT(!IsInternalFunctionObject(*this));
+ MOZ_ASSERT(!hasResolvedName());
+
+ if (JSAtom* name = explicitOrInferredName()) {
+ return name;
+ }
+
+ return cx->names().empty;
+}
+
+/* static */ inline bool JSFunction::getAllocKindForThis(
+ JSContext* cx, js::HandleFunction func, js::gc::AllocKind& allocKind) {
+ JSScript* script = getOrCreateScript(cx, func);
+ if (!script) {
+ return false;
+ }
+
+ size_t propertyCountEstimate =
+ script->immutableScriptData()->propertyCountEstimate;
+
+  // Choose the alloc kind assuming at least the default NewObjectKind slot
+  // count, but a bigger one if our estimate shows we need it.
+ allocKind = js::gc::GetGCObjectKind(std::max(
+ js::gc::GetGCKindSlots(js::NewObjectGCKind()), propertyCountEstimate));
+ return true;
+}
+
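+// For example (assuming the standard OBJECT0/2/4/8/12/16 slot buckets):
+// GetGCObjectKind rounds the requested slot count up to the next bucket, so a
+// propertyCountEstimate of 11 that exceeds the default kind's slot count
+// yields js::gc::AllocKind::OBJECT12.
+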
+#endif /* vm_JSFunction_inl_h */
diff --git a/js/src/vm/JSFunction.cpp b/js/src/vm/JSFunction.cpp
new file mode 100644
index 0000000000..01e1da2f0a
--- /dev/null
+++ b/js/src/vm/JSFunction.cpp
@@ -0,0 +1,1979 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS function support.
+ */
+
+#include "vm/JSFunction-inl.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Range.h"
+
+#include <algorithm>
+#include <string.h>
+
+#include "jsapi.h"
+#include "jstypes.h"
+
+#include "builtin/Array.h"
+#include "builtin/BigInt.h"
+#include "builtin/Object.h"
+#include "builtin/Symbol.h"
+#include "frontend/BytecodeCompilation.h"
+#include "frontend/BytecodeCompiler.h"
+#include "frontend/FrontendContext.h" // AutoReportFrontendContext, ManualReportFrontendContext
+#include "jit/InlinableNatives.h"
+#include "jit/Ion.h"
+#include "js/CallNonGenericMethod.h"
+#include "js/CompilationAndEvaluation.h"
+#include "js/CompileOptions.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/PropertySpec.h"
+#include "js/SourceText.h"
+#include "js/StableStringChars.h"
+#include "js/Wrapper.h"
+#include "util/DifferentialTesting.h"
+#include "util/StringBuffer.h"
+#include "util/Text.h"
+#include "vm/BooleanObject.h"
+#include "vm/BoundFunctionObject.h"
+#include "vm/Compartment.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GeneratorAndAsyncKind.h" // js::GeneratorKind, js::FunctionAsyncKind
+#include "vm/GlobalObject.h"
+#include "vm/Interpreter.h"
+#include "vm/JSAtom.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "vm/NumberObject.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/SelfHosting.h"
+#include "vm/Shape.h"
+#include "vm/StringObject.h"
+#include "vm/WellKnownAtom.h" // js_*_str
+#include "wasm/AsmJS.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordType.h"
+# include "vm/TupleType.h"
+#endif
+
+#include "vm/Interpreter-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+
+using mozilla::CheckedInt;
+using mozilla::Maybe;
+using mozilla::Some;
+
+using JS::AutoStableStringChars;
+using JS::CompileOptions;
+using JS::SourceOwnership;
+using JS::SourceText;
+
+static bool fun_enumerate(JSContext* cx, HandleObject obj) {
+ MOZ_ASSERT(obj->is<JSFunction>());
+
+ RootedId id(cx);
+ bool found;
+
+ if (obj->as<JSFunction>().needsPrototypeProperty()) {
+ id = NameToId(cx->names().prototype);
+ if (!HasOwnProperty(cx, obj, id, &found)) {
+ return false;
+ }
+ }
+
+ if (!obj->as<JSFunction>().hasResolvedLength()) {
+ id = NameToId(cx->names().length);
+ if (!HasOwnProperty(cx, obj, id, &found)) {
+ return false;
+ }
+ }
+
+ if (!obj->as<JSFunction>().hasResolvedName()) {
+ id = NameToId(cx->names().name);
+ if (!HasOwnProperty(cx, obj, id, &found)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool IsFunction(HandleValue v) {
+ return v.isObject() && v.toObject().is<JSFunction>();
+}
+
+static bool AdvanceToActiveCallLinear(JSContext* cx,
+ NonBuiltinScriptFrameIter& iter,
+ HandleFunction fun) {
+ MOZ_ASSERT(!fun->isBuiltin());
+
+ for (; !iter.done(); ++iter) {
+ if (!iter.isFunctionFrame()) {
+ continue;
+ }
+ if (iter.matchCallee(cx, fun)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void js::ThrowTypeErrorBehavior(JSContext* cx) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_THROW_TYPE_ERROR);
+}
+
+static bool IsSloppyNormalFunction(JSFunction* fun) {
+ // FunctionDeclaration or FunctionExpression in sloppy mode.
+ if (fun->kind() == FunctionFlags::NormalFunction) {
+ if (fun->isBuiltin()) {
+ return false;
+ }
+
+ if (fun->isGenerator() || fun->isAsync()) {
+ return false;
+ }
+
+ MOZ_ASSERT(fun->isInterpreted());
+ return !fun->strict();
+ }
+
+ // Or asm.js function in sloppy mode.
+ if (fun->kind() == FunctionFlags::AsmJS) {
+ return !IsAsmJSStrictModeModuleOrFunction(fun);
+ }
+
+ return false;
+}
+
+// Beware: this function can be invoked on *any* function! That includes
+// natives, strict mode functions, bound functions, arrow functions,
+// self-hosted functions and constructors, asm.js functions, functions with
+// destructuring arguments and/or a rest argument, and probably a few more I
+// forgot. Turn back and save yourself while you still can. It's too late for
+// me.
+static bool ArgumentsRestrictions(JSContext* cx, HandleFunction fun) {
+ // Throw unless the function is a sloppy, normal function.
+ // TODO (bug 1057208): ensure semantics are correct for all possible
+ // pairings of callee/caller.
+ if (!IsSloppyNormalFunction(fun)) {
+ ThrowTypeErrorBehavior(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool ArgumentsGetterImpl(JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(IsFunction(args.thisv()));
+
+ RootedFunction fun(cx, &args.thisv().toObject().as<JSFunction>());
+ if (!ArgumentsRestrictions(cx, fun)) {
+ return false;
+ }
+
+ // Function.arguments isn't standard (not even Annex B), so it isn't
+ // worth the effort to guarantee that we can always recover it from
+ // an Ion frame. Always return null for differential fuzzing.
+ if (js::SupportDifferentialTesting()) {
+ args.rval().setNull();
+ return true;
+ }
+
+ // Return null if this function wasn't found on the stack.
+ NonBuiltinScriptFrameIter iter(cx);
+ if (!AdvanceToActiveCallLinear(cx, iter, fun)) {
+ args.rval().setNull();
+ return true;
+ }
+
+ Rooted<ArgumentsObject*> argsobj(cx,
+ ArgumentsObject::createUnexpected(cx, iter));
+ if (!argsobj) {
+ return false;
+ }
+
+#ifndef JS_CODEGEN_NONE
+  // Disable Ion compilation of this script. IonMonkey doesn't guarantee
+  // |f.arguments| can be fully recovered, so we mitigate the chance of
+  // observing this behavior by detecting its use early.
+ JSScript* script = iter.script();
+ jit::ForbidCompilation(cx, script);
+#endif
+
+ args.rval().setObject(*argsobj);
+ return true;
+}
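+
+// For illustration, a sketch of the behavior implemented above (assuming a
+// regular build without differential testing enabled):
+//
+//   function f() { return f.arguments[0]; }
+//   f(1);        // 1: |f| is found on the stack, so an arguments object is
+//                // materialized for its frame
+//   f.arguments; // null: |f| has no active frame anymore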
+
+static bool ArgumentsGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsFunction, ArgumentsGetterImpl>(cx, args);
+}
+
+bool ArgumentsSetterImpl(JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(IsFunction(args.thisv()));
+
+ RootedFunction fun(cx, &args.thisv().toObject().as<JSFunction>());
+ if (!ArgumentsRestrictions(cx, fun)) {
+ return false;
+ }
+
+ // If the function passes the gauntlet, return |undefined|.
+ args.rval().setUndefined();
+ return true;
+}
+
+static bool ArgumentsSetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsFunction, ArgumentsSetterImpl>(cx, args);
+}
+
+// Beware: this function can be invoked on *any* function! That includes
+// natives, strict mode functions, bound functions, arrow functions,
+// self-hosted functions and constructors, asm.js functions, functions with
+// destructuring arguments and/or a rest argument, and probably a few more I
+// forgot. Turn back and save yourself while you still can. It's too late for
+// me.
+static bool CallerRestrictions(JSContext* cx, HandleFunction fun) {
+ // Throw unless the function is a sloppy, normal function.
+ // TODO (bug 1057208): ensure semantics are correct for all possible
+ // pairings of callee/caller.
+ if (!IsSloppyNormalFunction(fun)) {
+ ThrowTypeErrorBehavior(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool CallerGetterImpl(JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(IsFunction(args.thisv()));
+
+ // Beware! This function can be invoked on *any* function! It can't
+ // assume it'll never be invoked on natives, strict mode functions, bound
+ // functions, or anything else that ordinarily has immutable .caller
+ // defined with [[ThrowTypeError]].
+ RootedFunction fun(cx, &args.thisv().toObject().as<JSFunction>());
+ if (!CallerRestrictions(cx, fun)) {
+ return false;
+ }
+
+ // Also return null if this function wasn't found on the stack.
+ NonBuiltinScriptFrameIter iter(cx);
+ if (!AdvanceToActiveCallLinear(cx, iter, fun)) {
+ args.rval().setNull();
+ return true;
+ }
+
+ ++iter;
+ while (!iter.done() && iter.isEvalFrame()) {
+ ++iter;
+ }
+
+ if (iter.done() || !iter.isFunctionFrame()) {
+ args.rval().setNull();
+ return true;
+ }
+
+ RootedObject caller(cx, iter.callee(cx));
+ if (!cx->compartment()->wrap(cx, &caller)) {
+ return false;
+ }
+
+ // Censor the caller if we don't have full access to it. If we do, but the
+ // caller is a function with strict mode code, throw a TypeError per ES5.
+ // If we pass these checks, we can return the computed caller.
+ {
+ JSObject* callerObj = CheckedUnwrapStatic(caller);
+ if (!callerObj) {
+ args.rval().setNull();
+ return true;
+ }
+
+ if (JS_IsDeadWrapper(callerObj)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEAD_OBJECT);
+ return false;
+ }
+
+ JSFunction* callerFun = &callerObj->as<JSFunction>();
+ MOZ_ASSERT(!callerFun->isBuiltin(),
+ "non-builtin iterator returned a builtin?");
+
+ if (callerFun->strict() || callerFun->isAsync() ||
+ callerFun->isGenerator()) {
+ args.rval().setNull();
+ return true;
+ }
+ }
+
+ args.rval().setObject(*caller);
+ return true;
+}
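+
+// For illustration, a sketch of the resulting behavior:
+//
+//   function inner() { return inner.caller; }
+//   function outer() { return inner(); }
+//   outer() === outer; // true: the next function frame up is |outer|
+//   inner();           // null: no calling function frame exists
+//
+// Strict, async, and generator callers are censored to null rather than
+// exposed, per the checks above.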
+
+static bool CallerGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsFunction, CallerGetterImpl>(cx, args);
+}
+
+bool CallerSetterImpl(JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(IsFunction(args.thisv()));
+
+ // We just have to return |undefined|, but first we call CallerGetterImpl
+ // because we need the same strict-mode and security checks.
+
+ if (!CallerGetterImpl(cx, args)) {
+ return false;
+ }
+
+ args.rval().setUndefined();
+ return true;
+}
+
+static bool CallerSetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsFunction, CallerSetterImpl>(cx, args);
+}
+
+static const JSPropertySpec function_properties[] = {
+ JS_PSGS("arguments", ArgumentsGetter, ArgumentsSetter, 0),
+ JS_PSGS("caller", CallerGetter, CallerSetter, 0), JS_PS_END};
+
+static bool ResolveInterpretedFunctionPrototype(JSContext* cx,
+ HandleFunction fun,
+ HandleId id) {
+ MOZ_ASSERT(fun->isInterpreted() || fun->isAsmJSNative());
+ MOZ_ASSERT(id == NameToId(cx->names().prototype));
+
+ // Assert that fun is not a compiler-created function object, which
+ // must never leak to script or embedding code and then be mutated.
+ // Also assert that fun is not bound, per the ES5 15.3.4.5 ref above.
+ MOZ_ASSERT(!IsInternalFunctionObject(*fun));
+
+ // Make the prototype object an instance of Object with the same parent as
+ // the function object itself, unless the function is an ES6 generator. In
+ // that case, per the 15 July 2013 ES6 draft, section 15.19.3, its parent is
+ // the GeneratorObjectPrototype singleton.
+ bool isGenerator = fun->isGenerator();
+ Rooted<GlobalObject*> global(cx, &fun->global());
+ RootedObject objProto(cx);
+ if (isGenerator && fun->isAsync()) {
+ objProto = GlobalObject::getOrCreateAsyncGeneratorPrototype(cx, global);
+ } else if (isGenerator) {
+ objProto = GlobalObject::getOrCreateGeneratorObjectPrototype(cx, global);
+ } else {
+ objProto = &global->getObjectPrototype();
+ }
+ if (!objProto) {
+ return false;
+ }
+
+ Rooted<PlainObject*> proto(
+ cx, NewPlainObjectWithProto(cx, objProto, TenuredObject));
+ if (!proto) {
+ return false;
+ }
+
+ // Per ES5 13.2 the prototype's .constructor property is configurable,
+ // non-enumerable, and writable. However, per the 15 July 2013 ES6 draft,
+ // section 15.19.3, the .prototype of a generator function does not link
+ // back with a .constructor.
+ if (!isGenerator) {
+ RootedValue objVal(cx, ObjectValue(*fun));
+ if (!DefineDataProperty(cx, proto, cx->names().constructor, objVal, 0)) {
+ return false;
+ }
+ }
+
+ // Per ES5 15.3.5.2 a user-defined function's .prototype property is
+ // initially non-configurable, non-enumerable, and writable.
+ RootedValue protoVal(cx, ObjectValue(*proto));
+ return DefineDataProperty(cx, fun, id, protoVal,
+ JSPROP_PERMANENT | JSPROP_RESOLVING);
+}
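+
+// For illustration, the shape of the lazily resolved .prototype property:
+//
+//   function f() {}
+//   f.prototype.constructor === f;             // true
+//   function* g() {}
+//   g.prototype.hasOwnProperty("constructor"); // false: no back-link for
+//                                              // generators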
+
+bool JSFunction::needsPrototypeProperty() {
+ /*
+   * Per ECMA-262, built-in functions either have no .prototype property, or
+   * have it (Object.prototype, Function.prototype, etc.) created eagerly.
+ *
+ * ES6 9.2.8 MakeConstructor defines the .prototype property on constructors.
+ * Generators are not constructors, but they have a .prototype property
+ * anyway, according to errata to ES6. See bug 1191486.
+ *
+   * Thus none of the following get a .prototype property:
+ * - Methods (that are not class-constructors or generators)
+ * - Arrow functions
+ * - Function.prototype
+ * - Async functions
+ */
+ return !isBuiltin() && (isConstructor() || isGenerator());
+}
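+
+// For illustration of the cases above:
+//
+//   (function f() {}).hasOwnProperty("prototype");       // true
+//   (function* g() {}).hasOwnProperty("prototype");      // true (generator)
+//   (() => {}).hasOwnProperty("prototype");              // false (arrow)
+//   ({ m() {} }).m.hasOwnProperty("prototype");          // false (method)
+//   (async function a() {}).hasOwnProperty("prototype"); // false (async)
+//   Math.max.hasOwnProperty("prototype");                // false (builtin)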
+
+bool JSFunction::hasNonConfigurablePrototypeDataProperty() {
+ if (!isBuiltin()) {
+ return needsPrototypeProperty();
+ }
+
+ if (isSelfHostedBuiltin()) {
+ // Self-hosted constructors have a non-configurable .prototype data
+ // property.
+ if (!isConstructor()) {
+ return false;
+ }
+#ifdef DEBUG
+ PropertyName* prototypeName =
+ runtimeFromMainThread()->commonNames->prototype;
+ Maybe<PropertyInfo> prop = lookupPure(prototypeName);
+ MOZ_ASSERT(prop.isSome());
+ MOZ_ASSERT(prop->isDataProperty());
+ MOZ_ASSERT(!prop->configurable());
+#endif
+ return true;
+ }
+
+ if (!isConstructor()) {
+ // We probably don't have a .prototype property. Avoid the lookup below.
+ return false;
+ }
+
+ PropertyName* prototypeName = runtimeFromMainThread()->commonNames->prototype;
+ Maybe<PropertyInfo> prop = lookupPure(prototypeName);
+ return prop.isSome() && prop->isDataProperty() && !prop->configurable();
+}
+
+static bool fun_mayResolve(const JSAtomState& names, jsid id, JSObject*) {
+ if (!id.isAtom()) {
+ return false;
+ }
+
+ JSAtom* atom = id.toAtom();
+ return atom == names.prototype || atom == names.length || atom == names.name;
+}
+
+static bool fun_resolve(JSContext* cx, HandleObject obj, HandleId id,
+ bool* resolvedp) {
+ if (!id.isAtom()) {
+ return true;
+ }
+
+ RootedFunction fun(cx, &obj->as<JSFunction>());
+
+ if (id.isAtom(cx->names().prototype)) {
+ if (!fun->needsPrototypeProperty()) {
+ return true;
+ }
+
+ if (!ResolveInterpretedFunctionPrototype(cx, fun, id)) {
+ return false;
+ }
+
+ *resolvedp = true;
+ return true;
+ }
+
+ bool isLength = id.isAtom(cx->names().length);
+ if (isLength || id.isAtom(cx->names().name)) {
+ MOZ_ASSERT(!IsInternalFunctionObject(*obj));
+
+ RootedValue v(cx);
+
+ // Since f.length and f.name are configurable, they could be resolved
+ // and then deleted:
+ // function f(x) {}
+ // assertEq(f.length, 1);
+ // delete f.length;
+ // assertEq(f.name, "f");
+ // delete f.name;
+ // Afterwards, asking for f.length or f.name again will cause this
+ // resolve hook to run again. Defining the property again the second
+ // time through would be a bug.
+ // assertEq(f.length, 0); // gets Function.prototype.length!
+ // assertEq(f.name, ""); // gets Function.prototype.name!
+ // We use the RESOLVED_LENGTH and RESOLVED_NAME flags as a hack to prevent
+ // this bug.
+ if (isLength) {
+ if (fun->hasResolvedLength()) {
+ return true;
+ }
+
+ uint16_t len = 0;
+ if (!JSFunction::getUnresolvedLength(cx, fun, &len)) {
+ return false;
+ }
+ v.setInt32(len);
+ } else {
+ if (fun->hasResolvedName()) {
+ return true;
+ }
+
+ v.setString(fun->infallibleGetUnresolvedName(cx));
+ }
+
+ if (!NativeDefineDataProperty(cx, fun, id, v,
+ JSPROP_READONLY | JSPROP_RESOLVING)) {
+ return false;
+ }
+
+ if (isLength) {
+ fun->setResolvedLength();
+ } else {
+ fun->setResolvedName();
+ }
+
+ *resolvedp = true;
+ return true;
+ }
+
+ return true;
+}
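+
+// For illustration, a sketch of the lazily resolved |length| (see
+// JSFunction::getUnresolvedLength): only parameters before the first default
+// or rest parameter are counted.
+//
+//   function f(a, b = 1, ...rest) {}
+//   f.length; // 1
+//   f.name;   // "f"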
+
+/* ES6 (04-25-16) 19.2.3.6 Function.prototype [ @@hasInstance ] */
+static bool fun_symbolHasInstance(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (args.length() < 1) {
+ args.rval().setBoolean(false);
+ return true;
+ }
+
+ /* Step 1. */
+ HandleValue func = args.thisv();
+
+ // Primitives are non-callable and will always return false from
+ // OrdinaryHasInstance.
+ if (!func.isObject()) {
+ args.rval().setBoolean(false);
+ return true;
+ }
+
+ RootedObject obj(cx, &func.toObject());
+
+ /* Step 2. */
+ bool result;
+ if (!OrdinaryHasInstance(cx, obj, args[0], &result)) {
+ return false;
+ }
+
+ args.rval().setBoolean(result);
+ return true;
+}
+
+/*
+ * ES6 (4-25-16) 7.3.19 OrdinaryHasInstance
+ */
+bool JS::OrdinaryHasInstance(JSContext* cx, HandleObject objArg, HandleValue v,
+ bool* bp) {
+ AssertHeapIsIdle();
+ cx->check(objArg, v);
+
+ RootedObject obj(cx, objArg);
+
+ /* Step 1. */
+ if (!obj->isCallable()) {
+ *bp = false;
+ return true;
+ }
+
+ /* Step 2. */
+ if (obj->is<BoundFunctionObject>()) {
+ /* Steps 2a-b. */
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+ obj = obj->as<BoundFunctionObject>().getTarget();
+ return InstanceofOperator(cx, obj, v, bp);
+ }
+
+ /* Step 3. */
+ if (!v.isObject()) {
+ *bp = false;
+ return true;
+ }
+
+ /* Step 4. */
+ RootedValue pval(cx);
+ if (!GetProperty(cx, obj, obj, cx->names().prototype, &pval)) {
+ return false;
+ }
+
+ /* Step 5. */
+ if (pval.isPrimitive()) {
+ /*
+ * Throw a runtime error if instanceof is called on a function that
+ * has a non-object as its .prototype value.
+ */
+ RootedValue val(cx, ObjectValue(*obj));
+ ReportValueError(cx, JSMSG_BAD_PROTOTYPE, -1, val, nullptr);
+ return false;
+ }
+
+ /* Step 6. */
+ RootedObject pobj(cx, &pval.toObject());
+ bool isPrototype;
+ if (!IsPrototypeOf(cx, pobj, &v.toObject(), &isPrototype)) {
+ return false;
+ }
+ *bp = isPrototype;
+ return true;
+}
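+
+// For illustration, a sketch of the steps above:
+//
+//   class A {}
+//   class B extends A {}
+//   new B() instanceof A;      // true: steps 4-6 walk the prototype chain
+//   1 instanceof A;            // false: step 3, primitives never match
+//   const BoundA = A.bind(null);
+//   new A() instanceof BoundA; // true: step 2 retries with the bound target
+//
+//   function f() {}
+//   f.prototype = 0;
+//   ({}) instanceof f;         // TypeError: step 5, non-object .prototype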
+
+inline void JSFunction::trace(JSTracer* trc) {
+  // Functions can be marked as interpreted despite having no script yet at
+ // some points when parsing, and can be lazy with no lazy script for
+ // self-hosted code.
+ MOZ_ASSERT(!getFixedSlot(NativeJitInfoOrInterpretedScriptSlot).isGCThing());
+ if (isInterpreted() && hasBaseScript()) {
+ if (BaseScript* script = baseScript()) {
+ TraceManuallyBarrieredEdge(trc, &script, "JSFunction script");
+ // Self-hosted scripts are shared with workers but are never relocated.
+ // Skip unnecessary writes to prevent the possible data race.
+ if (baseScript() != script) {
+ HeapSlot& slot = getFixedSlotRef(NativeJitInfoOrInterpretedScriptSlot);
+ slot.unbarrieredSet(JS::PrivateValue(script));
+ }
+ }
+ }
+  // wasm/asm.js exported functions need to keep their WasmInstanceObject
+  // alive; access it via the WASM_INSTANCE_SLOT extended slot.
+ if (isAsmJSNative() || isWasm()) {
+ const Value& v = getExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT);
+ if (!v.isUndefined()) {
+ auto* instance = static_cast<wasm::Instance*>(v.toPrivate());
+ wasm::TraceInstanceEdge(trc, instance, "JSFunction instance");
+ }
+ }
+}
+
+static void fun_trace(JSTracer* trc, JSObject* obj) {
+ obj->as<JSFunction>().trace(trc);
+}
+
+static JSObject* CreateFunctionConstructor(JSContext* cx, JSProtoKey key) {
+ Rooted<GlobalObject*> global(cx, cx->global());
+ RootedObject functionProto(cx, &global->getPrototype(JSProto_Function));
+
+ RootedObject functionCtor(
+ cx, NewFunctionWithProto(
+ cx, Function, 1, FunctionFlags::NATIVE_CTOR, nullptr,
+ Handle<PropertyName*>(cx->names().Function), functionProto,
+ gc::AllocKind::FUNCTION, TenuredObject));
+ if (!functionCtor) {
+ return nullptr;
+ }
+
+ return functionCtor;
+}
+
+static bool FunctionPrototype(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setUndefined();
+ return true;
+}
+
+static JSObject* CreateFunctionPrototype(JSContext* cx, JSProtoKey key) {
+ Rooted<GlobalObject*> self(cx, cx->global());
+
+ RootedObject objectProto(cx, &self->getPrototype(JSProto_Object));
+
+ return NewFunctionWithProto(
+ cx, FunctionPrototype, 0, FunctionFlags::NATIVE_FUN, nullptr,
+ Handle<PropertyName*>(cx->names().empty), objectProto,
+ gc::AllocKind::FUNCTION, TenuredObject);
+}
+
+JSString* js::FunctionToStringCache::lookup(BaseScript* script) const {
+ for (size_t i = 0; i < NumEntries; i++) {
+ if (entries_[i].script == script) {
+ return entries_[i].string;
+ }
+ }
+ return nullptr;
+}
+
+void js::FunctionToStringCache::put(BaseScript* script, JSString* string) {
+ for (size_t i = NumEntries - 1; i > 0; i--) {
+ entries_[i] = entries_[i - 1];
+ }
+
+ entries_[0].set(script, string);
+}
+
+JSString* js::FunctionToString(JSContext* cx, HandleFunction fun,
+ bool isToSource) {
+ if (IsAsmJSModule(fun)) {
+ return AsmJSModuleToString(cx, fun, isToSource);
+ }
+ if (IsAsmJSFunction(fun)) {
+ return AsmJSFunctionToString(cx, fun);
+ }
+
+ // Self-hosted built-ins should not expose their source code.
+ bool haveSource = fun->isInterpreted() && !fun->isSelfHostedBuiltin();
+
+  // If we're in toSource mode, put parentheses around lambda functions so
+  // that eval yields the lambda itself rather than a function statement.
+ bool addParentheses =
+ haveSource && isToSource && (fun->isLambda() && !fun->isArrow());
+
+ if (haveSource) {
+ if (!ScriptSource::loadSource(cx, fun->baseScript()->scriptSource(),
+ &haveSource)) {
+ return nullptr;
+ }
+ }
+
+ // Fast path for the common case, to avoid StringBuffer overhead.
+ if (!addParentheses && haveSource) {
+ FunctionToStringCache& cache = cx->zone()->functionToStringCache();
+ if (JSString* str = cache.lookup(fun->baseScript())) {
+ return str;
+ }
+
+ BaseScript* script = fun->baseScript();
+ size_t start = script->toStringStart();
+ size_t end = script->toStringEnd();
+ JSString* str =
+ (end - start <= ScriptSource::SourceDeflateLimit)
+ ? script->scriptSource()->substring(cx, start, end)
+ : script->scriptSource()->substringDontDeflate(cx, start, end);
+ if (!str) {
+ return nullptr;
+ }
+
+ cache.put(fun->baseScript(), str);
+ return str;
+ }
+
+ JSStringBuilder out(cx);
+ if (addParentheses) {
+ if (!out.append('(')) {
+ return nullptr;
+ }
+ }
+
+ if (haveSource) {
+ if (!fun->baseScript()->appendSourceDataForToString(cx, out)) {
+ return nullptr;
+ }
+ } else if (!isToSource) {
+ // For the toString() output the source representation must match
+ // NativeFunction when no source text is available.
+ //
+ // NativeFunction:
+ // function PropertyName[~Yield,~Await]opt (
+ // FormalParameters[~Yield,~Await] ) { [native code] }
+ //
+ // Additionally, if |fun| is a well-known intrinsic object and is not
+ // identified as an anonymous function, the portion of the returned
+ // string that would be matched by IdentifierName must be the initial
+ // value of the name property of |fun|.
+
+ auto hasGetterOrSetterPrefix = [](JSAtom* name) {
+ auto hasGetterOrSetterPrefix = [](const auto* chars) {
+ return (chars[0] == 'g' || chars[0] == 's') && chars[1] == 'e' &&
+ chars[2] == 't' && chars[3] == ' ';
+ };
+
+ JS::AutoCheckCannotGC nogc;
+ return name->length() >= 4 &&
+ (name->hasLatin1Chars()
+ ? hasGetterOrSetterPrefix(name->latin1Chars(nogc))
+ : hasGetterOrSetterPrefix(name->twoByteChars(nogc)));
+ };
+
+ if (!out.append("function")) {
+ return nullptr;
+ }
+
+    // We don't want to fully parse the function's name here for performance
+    // reasons, so only append the name if we're confident it can be matched
+    // as the 'PropertyName' grammar production.
+ if (fun->explicitName() &&
+ (fun->kind() == FunctionFlags::NormalFunction ||
+ fun->kind() == FunctionFlags::Wasm ||
+ fun->kind() == FunctionFlags::ClassConstructor)) {
+ if (!out.append(' ')) {
+ return nullptr;
+ }
+
+      // Built-in getters or setters are classified as normal functions;
+      // strip any leading "get " or "set " if present.
+ JSAtom* name = fun->explicitName();
+ size_t offset = hasGetterOrSetterPrefix(name) ? 4 : 0;
+ if (!out.appendSubstring(name, offset, name->length() - offset)) {
+ return nullptr;
+ }
+ }
+
+ if (!out.append("() {\n [native code]\n}")) {
+ return nullptr;
+ }
+ } else {
+ if (fun->isAsync()) {
+ if (!out.append("async ")) {
+ return nullptr;
+ }
+ }
+
+ if (!fun->isArrow()) {
+ if (!out.append("function")) {
+ return nullptr;
+ }
+
+ if (fun->isGenerator()) {
+ if (!out.append('*')) {
+ return nullptr;
+ }
+ }
+ }
+
+ if (fun->explicitName()) {
+ if (!out.append(' ')) {
+ return nullptr;
+ }
+ if (!out.append(fun->explicitName())) {
+ return nullptr;
+ }
+ }
+
+ if (!out.append("() {\n [native code]\n}")) {
+ return nullptr;
+ }
+ }
+
+ if (addParentheses) {
+ if (!out.append(')')) {
+ return nullptr;
+ }
+ }
+
+ return out.finishString();
+}
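+
+// For illustration, a sketch of FunctionToString's output (whitespace in the
+// native-function template is elided here):
+//
+//   (function f(a) { return a; }).toString();
+//   // => "function f(a) { return a; }"  (the stored source text)
+//   Math.max.toString();
+//   // => "function max() { [native code] }"
+//   (function () {}).toSource();
+//   // => "(function () {})"  (lambdas are parenthesized for toSource)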
+
+JSString* fun_toStringHelper(JSContext* cx, HandleObject obj, bool isToSource) {
+ if (!obj->is<JSFunction>()) {
+ if (JSFunToStringOp op = obj->getOpsFunToString()) {
+ return op(cx, obj, isToSource);
+ }
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_INCOMPATIBLE_PROTO, js_Function_str,
+ js_toString_str, "object");
+ return nullptr;
+ }
+
+ return FunctionToString(cx, obj.as<JSFunction>(), isToSource);
+}
+
+bool js::fun_toString(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(IsFunctionObject(args.calleev()));
+
+ RootedObject obj(cx, ToObject(cx, args.thisv()));
+ if (!obj) {
+ return false;
+ }
+
+ JSString* str = fun_toStringHelper(cx, obj, /* isToSource = */ false);
+ if (!str) {
+ return false;
+ }
+
+ args.rval().setString(str);
+ return true;
+}
+
+static bool fun_toSource(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(IsFunctionObject(args.calleev()));
+
+ RootedObject obj(cx, ToObject(cx, args.thisv()));
+ if (!obj) {
+ return false;
+ }
+
+ RootedString str(cx);
+ if (obj->isCallable()) {
+ str = fun_toStringHelper(cx, obj, /* isToSource = */ true);
+ } else {
+ str = ObjectToSource(cx, obj);
+ }
+ if (!str) {
+ return false;
+ }
+
+ args.rval().setString(str);
+ return true;
+}
+
+bool js::fun_call(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ HandleValue func = args.thisv();
+
+ // We don't need to do this -- Call would do it for us -- but the error
+ // message is *much* better if we do this here. (Without this,
+ // JSDVG_SEARCH_STACK tries to decompile |func| as if it were |this| in
+ // the scripted caller's frame -- so for example
+ //
+ // Function.prototype.call.call({});
+ //
+ // would identify |{}| as |this| as being the result of evaluating
+ // |Function.prototype.call| and would conclude, "Function.prototype.call
+ // is not a function". Grotesque.)
+ if (!IsCallable(func)) {
+ ReportIncompatibleMethod(cx, args, &FunctionClass);
+ return false;
+ }
+
+ size_t argCount = args.length();
+ if (argCount > 0) {
+ argCount--; // strip off provided |this|
+ }
+
+ InvokeArgs iargs(cx);
+ if (!iargs.init(cx, argCount)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < argCount; i++) {
+ iargs[i].set(args[i + 1]);
+ }
+
+ return Call(cx, func, args.get(0), iargs, args.rval(), CallReason::FunCall);
+}
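+
+// For illustration of fun_call above:
+//
+//   function add(a, b) { return a + b; }
+//   add.call(null, 1, 2); // 3: args[0] supplies |this|, the rest are
+//                         // forwarded as the call's arguments
+//   add.call(null);       // NaN: calling with no extra arguments is fine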
+
+// ES5 15.3.4.3
+bool js::fun_apply(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 1.
+ //
+ // Note that we must check callability here, not at actual call time,
+ // because extracting argument values from the provided arraylike might
+ // have side effects or throw an exception.
+ HandleValue fval = args.thisv();
+ if (!IsCallable(fval)) {
+ ReportIncompatibleMethod(cx, args, &FunctionClass);
+ return false;
+ }
+
+ // Step 2.
+ if (args.length() < 2 || args[1].isNullOrUndefined()) {
+ return fun_call(cx, (args.length() > 0) ? 1 : 0, vp);
+ }
+
+ // Step 3.
+ if (!args[1].isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_APPLY_ARGS, js_apply_str);
+ return false;
+ }
+
+ // Steps 4-5 (note erratum removing steps originally numbered 5 and 7 in
+ // original version of ES5).
+ RootedObject aobj(cx, &args[1].toObject());
+ uint64_t length;
+ if (!GetLengthProperty(cx, aobj, &length)) {
+ return false;
+ }
+
+ // Step 6.
+ InvokeArgs args2(cx);
+ if (!args2.init(cx, length)) {
+ return false;
+ }
+
+ MOZ_ASSERT(length <= ARGS_LENGTH_MAX);
+
+ // Steps 7-8.
+ if (!GetElements(cx, aobj, length, args2.array())) {
+ return false;
+ }
+
+ // Step 9.
+ return Call(cx, fval, args[0], args2, args.rval(), CallReason::FunCall);
+}
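+
+// For illustration of fun_apply above:
+//
+//   Math.max.apply(null, [1, 5, 3]);                 // 5
+//   Math.max.apply(null);                            // -Infinity (fun_call)
+//   Math.max.apply(null, { length: 2, 0: 1, 1: 9 }); // 9: any array-like
+//                                                    // with .length works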
+
+static const JSFunctionSpec function_methods[] = {
+ JS_FN(js_toSource_str, fun_toSource, 0, 0),
+ JS_FN(js_toString_str, fun_toString, 0, 0),
+ JS_FN(js_apply_str, fun_apply, 2, 0),
+ JS_FN(js_call_str, fun_call, 1, 0),
+ JS_INLINABLE_FN("bind", BoundFunctionObject::functionBind, 1, 0,
+ FunctionBind),
+ JS_SYM_FN(hasInstance, fun_symbolHasInstance, 1,
+ JSPROP_READONLY | JSPROP_PERMANENT),
+ JS_FS_END};
+
+static const JSClassOps JSFunctionClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ fun_enumerate, // enumerate
+ nullptr, // newEnumerate
+ fun_resolve, // resolve
+ fun_mayResolve, // mayResolve
+ nullptr, // finalize
+ nullptr, // call
+ nullptr, // construct
+ fun_trace, // trace
+};
+
+static const ClassSpec JSFunctionClassSpec = {
+ CreateFunctionConstructor, CreateFunctionPrototype, nullptr, nullptr,
+ function_methods, function_properties};
+
+const JSClass js::FunctionClass = {
+ js_Function_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Function) |
+ JSCLASS_HAS_RESERVED_SLOTS(JSFunction::SlotCount),
+ &JSFunctionClassOps, &JSFunctionClassSpec};
+
+const JSClass js::ExtendedFunctionClass = {
+ js_Function_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Function) |
+ JSCLASS_HAS_RESERVED_SLOTS(FunctionExtended::SlotCount),
+ &JSFunctionClassOps, &JSFunctionClassSpec};
+
+const JSClass* const js::FunctionClassPtr = &FunctionClass;
+const JSClass* const js::FunctionExtendedClassPtr = &ExtendedFunctionClass;
+
+bool JSFunction::isDerivedClassConstructor() const {
+ bool derived = hasBaseScript() && baseScript()->isDerivedClassConstructor();
+ MOZ_ASSERT_IF(derived, isClassConstructor());
+ return derived;
+}
+
+bool JSFunction::isSyntheticFunction() const {
+ bool synthetic = hasBaseScript() && baseScript()->isSyntheticFunction();
+ MOZ_ASSERT_IF(synthetic, isMethod());
+ return synthetic;
+}
+
+/* static */
+bool JSFunction::delazifyLazilyInterpretedFunction(JSContext* cx,
+ HandleFunction fun) {
+ MOZ_ASSERT(fun->hasBaseScript());
+ MOZ_ASSERT(cx->compartment() == fun->compartment());
+
+ // The function must be same-compartment but might be cross-realm. Make sure
+ // the script is created in the function's realm.
+ AutoRealm ar(cx, fun);
+
+ Rooted<BaseScript*> lazy(cx, fun->baseScript());
+ RootedFunction canonicalFun(cx, lazy->function());
+
+ // If this function is non-canonical, then use the canonical function first
+ // to get the delazified script. This may result in calling this method
+ // again on the canonical function. This ensures the canonical function is
+ // always non-lazy if any of the clones are non-lazy.
+ if (fun != canonicalFun) {
+ JSScript* script = JSFunction::getOrCreateScript(cx, canonicalFun);
+ if (!script) {
+ return false;
+ }
+
+ // Delazifying the canonical function should naturally make us non-lazy
+ // because we share a BaseScript with the canonical function.
+ MOZ_ASSERT(fun->hasBytecode());
+ return true;
+ }
+
+ // Finally, compile the script if it really doesn't exist.
+ AutoReportFrontendContext fc(cx);
+ if (!frontend::DelazifyCanonicalScriptedFunction(cx, &fc, fun)) {
+ // The frontend shouldn't fail after linking the function and the
+ // non-lazy script together.
+ MOZ_ASSERT(fun->baseScript() == lazy);
+ MOZ_ASSERT(lazy->isReadyForDelazification());
+ return false;
+ }
+
+ return true;
+}
+
+/* static */
+bool JSFunction::delazifySelfHostedLazyFunction(JSContext* cx,
+ js::HandleFunction fun) {
+ MOZ_ASSERT(cx->compartment() == fun->compartment());
+
+ // The function must be same-compartment but might be cross-realm. Make sure
+ // the script is created in the function's realm.
+ AutoRealm ar(cx, fun);
+
+ /* Lazily cloned self-hosted script. */
+ MOZ_ASSERT(fun->isSelfHostedBuiltin());
+ Rooted<PropertyName*> funName(cx, GetClonedSelfHostedFunctionName(fun));
+ if (!funName) {
+ return false;
+ }
+ return cx->runtime()->delazifySelfHostedFunction(cx, funName, fun);
+}
+
+void JSFunction::maybeRelazify(JSRuntime* rt) {
+ MOZ_ASSERT(!isIncomplete(), "Cannot relazify incomplete functions");
+
+ // Don't relazify functions in compartments that are active.
+ Realm* realm = this->realm();
+ if (!rt->allowRelazificationForTesting) {
+ if (realm->compartment()->gcState.hasEnteredRealm) {
+ return;
+ }
+
+ MOZ_ASSERT(!realm->hasBeenEnteredIgnoringJit());
+ }
+
+ // Don't relazify if the realm is being debugged. The debugger side-tables
+ // such as the set of active breakpoints require bytecode to exist.
+ if (realm->isDebuggee()) {
+ return;
+ }
+
+ // Don't relazify if we are collecting coverage so that we do not lose count
+ // information.
+ if (coverage::IsLCovEnabled()) {
+ return;
+ }
+
+ // Check the script's eligibility.
+ JSScript* script = nonLazyScript();
+ if (!script->allowRelazify()) {
+ return;
+ }
+ MOZ_ASSERT(script->isRelazifiable());
+
+ // There must not be any JIT code attached since the relazification process
+ // does not know how to discard it. In general, the GC should discard most JIT
+ // code before attempting relazification.
+ if (script->hasJitScript()) {
+ return;
+ }
+
+ if (isSelfHostedBuiltin()) {
+ gc::PreWriteBarrier(script);
+ initSelfHostedLazyScript(&rt->selfHostedLazyScript.ref());
+ } else {
+ script->relazify(rt);
+ }
+}
+
+js::GeneratorKind JSFunction::clonedSelfHostedGeneratorKind() const {
+ MOZ_ASSERT(hasSelfHostedLazyScript());
+
+ // This is a lazy clone of a self-hosted builtin. It has no BaseScript, and
+ // `this->flags_` does not contain the generator kind. Consult the
+ // implementation in the self-hosting realm, which has a BaseScript.
+ MOZ_RELEASE_ASSERT(isExtended());
+ PropertyName* name = GetClonedSelfHostedFunctionName(this);
+ return runtimeFromMainThread()->getSelfHostedFunctionGeneratorKind(name);
+}
+
+// ES2018 draft rev 2aea8f3e617b49df06414eb062ab44fad87661d3
+// 19.2.1.1.1 CreateDynamicFunction( constructor, newTarget, kind, args )
+static bool CreateDynamicFunction(JSContext* cx, const CallArgs& args,
+ GeneratorKind generatorKind,
+ FunctionAsyncKind asyncKind) {
+ using namespace frontend;
+
+ // Steps 1-5.
+ bool isGenerator = generatorKind == GeneratorKind::Generator;
+ bool isAsync = asyncKind == FunctionAsyncKind::AsyncFunction;
+
+ RootedScript maybeScript(cx);
+ const char* filename;
+ unsigned lineno;
+ bool mutedErrors;
+ uint32_t pcOffset;
+ DescribeScriptedCallerForCompilation(cx, &maybeScript, &filename, &lineno,
+ &pcOffset, &mutedErrors);
+
+ const char* introductionType = "Function";
+ if (isAsync) {
+ if (isGenerator) {
+ introductionType = "AsyncGenerator";
+ } else {
+ introductionType = "AsyncFunction";
+ }
+ } else if (isGenerator) {
+ introductionType = "GeneratorFunction";
+ }
+
+ const char* introducerFilename = filename;
+ if (maybeScript && maybeScript->scriptSource()->introducerFilename()) {
+ introducerFilename = maybeScript->scriptSource()->introducerFilename();
+ }
+
+ CompileOptions options(cx);
+ options.setMutedErrors(mutedErrors)
+ .setFileAndLine(filename, 1)
+ .setNoScriptRval(false)
+ .setIntroductionInfo(introducerFilename, introductionType, lineno,
+ pcOffset)
+ .setDeferDebugMetadata();
+
+ JSStringBuilder sb(cx);
+
+ if (isAsync) {
+ if (!sb.append("async ")) {
+ return false;
+ }
+ }
+ if (!sb.append("function")) {
+ return false;
+ }
+ if (isGenerator) {
+ if (!sb.append('*')) {
+ return false;
+ }
+ }
+
+ if (!sb.append(" anonymous(")) {
+ return false;
+ }
+
+ if (args.length() > 1) {
+ RootedString str(cx);
+
+ // Steps 10, 14.d.
+ unsigned n = args.length() - 1;
+
+ for (unsigned i = 0; i < n; i++) {
+ // Steps 14.a-b, 14.d.i-ii.
+ str = ToString<CanGC>(cx, args[i]);
+ if (!str) {
+ return false;
+ }
+
+ // Steps 14.b, 14.d.iii.
+ if (!sb.append(str)) {
+ return false;
+ }
+
+ if (i < args.length() - 2) {
+ // Step 14.d.iii.
+ if (!sb.append(',')) {
+ return false;
+ }
+ }
+ }
+ }
+
+ if (!sb.append('\n')) {
+ return false;
+ }
+
+ // Remember the position of ")".
+ Maybe<uint32_t> parameterListEnd = Some(uint32_t(sb.length()));
+ static_assert(FunctionConstructorMedialSigils[0] == ')');
+
+ if (!sb.append(FunctionConstructorMedialSigils.data(),
+ FunctionConstructorMedialSigils.length())) {
+ return false;
+ }
+
+ if (args.length() > 0) {
+ // Steps 13, 14.e, 15.
+ RootedString body(cx, ToString<CanGC>(cx, args[args.length() - 1]));
+ if (!body || !sb.append(body)) {
+ return false;
+ }
+ }
+
+ if (!sb.append(FunctionConstructorFinalBrace.data(),
+ FunctionConstructorFinalBrace.length())) {
+ return false;
+ }
+
+  // The parser only accepts two-byte strings.
+ if (!sb.ensureTwoByteChars()) {
+ return false;
+ }
+
+ RootedString functionText(cx, sb.finishString());
+ if (!functionText) {
+ return false;
+ }
+
+ // Block this call if security callbacks forbid it.
+ if (!cx->isRuntimeCodeGenEnabled(JS::RuntimeCode::JS, functionText)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CSP_BLOCKED_FUNCTION);
+ return false;
+ }
+
+ // Steps 7.a-b, 8.a-b, 9.a-b, 16-28.
+ AutoStableStringChars linearChars(cx);
+ if (!linearChars.initTwoByte(cx, functionText)) {
+ return false;
+ }
+
+ SourceText<char16_t> srcBuf;
+ if (!srcBuf.initMaybeBorrowed(cx, linearChars)) {
+ return false;
+ }
+
+ FunctionSyntaxKind syntaxKind = FunctionSyntaxKind::Expression;
+
+ RootedFunction fun(cx);
+ JSProtoKey protoKey;
+ if (isAsync) {
+ if (isGenerator) {
+ fun = CompileStandaloneAsyncGenerator(cx, options, srcBuf,
+ parameterListEnd, syntaxKind);
+ protoKey = JSProto_AsyncGeneratorFunction;
+ } else {
+ fun = CompileStandaloneAsyncFunction(cx, options, srcBuf,
+ parameterListEnd, syntaxKind);
+ protoKey = JSProto_AsyncFunction;
+ }
+ } else {
+ if (isGenerator) {
+ fun = CompileStandaloneGenerator(cx, options, srcBuf, parameterListEnd,
+ syntaxKind);
+ protoKey = JSProto_GeneratorFunction;
+ } else {
+ fun = CompileStandaloneFunction(cx, options, srcBuf, parameterListEnd,
+ syntaxKind);
+ protoKey = JSProto_Function;
+ }
+ }
+ if (!fun) {
+ return false;
+ }
+
+ RootedValue undefValue(cx);
+ RootedScript funScript(cx, JS_GetFunctionScript(cx, fun));
+ JS::InstantiateOptions instantiateOptions(options);
+ if (funScript &&
+ !UpdateDebugMetadata(cx, funScript, instantiateOptions, undefValue,
+ nullptr, maybeScript, maybeScript)) {
+ return false;
+ }
+
+ if (fun->isInterpreted()) {
+ fun->initEnvironment(&cx->global()->lexicalEnvironment());
+ }
+
+ // Steps 6, 29.
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, protoKey, &proto)) {
+ return false;
+ }
+
+ // Steps 7.d, 8.d (implicit).
+ // Call SetPrototype if an explicit prototype was given.
+ if (proto && !SetPrototype(cx, fun, proto)) {
+ return false;
+ }
+
+ // Step 38.
+ args.rval().setObject(*fun);
+ return true;
+}
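+
+// For illustration, |new Function("a", "b", "return a + b")| assembles and
+// compiles the following source text (parameterListEnd marks the position of
+// the ")", so the parameter list must parse as a complete production on its
+// own):
+//
+//   function anonymous(a,b
+//   ) {
+//   return a + b
+//   }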
+
+bool js::Function(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CreateDynamicFunction(cx, args, GeneratorKind::NotGenerator,
+ FunctionAsyncKind::SyncFunction);
+}
+
+bool js::Generator(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CreateDynamicFunction(cx, args, GeneratorKind::Generator,
+ FunctionAsyncKind::SyncFunction);
+}
+
+bool js::AsyncFunctionConstructor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CreateDynamicFunction(cx, args, GeneratorKind::NotGenerator,
+ FunctionAsyncKind::AsyncFunction);
+}
+
+bool js::AsyncGeneratorConstructor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CreateDynamicFunction(cx, args, GeneratorKind::Generator,
+ FunctionAsyncKind::AsyncFunction);
+}
+
+bool JSFunction::isBuiltinFunctionConstructor() {
+ return maybeNative() == Function || maybeNative() == Generator;
+}
+
+bool JSFunction::needsExtraBodyVarEnvironment() const {
+ if (isNativeFun()) {
+ return false;
+ }
+
+ if (!nonLazyScript()->functionHasExtraBodyVarScope()) {
+ return false;
+ }
+
+ return nonLazyScript()->functionExtraBodyVarScope()->hasEnvironment();
+}
+
+bool JSFunction::needsNamedLambdaEnvironment() const {
+ if (!isNamedLambda()) {
+ return false;
+ }
+
+ LexicalScope* scope = nonLazyScript()->maybeNamedLambdaScope();
+ if (!scope) {
+ return false;
+ }
+
+ return scope->hasEnvironment();
+}
+
+bool JSFunction::needsCallObject() const {
+ if (isNativeFun()) {
+ return false;
+ }
+
+ MOZ_ASSERT(hasBytecode());
+
+ // Note: this should be kept in sync with
+ // FunctionBox::needsCallObjectRegardlessOfBindings().
+ MOZ_ASSERT_IF(
+ baseScript()->funHasExtensibleScope() || isGenerator() || isAsync(),
+ nonLazyScript()->bodyScope()->hasEnvironment());
+
+ return nonLazyScript()->bodyScope()->hasEnvironment();
+}
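+
+// For illustration (a sketch): a call object is an environment allocated per
+// invocation for bindings that must outlive the frame.
+//
+//   function f() { var x = 1; return () => x; } // needs a call object:
+//                                               // |x| is closed over
+//   function g(y) { return y + 1; }             // typically doesn't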
+
+#ifdef DEBUG
+static JSObject* SkipEnvironmentObjects(JSObject* env) {
+ if (!env) {
+ return nullptr;
+ }
+ while (env->is<EnvironmentObject>()) {
+ env = &env->as<EnvironmentObject>().enclosingEnvironment();
+ }
+ return env;
+}
+
+static bool NewFunctionEnvironmentIsWellFormed(JSContext* cx,
+ HandleObject env) {
+ // Assert that the terminating environment is null, global, or a debug
+ // scope proxy. All other cases of polluting global scope behavior are
+ // handled by EnvironmentObjects (viz. non-syntactic DynamicWithObject and
+ // NonSyntacticVariablesObject).
+ RootedObject terminatingEnv(cx, SkipEnvironmentObjects(env));
+ return !terminatingEnv || terminatingEnv == cx->global() ||
+ terminatingEnv->is<DebugEnvironmentProxy>();
+}
+#endif
+
+static inline const JSClass* FunctionClassForAllocKind(
+ gc::AllocKind allocKind) {
+ return (allocKind == gc::AllocKind::FUNCTION) ? FunctionClassPtr
+ : FunctionExtendedClassPtr;
+}
+
+static void AssertClassMatchesAllocKind(const JSClass* clasp,
+ gc::AllocKind kind) {
+#ifdef DEBUG
+ if (kind == gc::AllocKind::FUNCTION_EXTENDED) {
+ MOZ_ASSERT(clasp == FunctionExtendedClassPtr);
+ } else {
+ MOZ_ASSERT(kind == gc::AllocKind::FUNCTION);
+ MOZ_ASSERT(clasp == FunctionClassPtr);
+ }
+#endif
+}
+
+static SharedShape* GetFunctionShape(JSContext* cx, const JSClass* clasp,
+ JSObject* proto, gc::AllocKind allocKind) {
+ AssertClassMatchesAllocKind(clasp, allocKind);
+
+ size_t nfixed = GetGCKindSlots(allocKind);
+ return SharedShape::getInitialShape(
+ cx, clasp, cx->realm(), TaggedProto(proto), nfixed, ObjectFlags());
+}
+
+SharedShape* GlobalObject::createFunctionShapeWithDefaultProto(JSContext* cx,
+ bool extended) {
+ GlobalObjectData& data = cx->global()->data();
+ HeapPtr<SharedShape*>& shapeRef =
+ extended ? data.extendedFunctionShapeWithDefaultProto
+ : data.functionShapeWithDefaultProto;
+ MOZ_ASSERT(!shapeRef);
+
+ RootedObject proto(cx,
+ GlobalObject::getOrCreatePrototype(cx, JSProto_Function));
+ if (!proto) {
+ return nullptr;
+ }
+
+ // Creating %Function.prototype% can end up initializing the shape.
+ if (shapeRef) {
+ return shapeRef;
+ }
+
+ gc::AllocKind allocKind =
+ extended ? gc::AllocKind::FUNCTION_EXTENDED : gc::AllocKind::FUNCTION;
+ const JSClass* clasp = FunctionClassForAllocKind(allocKind);
+
+ SharedShape* shape = GetFunctionShape(cx, clasp, proto, allocKind);
+ if (!shape) {
+ return nullptr;
+ }
+
+ shapeRef.init(shape);
+ return shape;
+}
+
+JSFunction* js::NewFunctionWithProto(
+ JSContext* cx, Native native, unsigned nargs, FunctionFlags flags,
+ HandleObject enclosingEnv, Handle<JSAtom*> atom, HandleObject proto,
+ gc::AllocKind allocKind /* = AllocKind::FUNCTION */,
+ NewObjectKind newKind /* = GenericObject */) {
+ MOZ_ASSERT(allocKind == gc::AllocKind::FUNCTION ||
+ allocKind == gc::AllocKind::FUNCTION_EXTENDED);
+ MOZ_ASSERT_IF(native, !enclosingEnv);
+ MOZ_ASSERT(NewFunctionEnvironmentIsWellFormed(cx, enclosingEnv));
+
+ // NOTE: Keep this in sync with `CreateFunctionFast` in Stencil.cpp
+
+ const JSClass* clasp = FunctionClassForAllocKind(allocKind);
+
+ Rooted<SharedShape*> shape(cx);
+ if (!proto) {
+ bool extended = (allocKind == gc::AllocKind::FUNCTION_EXTENDED);
+ shape = GlobalObject::getFunctionShapeWithDefaultProto(cx, extended);
+ } else {
+ shape = GetFunctionShape(cx, clasp, proto, allocKind);
+ }
+ if (!shape) {
+ return nullptr;
+ }
+
+ gc::Heap heap = GetInitialHeap(newKind, clasp);
+ JSFunction* fun = JSFunction::create(cx, allocKind, heap, shape);
+ if (!fun) {
+ return nullptr;
+ }
+
+ if (allocKind == gc::AllocKind::FUNCTION_EXTENDED) {
+ flags.setIsExtended();
+ }
+
+ // Disallow flags that require special union arms to be initialized.
+ MOZ_ASSERT(!flags.hasSelfHostedLazyScript());
+ MOZ_ASSERT(!flags.isWasmWithJitEntry());
+
+ /* Initialize all function members. */
+ fun->setArgCount(uint16_t(nargs));
+ fun->setFlags(flags);
+ if (fun->isInterpreted()) {
+ fun->initScript(nullptr);
+ fun->initEnvironment(enclosingEnv);
+ } else {
+ MOZ_ASSERT(fun->isNativeFun());
+ fun->initNative(native, nullptr);
+ }
+ fun->initAtom(atom);
+
+ return fun;
+}
+
+bool js::GetFunctionPrototype(JSContext* cx, js::GeneratorKind generatorKind,
+ js::FunctionAsyncKind asyncKind,
+ js::MutableHandleObject proto) {
+ if (generatorKind == js::GeneratorKind::NotGenerator) {
+ if (asyncKind == js::FunctionAsyncKind::SyncFunction) {
+ proto.set(nullptr);
+ return true;
+ }
+
+ proto.set(
+ GlobalObject::getOrCreateAsyncFunctionPrototype(cx, cx->global()));
+ } else {
+ if (asyncKind == js::FunctionAsyncKind::SyncFunction) {
+ proto.set(GlobalObject::getOrCreateGeneratorFunctionPrototype(
+ cx, cx->global()));
+ } else {
+ proto.set(GlobalObject::getOrCreateAsyncGenerator(cx, cx->global()));
+ }
+ }
+ return !!proto;
+}
+
+#ifdef DEBUG
+static bool CanReuseScriptForClone(JS::Realm* realm, HandleFunction fun,
+ HandleObject newEnclosingEnv) {
+ MOZ_ASSERT(fun->isInterpreted());
+
+ if (realm != fun->realm()) {
+ return false;
+ }
+
+ if (newEnclosingEnv->is<GlobalObject>()) {
+ return true;
+ }
+
+ // Don't need to clone the script if newEnclosingEnv is a syntactic scope,
+ // since in that case we have some actual scope objects on our scope chain and
+ // whatnot; whoever put them there should be responsible for setting our
+ // script's flags appropriately. We hit this case for JSOp::Lambda, for
+ // example.
+ if (IsSyntacticEnvironment(newEnclosingEnv)) {
+ return true;
+ }
+
+ // We need to clone the script if we're not already marked as having a
+  // non-syntactic scope. The HasNonSyntacticScope flag is not computed for
+  // lazy scripts, so fall back to checking the scope chain.
+ BaseScript* script = fun->baseScript();
+ return script->hasNonSyntacticScope() ||
+ script->enclosingScope()->hasOnChain(ScopeKind::NonSyntactic);
+}
+#endif
+
+static inline JSFunction* NewFunctionClone(JSContext* cx, HandleFunction fun,
+ HandleObject proto) {
+ MOZ_ASSERT(cx->realm() == fun->realm());
+ MOZ_ASSERT(proto);
+
+ const JSClass* clasp = fun->getClass();
+ gc::AllocKind allocKind = fun->getAllocKind();
+ AssertClassMatchesAllocKind(clasp, allocKind);
+
+ // If |fun| also has |proto| as prototype (the common case) we can reuse its
+ // shape for the clone. This works because |fun| isn't exposed to script.
+ Rooted<SharedShape*> shape(cx);
+ if (fun->staticPrototype() == proto) {
+ shape = fun->sharedShape();
+ MOZ_ASSERT(shape->propMapLength() == 0);
+ MOZ_ASSERT(shape->objectFlags().isEmpty());
+ MOZ_ASSERT(shape->realm() == cx->realm());
+ } else {
+ shape = GetFunctionShape(cx, clasp, proto, allocKind);
+ if (!shape) {
+ return nullptr;
+ }
+ }
+
+ JSFunction* clone =
+ JSFunction::create(cx, allocKind, gc::Heap::Default, shape);
+ if (!clone) {
+ return nullptr;
+ }
+
+ constexpr uint16_t NonCloneableFlags =
+ FunctionFlags::RESOLVED_LENGTH | FunctionFlags::RESOLVED_NAME;
+
+ FunctionFlags flags = fun->flags();
+ flags.clearFlags(NonCloneableFlags);
+
+ clone->setArgCount(fun->nargs());
+ clone->setFlags(flags);
+
+ // Note: |clone| and |fun| are same-zone so we don't need to call markAtom.
+ clone->initAtom(fun->displayAtom());
+
+ return clone;
+}
+
+JSFunction* js::CloneFunctionReuseScript(JSContext* cx, HandleFunction fun,
+ HandleObject enclosingEnv,
+ HandleObject proto) {
+ MOZ_ASSERT(cx->realm() == fun->realm());
+ MOZ_ASSERT(NewFunctionEnvironmentIsWellFormed(cx, enclosingEnv));
+ MOZ_ASSERT(fun->isInterpreted());
+ MOZ_ASSERT(CanReuseScriptForClone(cx->realm(), fun, enclosingEnv));
+
+ RootedFunction clone(cx, NewFunctionClone(cx, fun, proto));
+ if (!clone) {
+ return nullptr;
+ }
+
+ if (fun->hasBaseScript()) {
+ BaseScript* base = fun->baseScript();
+ clone->initScript(base);
+ clone->initEnvironment(enclosingEnv);
+ } else {
+ MOZ_ASSERT(fun->hasSelfHostedLazyScript());
+ SelfHostedLazyScript* lazy = fun->selfHostedLazyScript();
+ clone->initSelfHostedLazyScript(lazy);
+ clone->initEnvironment(enclosingEnv);
+ }
+
+#ifdef DEBUG
+ // Assert extended slots don't need to be copied.
+ if (fun->isExtended()) {
+ for (unsigned i = 0; i < FunctionExtended::NUM_EXTENDED_SLOTS; i++) {
+ MOZ_ASSERT(fun->getExtendedSlot(i).isUndefined());
+ MOZ_ASSERT(clone->getExtendedSlot(i).isUndefined());
+ }
+ }
+#endif
+
+ return clone;
+}
+
+JSFunction* js::CloneAsmJSModuleFunction(JSContext* cx, HandleFunction fun) {
+ MOZ_ASSERT(fun->isNativeFun());
+ MOZ_ASSERT(IsAsmJSModule(fun));
+ MOZ_ASSERT(fun->isExtended());
+ MOZ_ASSERT(cx->compartment() == fun->compartment());
+
+ RootedObject proto(cx, fun->staticPrototype());
+ JSFunction* clone = NewFunctionClone(cx, fun, proto);
+ if (!clone) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(fun->native() == InstantiateAsmJS);
+ MOZ_ASSERT(!fun->hasJitInfo());
+ clone->initNative(InstantiateAsmJS, nullptr);
+
+ JSObject* moduleObj =
+ &fun->getExtendedSlot(FunctionExtended::ASMJS_MODULE_SLOT).toObject();
+ clone->initExtendedSlot(FunctionExtended::ASMJS_MODULE_SLOT,
+ ObjectValue(*moduleObj));
+
+ return clone;
+}
+
+static JSAtom* SymbolToFunctionName(JSContext* cx, JS::Symbol* symbol,
+ FunctionPrefixKind prefixKind) {
+ // Step 4.a.
+ JSAtom* desc = symbol->description();
+
+ // Step 4.b, no prefix fastpath.
+ if (!desc && prefixKind == FunctionPrefixKind::None) {
+ return cx->names().empty;
+ }
+
+ // Step 5 (reordered).
+ StringBuffer sb(cx);
+ if (prefixKind == FunctionPrefixKind::Get) {
+ if (!sb.append("get ")) {
+ return nullptr;
+ }
+ } else if (prefixKind == FunctionPrefixKind::Set) {
+ if (!sb.append("set ")) {
+ return nullptr;
+ }
+ }
+
+ // Step 4.b.
+ if (desc) {
+    // Note: Private symbols are wedged in here. Implementation-wise they're
+    // PrivateNameSymbols with the source-level name as a description, i.e.
+    // obj.#f desugars to obj.[PrivateNameSymbol("#f")]. However, they use
+    // property naming rather than symbol naming.
+ if (symbol->isPrivateName()) {
+ if (!sb.append(desc)) {
+ return nullptr;
+ }
+ } else {
+ // Step 4.c.
+ if (!sb.append('[') || !sb.append(desc) || !sb.append(']')) {
+ return nullptr;
+ }
+ }
+ }
+ return sb.finishAtom();
+}
+
+static JSAtom* NameToFunctionName(JSContext* cx, HandleValue name,
+ FunctionPrefixKind prefixKind) {
+ MOZ_ASSERT(name.isString() || name.isNumeric());
+
+ if (prefixKind == FunctionPrefixKind::None) {
+ return ToAtom<CanGC>(cx, name);
+ }
+
+ JSString* nameStr = ToString(cx, name);
+ if (!nameStr) {
+ return nullptr;
+ }
+
+ StringBuffer sb(cx);
+ if (prefixKind == FunctionPrefixKind::Get) {
+ if (!sb.append("get ")) {
+ return nullptr;
+ }
+ } else {
+ if (!sb.append("set ")) {
+ return nullptr;
+ }
+ }
+ if (!sb.append(nameStr)) {
+ return nullptr;
+ }
+ return sb.finishAtom();
+}
+
+/*
+ * Return an atom for use as the name of a builtin method with the given
+ * property id.
+ *
+ * Function names are always strings. If id is the well-known @@iterator
+ * symbol, this returns "[Symbol.iterator]". If a prefix is supplied the final
+ * name is |prefix + " " + name|.
+ *
+ * Implements steps 3-5 of 9.2.11 SetFunctionName in ES2016.
+ */
+JSAtom* js::IdToFunctionName(
+ JSContext* cx, HandleId id,
+ FunctionPrefixKind prefixKind /* = FunctionPrefixKind::None */) {
+ MOZ_ASSERT(id.isString() || id.isSymbol() || id.isInt());
+
+ // No prefix fastpath.
+ if (id.isAtom() && prefixKind == FunctionPrefixKind::None) {
+ return id.toAtom();
+ }
+
+ // Step 3 (implicit).
+
+ // Step 4.
+ if (id.isSymbol()) {
+ return SymbolToFunctionName(cx, id.toSymbol(), prefixKind);
+ }
+
+ // Step 5.
+ RootedValue idv(cx, IdToValue(id));
+ return NameToFunctionName(cx, idv, prefixKind);
+}
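+
+// For illustration of the naming rules above:
+//
+//   ({ [Symbol.iterator]: function () {} })[Symbol.iterator].name;
+//   // => "[Symbol.iterator]"
+//   Object.getOwnPropertyDescriptor({ get x() {} }, "x").get.name;
+//   // => "get x"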
+
+bool js::SetFunctionName(JSContext* cx, HandleFunction fun, HandleValue name,
+ FunctionPrefixKind prefixKind) {
+ MOZ_ASSERT(name.isString() || name.isSymbol() || name.isNumeric());
+
+ // `fun` is a newly created function, so it can't already have an inferred
+ // name.
+ MOZ_ASSERT(!fun->hasInferredName());
+
+ // Anonymous functions should neither have an own 'name' property nor a
+ // resolved name at this point.
+ MOZ_ASSERT(!fun->containsPure(cx->names().name));
+ MOZ_ASSERT(!fun->hasResolvedName());
+
+ JSAtom* funName = name.isSymbol()
+ ? SymbolToFunctionName(cx, name.toSymbol(), prefixKind)
+ : NameToFunctionName(cx, name, prefixKind);
+ if (!funName) {
+ return false;
+ }
+
+ fun->setInferredName(funName);
+
+ return true;
+}
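+
+// For illustration, this runtime path (the JSOp::SetFunName case) runs for
+// computed property keys that are only known at runtime:
+//
+//   const key = "a" + "b";
+//   const o = { [key]: function () {} };
+//   o.ab.name; // "ab"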
+
+JSFunction* js::DefineFunction(
+ JSContext* cx, HandleObject obj, HandleId id, Native native, unsigned nargs,
+ unsigned flags, gc::AllocKind allocKind /* = AllocKind::FUNCTION */) {
+ Rooted<JSAtom*> atom(cx, IdToFunctionName(cx, id));
+ if (!atom) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(native);
+
+ RootedFunction fun(cx);
+ if (flags & JSFUN_CONSTRUCTOR) {
+ fun = NewNativeConstructor(cx, native, nargs, atom, allocKind);
+ } else {
+ fun = NewNativeFunction(cx, native, nargs, atom, allocKind);
+ }
+
+ if (!fun) {
+ return nullptr;
+ }
+
+ RootedValue funVal(cx, ObjectValue(*fun));
+ if (!DefineDataProperty(cx, obj, id, funVal, flags & ~JSFUN_FLAGS_MASK)) {
+ return nullptr;
+ }
+
+ return fun;
+}
+
+void js::ReportIncompatibleMethod(JSContext* cx, const CallArgs& args,
+ const JSClass* clasp) {
+ RootedValue thisv(cx, args.thisv());
+
+#ifdef DEBUG
+ switch (thisv.type()) {
+ case ValueType::Object:
+ MOZ_ASSERT(thisv.toObject().getClass() != clasp ||
+ !thisv.toObject().is<NativeObject>() ||
+ !thisv.toObject().staticPrototype() ||
+ thisv.toObject().staticPrototype()->getClass() != clasp);
+ break;
+# ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+ MOZ_CRASH("ExtendedPrimitive is not supported yet");
+ break;
+# endif
+ case ValueType::String:
+ MOZ_ASSERT(clasp != &StringObject::class_);
+ break;
+ case ValueType::Double:
+ case ValueType::Int32:
+ MOZ_ASSERT(clasp != &NumberObject::class_);
+ break;
+ case ValueType::Boolean:
+ MOZ_ASSERT(clasp != &BooleanObject::class_);
+ break;
+ case ValueType::Symbol:
+ MOZ_ASSERT(clasp != &SymbolObject::class_);
+ break;
+ case ValueType::BigInt:
+ MOZ_ASSERT(clasp != &BigIntObject::class_);
+ break;
+ case ValueType::Undefined:
+ case ValueType::Null:
+ break;
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ MOZ_CRASH("unexpected type");
+ }
+#endif
+
+ if (JSFunction* fun = ReportIfNotFunction(cx, args.calleev())) {
+ UniqueChars funNameBytes;
+ if (const char* funName = GetFunctionNameBytes(cx, fun, &funNameBytes)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_INCOMPATIBLE_PROTO, clasp->name, funName,
+ InformalValueTypeName(thisv));
+ }
+ }
+}
+
+void js::ReportIncompatible(JSContext* cx, const CallArgs& args) {
+ if (JSFunction* fun = ReportIfNotFunction(cx, args.calleev())) {
+ UniqueChars funNameBytes;
+ if (const char* funName = GetFunctionNameBytes(cx, fun, &funNameBytes)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_INCOMPATIBLE_METHOD, funName, "method",
+ InformalValueTypeName(args.thisv()));
+ }
+ }
+}
+
+namespace JS {
+namespace detail {
+
+JS_PUBLIC_API void CheckIsValidConstructible(const Value& calleev) {
+ MOZ_ASSERT(calleev.toObject().isConstructor());
+}
+
+} // namespace detail
+} // namespace JS
diff --git a/js/src/vm/JSFunction.h b/js/src/vm/JSFunction.h
new file mode 100644
index 0000000000..7f44f601c5
--- /dev/null
+++ b/js/src/vm/JSFunction.h
@@ -0,0 +1,875 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSFunction_h
+#define vm_JSFunction_h
+
+/*
+ * JS function definitions.
+ */
+
+#include <string_view>
+
+#include "jstypes.h"
+
+#include "gc/Policy.h"
+#include "js/shadow/Function.h" // JS::shadow::Function
+#include "vm/FunctionFlags.h" // FunctionFlags
+#include "vm/FunctionPrefixKind.h" // FunctionPrefixKind
+#include "vm/GeneratorAndAsyncKind.h" // GeneratorKind, FunctionAsyncKind
+#include "vm/JSAtom.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+
+class JSJitInfo;
+
+namespace js {
+
+class FunctionExtended;
+struct SelfHostedLazyScript;
+
+using Native = JSNative;
+
+static constexpr std::string_view FunctionConstructorMedialSigils = ") {\n";
+static constexpr std::string_view FunctionConstructorFinalBrace = "\n}";
+
+// JSFunctions can have one of two classes:
+extern const JSClass FunctionClass;
+extern const JSClass ExtendedFunctionClass;
+
+namespace wasm {
+
+class Instance;
+
+} // namespace wasm
+} // namespace js
+
+class JSFunction : public js::NativeObject {
+ public:
+ static_assert(sizeof(js::FunctionFlags) == sizeof(uint16_t));
+ static constexpr size_t ArgCountShift = 16;
+ static constexpr size_t FlagsMask = js::BitMask(ArgCountShift);
+ static constexpr size_t ArgCountMask = js::BitMask(16) << ArgCountShift;
+
+ enum {
+ /*
+ * Bitfield composed of FunctionFlags and argument count, stored as a
+ * PrivateUint32Value.
+ *
+ * If any of these flags needs to be accessed in off-thread JIT compilation,
+ * copy it to js::jit::WrappedFunction.
+ */
+ FlagsAndArgCountSlot,
+
+ /*
+ * For native functions, the native method pointer stored as a private
+ * value, or undefined.
+ *
+ * For interpreted functions, the environment object for new activations or
+ * null.
+ */
+ NativeFuncOrInterpretedEnvSlot,
+
+ /*
+ * For native functions this is one of:
+ *
+ * - JSJitInfo* to be used by the JIT, only used if isBuiltinNative() for
+ * builtin natives
+ *
+ * - wasm function index for wasm/asm.js without a jit entry. Always has
+ * the low bit set to ensure it's never identical to a BaseScript*
+ * pointer
+ *
+ * - a wasm JIT entry
+ *
+ * The JIT depends on none of the above being a valid BaseScript pointer.
+ *
+ * For interpreted functions this is either a BaseScript or the
+ * SelfHostedLazyScript pointer.
+ *
+ * These are all stored as private values, because the JIT assumes that it
+ * can access the SelfHostedLazyScript and BaseScript pointer in the same
+ * way.
+ */
+ NativeJitInfoOrInterpretedScriptSlot,
+
+ // The `atom_` field can have different meanings depending on the function
+  // type and flags. It is used for diagnostics, for decompiling, and:
+ //
+ // a. If HAS_GUESSED_ATOM is not set, to store the initial value of the
+ // "name" property of functions. But also see RESOLVED_NAME.
+ // b. If HAS_GUESSED_ATOM is set, `atom_` is only used for diagnostics,
+ // but must not be used for the "name" property.
+ // c. If HAS_INFERRED_NAME is set, the function wasn't given an explicit
+ // name in the source text, e.g. `function fn(){}`, but instead it
+ // was inferred based on how the function was defined in the source
+ // text. The exact name inference rules are defined in the ECMAScript
+ // specification.
+ // Name inference can happen at compile-time, for example in
+ // `var fn = function(){}`, or it can happen at runtime, for example
+ // in `var o = {[Symbol.iterator]: function(){}}`. When it happens at
+ // compile-time, the HAS_INFERRED_NAME is set directly in the
+ // bytecode emitter, when it happens at runtime, the flag is set when
+ // evaluating the JSOp::SetFunName bytecode.
+ // d. HAS_GUESSED_ATOM and HAS_INFERRED_NAME cannot both be set.
+ // e. `atom_` can be null if neither an explicit, nor inferred, nor a
+ // guessed name was set.
+ //
+ // Self-hosted functions have two names. For example, Array.prototype.sort
+ // has the standard name "sort", but the implementation in Array.js is named
+ // "ArraySort".
+ //
+  // - In the self-hosting realm, these functions have `atom_` set to the
+ // implementation name.
+ //
+  // - When we clone these functions into normal realms, we set `atom_` to
+ // the standard name. (The self-hosted name is also stored on the clone,
+ // in another slot; see GetClonedSelfHostedFunctionName().)
+ AtomSlot,
+
+ SlotCount
+ };
+
+ private:
+ using FunctionFlags = js::FunctionFlags;
+
+ public:
+ static inline JSFunction* create(JSContext* cx, js::gc::AllocKind kind,
+ js::gc::Heap heap,
+ js::Handle<js::SharedShape*> shape);
+
+ /* Call objects must be created for each invocation of this function. */
+ bool needsCallObject() const;
+
+ bool needsExtraBodyVarEnvironment() const;
+ bool needsNamedLambdaEnvironment() const;
+
+ bool needsFunctionEnvironmentObjects() const {
+ bool res = nonLazyScript()->needsFunctionEnvironmentObjects();
+ MOZ_ASSERT(res == (needsCallObject() || needsNamedLambdaEnvironment()));
+ return res;
+ }
+
+ bool needsSomeEnvironmentObject() const {
+ return needsFunctionEnvironmentObjects() || needsExtraBodyVarEnvironment();
+ }
+
+ uint32_t flagsAndArgCountRaw() const {
+ return getFixedSlot(FlagsAndArgCountSlot).toPrivateUint32();
+ }
+
+ void initFlagsAndArgCount() {
+ initFixedSlot(FlagsAndArgCountSlot, JS::PrivateUint32Value(0));
+ }
+
+ size_t nargs() const { return flagsAndArgCountRaw() >> ArgCountShift; }
+
+ FunctionFlags flags() const {
+ return FunctionFlags(uint16_t(flagsAndArgCountRaw() & FlagsMask));
+ }
+
+ FunctionFlags::FunctionKind kind() const { return flags().kind(); }
+
+ /* A function can be classified as either native (C++) or interpreted (JS): */
+ bool isInterpreted() const { return flags().isInterpreted(); }
+ bool isNativeFun() const { return flags().isNativeFun(); }
+
+ bool isConstructor() const { return flags().isConstructor(); }
+
+ bool isNonBuiltinConstructor() const {
+ return flags().isNonBuiltinConstructor();
+ }
+
+ /* Possible attributes of a native function: */
+ bool isAsmJSNative() const { return flags().isAsmJSNative(); }
+
+ bool isWasm() const { return flags().isWasm(); }
+ bool isWasmWithJitEntry() const { return flags().isWasmWithJitEntry(); }
+ bool isNativeWithoutJitEntry() const {
+ return flags().isNativeWithoutJitEntry();
+ }
+ bool isBuiltinNative() const { return flags().isBuiltinNative(); }
+
+ bool hasJitEntry() const { return flags().hasJitEntry(); }
+
+ /* Possible attributes of an interpreted function: */
+ bool hasInferredName() const { return flags().hasInferredName(); }
+ bool hasGuessedAtom() const { return flags().hasGuessedAtom(); }
+
+ bool isLambda() const { return flags().isLambda(); }
+
+ // These methods determine which kind of script we hold.
+ //
+  // For live JSFunctions the pointer values will always be non-null, but due
+  // to partial initialization the GC (and other features that scan the heap
+  // directly) may still observe a null pointer.
+ bool hasSelfHostedLazyScript() const {
+ return flags().hasSelfHostedLazyScript();
+ }
+ bool hasBaseScript() const { return flags().hasBaseScript(); }
+
+ bool hasBytecode() const {
+ MOZ_ASSERT(!isIncomplete());
+ return hasBaseScript() && baseScript()->hasBytecode();
+ }
+
+ bool isGhost() const { return flags().isGhost(); }
+
+ // Arrow functions store their lexical new.target in the first extended slot.
+ bool isArrow() const { return flags().isArrow(); }
+ // Every class-constructor is also a method.
+ bool isMethod() const { return flags().isMethod(); }
+ bool isClassConstructor() const { return flags().isClassConstructor(); }
+
+ bool isGetter() const { return flags().isGetter(); }
+ bool isSetter() const { return flags().isSetter(); }
+
+ bool allowSuperProperty() const { return flags().allowSuperProperty(); }
+
+ bool hasResolvedLength() const { return flags().hasResolvedLength(); }
+ bool hasResolvedName() const { return flags().hasResolvedName(); }
+
+ bool isSelfHostedOrIntrinsic() const {
+ return flags().isSelfHostedOrIntrinsic();
+ }
+ bool isSelfHostedBuiltin() const { return flags().isSelfHostedBuiltin(); }
+
+ bool isIntrinsic() const { return flags().isIntrinsic(); }
+
+ bool hasJitScript() const {
+ if (!hasBaseScript()) {
+ return false;
+ }
+
+ return baseScript()->hasJitScript();
+ }
+
+ /* Compound attributes: */
+ bool isBuiltin() const { return isBuiltinNative() || isSelfHostedBuiltin(); }
+
+ bool isNamedLambda() const {
+ return flags().isNamedLambda(displayAtom() != nullptr);
+ }
+
+ bool hasLexicalThis() const { return isArrow(); }
+
+ bool isBuiltinFunctionConstructor();
+ bool needsPrototypeProperty();
+
+ // Returns true if this function must have a non-configurable .prototype data
+ // property. This is used to ensure looking up .prototype elsewhere will have
+ // no side-effects.
+ bool hasNonConfigurablePrototypeDataProperty();
+
+ // Returns true if |new Fun()| should not allocate a new object caller-side
+ // but pass the uninitialized-lexical MagicValue and rely on the callee to
+ // construct its own |this| object.
+ bool constructorNeedsUninitializedThis() const {
+ MOZ_ASSERT(isConstructor());
+ MOZ_ASSERT(isInterpreted());
+ return isDerivedClassConstructor();
+ }
+
+ /* Returns the strictness of this function, which must be interpreted. */
+ bool strict() const { return baseScript()->strict(); }
+
+ void setFlags(FunctionFlags flags) { setFlags(flags.toRaw()); }
+ void setFlags(uint16_t flags) {
+ uint32_t flagsAndArgCount = flagsAndArgCountRaw();
+ flagsAndArgCount &= ~FlagsMask;
+ flagsAndArgCount |= flags;
+ js::HeapSlot& slot = getFixedSlotRef(FlagsAndArgCountSlot);
+ slot.unbarrieredSet(JS::PrivateUint32Value(flagsAndArgCount));
+ }
+
+ // Make the function constructible.
+ void setIsConstructor() { setFlags(flags().setIsConstructor()); }
+
+ // Can be called multiple times by the parser.
+ void setArgCount(uint16_t nargs) {
+ uint32_t flagsAndArgCount = flagsAndArgCountRaw();
+ flagsAndArgCount &= ~ArgCountMask;
+ flagsAndArgCount |= nargs << ArgCountShift;
+ js::HeapSlot& slot = getFixedSlotRef(FlagsAndArgCountSlot);
+ slot.unbarrieredSet(JS::PrivateUint32Value(flagsAndArgCount));
+ }
+
+ void setIsSelfHostedBuiltin() { setFlags(flags().setIsSelfHostedBuiltin()); }
+ void setIsIntrinsic() { setFlags(flags().setIsIntrinsic()); }
+
+ void setResolvedLength() { setFlags(flags().setResolvedLength()); }
+ void setResolvedName() { setFlags(flags().setResolvedName()); }
+
+ static inline bool getUnresolvedLength(JSContext* cx, js::HandleFunction fun,
+ uint16_t* length);
+
+ inline JSAtom* infallibleGetUnresolvedName(JSContext* cx);
+
+ JSAtom* explicitName() const {
+ return (hasInferredName() || hasGuessedAtom()) ? nullptr : rawAtom();
+ }
+
+ JSAtom* explicitOrInferredName() const {
+ return hasGuessedAtom() ? nullptr : rawAtom();
+ }
+
+ void initAtom(JSAtom* atom) {
+ MOZ_ASSERT_IF(atom, js::AtomIsMarked(zone(), atom));
+ MOZ_ASSERT(getFixedSlot(AtomSlot).isUndefined());
+ if (atom) {
+ initFixedSlot(AtomSlot, JS::StringValue(atom));
+ }
+ }
+
+ void setAtom(JSAtom* atom) {
+ MOZ_ASSERT_IF(atom, js::AtomIsMarked(zone(), atom));
+ setFixedSlot(AtomSlot, atom ? JS::StringValue(atom) : JS::UndefinedValue());
+ }
+
+ JSAtom* displayAtom() const { return rawAtom(); }
+
+ JSAtom* rawAtom() const {
+ JS::Value value = getFixedSlot(AtomSlot);
+ return value.isUndefined() ? nullptr : &value.toString()->asAtom();
+ }
+
+ void setInferredName(JSAtom* atom) {
+ MOZ_ASSERT(!rawAtom());
+ MOZ_ASSERT(atom);
+ MOZ_ASSERT(!hasGuessedAtom());
+ setAtom(atom);
+ setFlags(flags().setInferredName());
+ }
+ JSAtom* inferredName() const {
+ MOZ_ASSERT(hasInferredName());
+ MOZ_ASSERT(rawAtom());
+ return rawAtom();
+ }
+
+ void setGuessedAtom(JSAtom* atom) {
+ MOZ_ASSERT(!rawAtom());
+ MOZ_ASSERT(atom);
+ MOZ_ASSERT(!hasInferredName());
+ MOZ_ASSERT(!hasGuessedAtom());
+ setAtom(atom);
+ setFlags(flags().setGuessedAtom());
+ }
+
+  /* The uint16_t representation bounds the number of call object dynamic
+   * slots. */
+ enum { MAX_ARGS_AND_VARS = 2 * ((1U << 16) - 1) };
+
+ /*
+ * For an interpreted function, accessors for the initial scope object of
+ * activations (stack frames) of the function.
+ */
+ JSObject* environment() const {
+ MOZ_ASSERT(isInterpreted());
+ return getFixedSlot(NativeFuncOrInterpretedEnvSlot).toObjectOrNull();
+ }
+
+ void initEnvironment(JSObject* obj) {
+ MOZ_ASSERT(isInterpreted());
+ initFixedSlot(NativeFuncOrInterpretedEnvSlot, JS::ObjectOrNullValue(obj));
+ }
+
+ public:
+ static constexpr size_t offsetOfFlagsAndArgCount() {
+ return getFixedSlotOffset(FlagsAndArgCountSlot);
+ }
+ static size_t offsetOfEnvironment() { return offsetOfNativeOrEnv(); }
+ static size_t offsetOfAtom() { return getFixedSlotOffset(AtomSlot); }
+
+ static bool delazifyLazilyInterpretedFunction(JSContext* cx,
+ js::HandleFunction fun);
+ static bool delazifySelfHostedLazyFunction(JSContext* cx,
+ js::HandleFunction fun);
+ void maybeRelazify(JSRuntime* rt);
+
+ // Function Scripts
+ //
+  // Interpreted functions have either a BaseScript or a SelfHostedLazyScript.
+  // A BaseScript may be either lazy or non-lazy (hasBytecode()). Methods may
+  // return a JSScript* if the underlying BaseScript is known to have bytecode.
+ //
+ // There are several methods to get the script of an interpreted function:
+ //
+ // - For all interpreted functions, getOrCreateScript() will get the
+ // JSScript, delazifying the function if necessary. This is the safest to
+ // use, but has extra checks, requires a cx and may trigger a GC.
+ //
+ // - For functions known to have a JSScript, nonLazyScript() will get it.
+
+ static JSScript* getOrCreateScript(JSContext* cx, js::HandleFunction fun) {
+ MOZ_ASSERT(fun->isInterpreted());
+ MOZ_ASSERT(cx);
+
+ if (fun->hasSelfHostedLazyScript()) {
+ if (!delazifySelfHostedLazyFunction(cx, fun)) {
+ return nullptr;
+ }
+ return fun->nonLazyScript();
+ }
+
+ MOZ_ASSERT(fun->hasBaseScript());
+
+ if (!fun->baseScript()->hasBytecode()) {
+ if (!delazifyLazilyInterpretedFunction(cx, fun)) {
+ return nullptr;
+ }
+ }
+ return fun->nonLazyScript();
+ }
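+
+  // Typical caller-side use of getOrCreateScript() (sketch):
+  //
+  //   JSScript* script = JSFunction::getOrCreateScript(cx, fun);
+  //   if (!script) {
+  //     return false;  // delazification failed, an error has been reported
+  //   }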
+
+  // If this is a scripted function, returns its canonical function (the
+  // original function allocated by the frontend). Note that lazy self-hosted
+  // builtins have a SelfHostedLazyScript rather than a BaseScript, so in that
+  // case we also return nullptr.
+ JSFunction* maybeCanonicalFunction() const {
+ if (hasBaseScript()) {
+ return baseScript()->function();
+ }
+ return nullptr;
+ }
+
+ private:
+ void* nativeJitInfoOrInterpretedScript() const {
+ return getFixedSlot(NativeJitInfoOrInterpretedScriptSlot).toPrivate();
+ }
+ void setNativeJitInfoOrInterpretedScript(void* ptr) {
+ // This always stores a PrivateValue and so doesn't require a barrier.
+ js::HeapSlot& slot = getFixedSlotRef(NativeJitInfoOrInterpretedScriptSlot);
+ slot.unbarrieredSet(JS::PrivateValue(ptr));
+ }
+
+ public:
+ // The default state of a JSFunction that is not ready for execution. If
+ // observed outside initialization, this is the result of failure during
+ // bytecode compilation.
+ //
+  // A BaseScript is fully initialized before
+  // NativeJitInfoOrInterpretedScriptSlot is initialized with a reference to
+  // it.
+ bool isIncomplete() const {
+ return isInterpreted() && !nativeJitInfoOrInterpretedScript();
+ }
+
+ JSScript* nonLazyScript() const {
+ MOZ_ASSERT(hasBytecode());
+ return static_cast<JSScript*>(baseScript());
+ }
+
+ js::SelfHostedLazyScript* selfHostedLazyScript() const {
+ MOZ_ASSERT(hasSelfHostedLazyScript());
+ return static_cast<js::SelfHostedLazyScript*>(
+ nativeJitInfoOrInterpretedScript());
+ }
+
+ // Access fields defined on both lazy and non-lazy scripts.
+ js::BaseScript* baseScript() const {
+ MOZ_ASSERT(hasBaseScript());
+    return static_cast<js::BaseScript*>(nativeJitInfoOrInterpretedScript());
+ }
+
+ static inline bool getLength(JSContext* cx, js::HandleFunction fun,
+ uint16_t* length);
+
+ js::Scope* enclosingScope() const { return baseScript()->enclosingScope(); }
+
+ void setEnclosingLazyScript(js::BaseScript* enclosingScript) {
+ baseScript()->setEnclosingScript(enclosingScript);
+ }
+
+ js::GeneratorKind generatorKind() const {
+ if (hasBaseScript()) {
+ return baseScript()->generatorKind();
+ }
+ if (hasSelfHostedLazyScript()) {
+ return clonedSelfHostedGeneratorKind();
+ }
+ return js::GeneratorKind::NotGenerator;
+ }
+
+ js::GeneratorKind clonedSelfHostedGeneratorKind() const;
+
+ bool isGenerator() const {
+ return generatorKind() == js::GeneratorKind::Generator;
+ }
+
+ js::FunctionAsyncKind asyncKind() const {
+ if (hasBaseScript()) {
+ return baseScript()->asyncKind();
+ }
+ return js::FunctionAsyncKind::SyncFunction;
+ }
+
+ bool isAsync() const {
+ return asyncKind() == js::FunctionAsyncKind::AsyncFunction;
+ }
+
+ bool isGeneratorOrAsync() const { return isGenerator() || isAsync(); }
+
+ void initScript(js::BaseScript* script) {
+ MOZ_ASSERT_IF(script, realm() == script->realm());
+ MOZ_ASSERT(isInterpreted());
+ MOZ_ASSERT_IF(hasBaseScript(),
+ !baseScript()); // No write barrier required.
+ setNativeJitInfoOrInterpretedScript(script);
+ }
+
+ void initSelfHostedLazyScript(js::SelfHostedLazyScript* lazy) {
+ MOZ_ASSERT(isSelfHostedBuiltin());
+ MOZ_ASSERT(isInterpreted());
+ if (hasBaseScript()) {
+ js::gc::PreWriteBarrier(baseScript());
+ }
+ FunctionFlags f = flags();
+ f.clearBaseScript();
+ f.setSelfHostedLazy();
+ setFlags(f);
+ setNativeJitInfoOrInterpretedScript(lazy);
+ MOZ_ASSERT(hasSelfHostedLazyScript());
+ }
+
+ void clearSelfHostedLazyScript() {
+ MOZ_ASSERT(isSelfHostedBuiltin());
+ MOZ_ASSERT(isInterpreted());
+ MOZ_ASSERT(!hasBaseScript()); // No write barrier required.
+ FunctionFlags f = flags();
+ f.clearSelfHostedLazy();
+ f.setBaseScript();
+ setFlags(f);
+ setNativeJitInfoOrInterpretedScript(nullptr);
+ MOZ_ASSERT(isIncomplete());
+ }
+
+ JSNative native() const {
+ MOZ_ASSERT(isNativeFun());
+ return nativeUnchecked();
+ }
+ JSNative nativeUnchecked() const {
+ // Can be called by Ion off-main thread.
+ JS::Value value = getFixedSlot(NativeFuncOrInterpretedEnvSlot);
+ return reinterpret_cast<JSNative>(value.toPrivate());
+ }
+
+ JSNative maybeNative() const { return isInterpreted() ? nullptr : native(); }
+
+ void initNative(js::Native native, const JSJitInfo* jitInfo) {
+ MOZ_ASSERT(isNativeFun());
+ MOZ_ASSERT_IF(jitInfo, isBuiltinNative());
+ MOZ_ASSERT(native);
+ initFixedSlot(NativeFuncOrInterpretedEnvSlot,
+ JS::PrivateValue(reinterpret_cast<void*>(native)));
+ setNativeJitInfoOrInterpretedScript(const_cast<JSJitInfo*>(jitInfo));
+ }
+ bool hasJitInfo() const { return isBuiltinNative() && jitInfoUnchecked(); }
+ const JSJitInfo* jitInfo() const {
+ MOZ_ASSERT(hasJitInfo());
+ return jitInfoUnchecked();
+ }
+ const JSJitInfo* jitInfoUnchecked() const {
+ // Can be called by Ion off-main thread.
+ return static_cast<const JSJitInfo*>(nativeJitInfoOrInterpretedScript());
+ }
+ void setJitInfo(const JSJitInfo* data) {
+ MOZ_ASSERT(isBuiltinNative());
+ MOZ_ASSERT(data);
+ setNativeJitInfoOrInterpretedScript(const_cast<JSJitInfo*>(data));
+ }
+
+  // wasm functions are always natives and either:
+  // - store a function index in NativeJitInfoOrInterpretedScriptSlot and can
+  //   only be called through the fun->native() entry point from C++.
+  // - store a jit-entry code pointer in that same slot and can be called by
+  //   jit code directly. C++ callers can still use the fun->native() entry
+  //   point (computing the function index from the jit-entry point).
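+  //
+  // Worked example: funcIndex 5 is stored by setWasmFuncIndex() as
+  // (5 << 1) | 1 == 0xb and read back by wasmFuncIndex() as 0xb >> 1 == 5;
+  // the low tag bit keeps the stored word distinct from any (word-aligned)
+  // BaseScript* pointer.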
+ void setWasmFuncIndex(uint32_t funcIndex) {
+ MOZ_ASSERT(isWasm() || isAsmJSNative());
+ MOZ_ASSERT(!isWasmWithJitEntry());
+ MOZ_ASSERT(!nativeJitInfoOrInterpretedScript());
+    // See the NativeJitInfoOrInterpretedScriptSlot comment for why we set
+    // the low bit.
+ uintptr_t tagged = (uintptr_t(funcIndex) << 1) | 1;
+ setNativeJitInfoOrInterpretedScript(reinterpret_cast<void*>(tagged));
+ }
+ uint32_t wasmFuncIndex() const {
+ MOZ_ASSERT(isWasm() || isAsmJSNative());
+ MOZ_ASSERT(!isWasmWithJitEntry());
+ uintptr_t tagged = uintptr_t(nativeJitInfoOrInterpretedScript());
+ MOZ_ASSERT(tagged & 1);
+ return tagged >> 1;
+ }
+ void setWasmJitEntry(void** entry) {
+ MOZ_ASSERT(*entry);
+ MOZ_ASSERT(isWasm());
+ MOZ_ASSERT(!isWasmWithJitEntry());
+ setFlags(flags().setWasmJitEntry());
+ setNativeJitInfoOrInterpretedScript(entry);
+ MOZ_ASSERT(isWasmWithJitEntry());
+ }
+ void** wasmJitEntry() const {
+ MOZ_ASSERT(isWasmWithJitEntry());
+ return static_cast<void**>(nativeJitInfoOrInterpretedScript());
+ }
+ inline js::wasm::Instance& wasmInstance() const;
+
+ bool isDerivedClassConstructor() const;
+ bool isSyntheticFunction() const;
+
+ static unsigned offsetOfNativeOrEnv() {
+ return getFixedSlotOffset(NativeFuncOrInterpretedEnvSlot);
+ }
+ static unsigned offsetOfJitInfoOrScript() {
+ return getFixedSlotOffset(NativeJitInfoOrInterpretedScriptSlot);
+ }
+
+ inline void trace(JSTracer* trc);
+
+ public:
+ inline bool isExtended() const {
+ bool extended = flags().isExtended();
+ MOZ_ASSERT_IF(isTenured(),
+ extended == (asTenured().getAllocKind() ==
+ js::gc::AllocKind::FUNCTION_EXTENDED));
+ return extended;
+ }
+
+ /*
+ * Accessors for data stored in extended functions. Use setExtendedSlot if the
+ * function has already been initialized. Otherwise use initExtendedSlot.
+ */
+ inline void initExtendedSlot(uint32_t slot, const js::Value& val);
+ inline void setExtendedSlot(uint32_t slot, const js::Value& val);
+ inline const js::Value& getExtendedSlot(uint32_t slot) const;
+
+ /* GC support. */
+ js::gc::AllocKind getAllocKind() const {
+ static_assert(
+ js::gc::AllocKind::FUNCTION != js::gc::AllocKind::FUNCTION_EXTENDED,
+ "extended/non-extended AllocKinds have to be different "
+ "for getAllocKind() to have a reason to exist");
+
+ js::gc::AllocKind kind = js::gc::AllocKind::FUNCTION;
+ if (isExtended()) {
+ kind = js::gc::AllocKind::FUNCTION_EXTENDED;
+ }
+ MOZ_ASSERT_IF(isTenured(), kind == asTenured().getAllocKind());
+ return kind;
+ }
+
+ // If we're constructing with this function, choose an appropriate
+ // allocKind.
+ static bool getAllocKindForThis(JSContext* cx, js::HandleFunction func,
+ js::gc::AllocKind& allocKind);
+};
+
+static_assert(sizeof(JSFunction) == sizeof(JS::shadow::Function),
+ "shadow interface must match actual interface");
+
+static_assert(unsigned(JSFunction::FlagsAndArgCountSlot) ==
+ unsigned(JS::shadow::Function::FlagsAndArgCountSlot));
+static_assert(unsigned(JSFunction::NativeFuncOrInterpretedEnvSlot) ==
+ unsigned(JS::shadow::Function::NativeFuncOrInterpretedEnvSlot));
+static_assert(
+ unsigned(JSFunction::NativeJitInfoOrInterpretedScriptSlot) ==
+ unsigned(JS::shadow::Function::NativeJitInfoOrInterpretedScriptSlot));
+static_assert(unsigned(JSFunction::AtomSlot) ==
+ unsigned(JS::shadow::Function::AtomSlot));
+
+extern JSString* fun_toStringHelper(JSContext* cx, js::HandleObject obj,
+ bool isToSource);
+
+namespace js {
+
+extern bool Function(JSContext* cx, unsigned argc, Value* vp);
+
+extern bool Generator(JSContext* cx, unsigned argc, Value* vp);
+
+extern bool AsyncFunctionConstructor(JSContext* cx, unsigned argc, Value* vp);
+
+extern bool AsyncGeneratorConstructor(JSContext* cx, unsigned argc, Value* vp);
+
+// If enclosingEnv is null, the function will have a null environment()
+// (yes, null, not the global lexical environment). In all cases, the global
+// will be used as the terminating environment.
+
+extern JSFunction* NewFunctionWithProto(
+ JSContext* cx, JSNative native, unsigned nargs, FunctionFlags flags,
+ HandleObject enclosingEnv, Handle<JSAtom*> atom, HandleObject proto,
+ gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
+ NewObjectKind newKind = GenericObject);
+
+// Allocate a new function backed by a JSNative. Note that by default this
+// creates a tenured object.
+inline JSFunction* NewNativeFunction(
+ JSContext* cx, JSNative native, unsigned nargs, Handle<JSAtom*> atom,
+ gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
+ NewObjectKind newKind = TenuredObject,
+ FunctionFlags flags = FunctionFlags::NATIVE_FUN) {
+ MOZ_ASSERT(native);
+ return NewFunctionWithProto(cx, native, nargs, flags, nullptr, atom, nullptr,
+ allocKind, newKind);
+}
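+
+// Minimal usage sketch; MyNative and atom are hypothetical:
+//
+//   static bool MyNative(JSContext* cx, unsigned argc, JS::Value* vp) {
+//     JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+//     args.rval().setInt32(42);
+//     return true;
+//   }
+//
+//   JSFunction* fun = NewNativeFunction(cx, MyNative, /* nargs = */ 0, atom);
+//   if (!fun) {
+//     return false;
+//   }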
+
+// Allocate a new constructor backed by a JSNative. Note that by default this
+// creates a tenured object.
+inline JSFunction* NewNativeConstructor(
+ JSContext* cx, JSNative native, unsigned nargs, Handle<JSAtom*> atom,
+ gc::AllocKind allocKind = gc::AllocKind::FUNCTION,
+ NewObjectKind newKind = TenuredObject,
+ FunctionFlags flags = FunctionFlags::NATIVE_CTOR) {
+ MOZ_ASSERT(native);
+ MOZ_ASSERT(flags.isNativeConstructor());
+ return NewFunctionWithProto(cx, native, nargs, flags, nullptr, atom, nullptr,
+ allocKind, newKind);
+}
+
+// Determine which [[Prototype]] to use when creating a new function using the
+// requested generator and async kind.
+//
+// This sets `proto` to `nullptr` for non-generator, synchronous functions to
+// mean "the builtin %FunctionPrototype% in the current realm", the common case.
+//
+// We could set it to `cx->global()->getOrCreateFunctionPrototype()`, but
+// nullptr gets a fast path in e.g. js::NewObjectWithClassProtoCommon.
+extern bool GetFunctionPrototype(JSContext* cx, js::GeneratorKind generatorKind,
+ js::FunctionAsyncKind asyncKind,
+ js::MutableHandleObject proto);
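+
+// Caller sketch (generatorKind and asyncKind come from the caller):
+//
+//   JS::Rooted<JSObject*> proto(cx);
+//   if (!GetFunctionPrototype(cx, generatorKind, asyncKind, &proto)) {
+//     return nullptr;
+//   }
+//   // proto may be nullptr here, meaning the builtin %Function.prototype%.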
+
+extern JSAtom* IdToFunctionName(
+ JSContext* cx, HandleId id,
+ FunctionPrefixKind prefixKind = FunctionPrefixKind::None);
+
+extern bool SetFunctionName(JSContext* cx, HandleFunction fun, HandleValue name,
+ FunctionPrefixKind prefixKind);
+
+extern JSFunction* DefineFunction(
+ JSContext* cx, HandleObject obj, HandleId id, JSNative native,
+ unsigned nargs, unsigned flags,
+ gc::AllocKind allocKind = gc::AllocKind::FUNCTION);
+
+extern bool fun_toString(JSContext* cx, unsigned argc, Value* vp);
+
+extern void ThrowTypeErrorBehavior(JSContext* cx);
+
+/*
+ * Function extended with reserved slots for use by various kinds of functions.
+ * Most functions do not have these extensions, but enough do that efficient
+ * storage is required (no malloc'ed reserved slots).
+ */
+class FunctionExtended : public JSFunction {
+ public:
+ enum {
+ FirstExtendedSlot = JSFunction::SlotCount,
+ SecondExtendedSlot,
+
+ SlotCount
+ };
+
+ static const uint32_t NUM_EXTENDED_SLOTS = 2;
+
+ static const uint32_t METHOD_HOMEOBJECT_SLOT = 0;
+
+ // wasm/asm.js exported functions store a code pointer to their direct entry
+ // point (see CodeRange::funcUncheckedCallEntry()) to support the call_ref
+ // instruction.
+ static const uint32_t WASM_FUNC_UNCHECKED_ENTRY_SLOT = 0;
+
+ // wasm/asm.js exported functions store the wasm::Instance pointer of their
+ // instance.
+ static const uint32_t WASM_INSTANCE_SLOT = 1;
+
+ // asm.js module functions store their WasmModuleObject in the first slot.
+ static const uint32_t ASMJS_MODULE_SLOT = 0;
+
+ // Async module callback handlers store their ModuleObject in the first slot.
+ static const uint32_t MODULE_SLOT = 0;
+
+ static inline size_t offsetOfExtendedSlot(uint32_t which) {
+ MOZ_ASSERT(which < NUM_EXTENDED_SLOTS);
+ return getFixedSlotOffset(FirstExtendedSlot + which);
+ }
+ static inline size_t offsetOfMethodHomeObjectSlot() {
+ return offsetOfExtendedSlot(METHOD_HOMEOBJECT_SLOT);
+ }
+
+ private:
+ friend class JSFunction;
+};
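+
+// For example (sketch), a method's home object can be read back from the
+// first extended slot:
+//
+//   const js::Value& home =
+//       fun->getExtendedSlot(js::FunctionExtended::METHOD_HOMEOBJECT_SLOT);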
+
+extern JSFunction* CloneFunctionReuseScript(JSContext* cx, HandleFunction fun,
+ HandleObject enclosingEnv,
+ HandleObject proto);
+
+extern JSFunction* CloneAsmJSModuleFunction(JSContext* cx, HandleFunction fun);
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<JSFunction>() const {
+ return getClass()->isJSFunction();
+}
+
+inline void JSFunction::initExtendedSlot(uint32_t which, const js::Value& val) {
+ MOZ_ASSERT(isExtended());
+ MOZ_ASSERT(which < js::FunctionExtended::NUM_EXTENDED_SLOTS);
+ MOZ_ASSERT(js::IsObjectValueInCompartment(val, compartment()));
+ initFixedSlot(js::FunctionExtended::FirstExtendedSlot + which, val);
+}
+
+inline void JSFunction::setExtendedSlot(uint32_t which, const js::Value& val) {
+ MOZ_ASSERT(isExtended());
+ MOZ_ASSERT(which < js::FunctionExtended::NUM_EXTENDED_SLOTS);
+ MOZ_ASSERT(js::IsObjectValueInCompartment(val, compartment()));
+ setFixedSlot(js::FunctionExtended::FirstExtendedSlot + which, val);
+}
+
+inline const js::Value& JSFunction::getExtendedSlot(uint32_t which) const {
+ MOZ_ASSERT(isExtended());
+ MOZ_ASSERT(which < js::FunctionExtended::NUM_EXTENDED_SLOTS);
+ return getFixedSlot(js::FunctionExtended::FirstExtendedSlot + which);
+}
+
+inline js::wasm::Instance& JSFunction::wasmInstance() const {
+ MOZ_ASSERT(isWasm() || isAsmJSNative());
+ MOZ_ASSERT(
+ !getExtendedSlot(js::FunctionExtended::WASM_INSTANCE_SLOT).isUndefined());
+ return *static_cast<js::wasm::Instance*>(
+ getExtendedSlot(js::FunctionExtended::WASM_INSTANCE_SLOT).toPrivate());
+}
+
+namespace js {
+
+JSString* FunctionToString(JSContext* cx, HandleFunction fun, bool isToSource);
+
+/*
+ * Report an error that call.thisv is not compatible with the specified class,
+ * assuming that the method (clasp->name).prototype.<name of callee function>
+ * is what was called.
+ */
+extern void ReportIncompatibleMethod(JSContext* cx, const CallArgs& args,
+ const JSClass* clasp);
+
+/*
+ * Report an error that call.thisv is not an acceptable this for the callee
+ * function.
+ */
+extern void ReportIncompatible(JSContext* cx, const CallArgs& args);
+
+extern bool fun_apply(JSContext* cx, unsigned argc, Value* vp);
+
+extern bool fun_call(JSContext* cx, unsigned argc, Value* vp);
+
+} /* namespace js */
+
+#ifdef DEBUG
+namespace JS {
+namespace detail {
+
+JS_PUBLIC_API void CheckIsValidConstructible(const Value& calleev);
+
+} // namespace detail
+} // namespace JS
+#endif
+
+#endif /* vm_JSFunction_h */
diff --git a/js/src/vm/JSONParser.cpp b/js/src/vm/JSONParser.cpp
new file mode 100644
index 0000000000..01a5e825c6
--- /dev/null
+++ b/js/src/vm/JSONParser.cpp
@@ -0,0 +1,1107 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/JSONParser.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Attributes.h" // MOZ_STACK_CLASS
+#include "mozilla/Range.h" // mozilla::Range
+#include "mozilla/RangedPtr.h" // mozilla::RangedPtr
+
+#include "mozilla/Sprintf.h" // SprintfLiteral
+#include "mozilla/TextUtils.h" // mozilla::AsciiAlphanumericToNumber, mozilla::IsAsciiDigit, mozilla::IsAsciiHexDigit
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint32_t
+#include <utility> // std::move
+
+#include "jsnum.h" // ParseDecimalNumber, GetFullInteger, FullStringToDouble
+
+#include "builtin/Array.h" // NewDenseCopiedArray
+#include "ds/IdValuePair.h" // IdValuePair
+#include "gc/Allocator.h" // CanGC
+#include "gc/Tracer.h" // JS::TraceRoot
+#include "js/AllocPolicy.h" // ReportOutOfMemory
+#include "js/ErrorReport.h" // JS_ReportErrorNumberASCII
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/GCVector.h" // JS::GCVector
+#include "js/Id.h" // jsid
+#include "js/JSON.h" // JS::IsValidJSON
+#include "js/RootingAPI.h" // JS::Handle, JS::MutableHandle, MutableWrappedPtrOperations
+#include "js/TypeDecls.h" // Latin1Char
+#include "js/Utility.h" // js_delete
+#include "js/Value.h" // JS::Value, JS::BooleanValue, JS::NullValue, JS::NumberValue, JS::StringValue
+#include "js/Vector.h" // Vector
+#include "util/StringBuffer.h" // JSStringBuilder
+#include "vm/ArrayObject.h" // ArrayObject
+#include "vm/ErrorReporting.h" // ReportCompileErrorLatin1, ErrorMetadata
+#include "vm/JSAtom.h" // AtomizeChars
+#include "vm/JSContext.h" // JSContext
+#include "vm/PlainObject.h" // NewPlainObjectWithMaybeDuplicateKeys
+#include "vm/StringType.h" // JSString, JSAtom, JSLinearString, NewStringCopyN, NameToId
+
+#include "vm/JSAtom-inl.h" // AtomToId
+
+using namespace js;
+
+using mozilla::AsciiAlphanumericToNumber;
+using mozilla::IsAsciiDigit;
+using mozilla::IsAsciiHexDigit;
+using mozilla::RangedPtr;
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+template <JSONStringType ST>
+JSONToken JSONTokenizer<CharT, ParserT, StringBuilderT>::stringToken(
+ const CharPtr start, size_t length) {
+ if (!parser->handler.template setStringValue<ST>(start, length)) {
+ return JSONToken::OOM;
+ }
+ return JSONToken::String;
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+template <JSONStringType ST>
+JSONToken JSONTokenizer<CharT, ParserT, StringBuilderT>::stringToken(
+ StringBuilderT& builder) {
+ if (!parser->handler.template setStringValue<ST>(builder)) {
+ return JSONToken::OOM;
+ }
+ return JSONToken::String;
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+JSONToken JSONTokenizer<CharT, ParserT, StringBuilderT>::numberToken(double d) {
+ parser->handler.setNumberValue(d);
+ return JSONToken::Number;
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+template <JSONStringType ST>
+JSONToken JSONTokenizer<CharT, ParserT, StringBuilderT>::readString() {
+ MOZ_ASSERT(current < end);
+ MOZ_ASSERT(*current == '"');
+
+ /*
+ * JSONString:
+ * /^"([^\u0000-\u001F"\\]|\\(["/\\bfnrt]|u[0-9a-fA-F]{4}))*"$/
+ */
+
+ if (++current == end) {
+ error("unterminated string literal");
+ return token(JSONToken::Error);
+ }
+
+ /*
+ * Optimization: if the source contains no escaped characters, create the
+ * string directly from the source text.
+ */
+ CharPtr start = current;
+ for (; current < end; current++) {
+ if (*current == '"') {
+ size_t length = current - start;
+ current++;
+ return stringToken<ST>(start, length);
+ }
+
+ if (*current == '\\') {
+ break;
+ }
+
+ if (*current <= 0x001F) {
+ error("bad control character in string literal");
+ return token(JSONToken::Error);
+ }
+ }
+
+ /*
+ * Slow case: string contains escaped characters. Copy a maximal sequence
+ * of unescaped characters into a temporary buffer, then an escaped
+ * character, and repeat until the entire string is consumed.
+ */
+ StringBuilderT builder(parser->handler.context());
+ do {
+ if (start < current && !builder.append(start.get(), current.get())) {
+ return token(JSONToken::OOM);
+ }
+
+ if (current >= end) {
+ break;
+ }
+
+ char16_t c = *current++;
+ if (c == '"') {
+ return stringToken<ST>(builder);
+ }
+
+ if (c != '\\') {
+ --current;
+ error("bad character in string literal");
+ return token(JSONToken::Error);
+ }
+
+ if (current >= end) {
+ break;
+ }
+
+ switch (*current++) {
+ case '"':
+ c = '"';
+ break;
+ case '/':
+ c = '/';
+ break;
+ case '\\':
+ c = '\\';
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'f':
+ c = '\f';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ case 'r':
+ c = '\r';
+ break;
+ case 't':
+ c = '\t';
+ break;
+
+ case 'u':
+ if (end - current < 4 ||
+ !(IsAsciiHexDigit(current[0]) && IsAsciiHexDigit(current[1]) &&
+ IsAsciiHexDigit(current[2]) && IsAsciiHexDigit(current[3]))) {
+ // Point to the first non-hexadecimal character (which may be
+ // missing).
+ if (current == end || !IsAsciiHexDigit(current[0])) {
+ ; // already at correct location
+ } else if (current + 1 == end || !IsAsciiHexDigit(current[1])) {
+ current += 1;
+ } else if (current + 2 == end || !IsAsciiHexDigit(current[2])) {
+ current += 2;
+ } else if (current + 3 == end || !IsAsciiHexDigit(current[3])) {
+ current += 3;
+ } else {
+ MOZ_CRASH("logic error determining first erroneous character");
+ }
+
+ error("bad Unicode escape");
+ return token(JSONToken::Error);
+ }
+ c = (AsciiAlphanumericToNumber(current[0]) << 12) |
+ (AsciiAlphanumericToNumber(current[1]) << 8) |
+ (AsciiAlphanumericToNumber(current[2]) << 4) |
+ (AsciiAlphanumericToNumber(current[3]));
+ current += 4;
+ break;
+
+ default:
+ current--;
+ error("bad escaped character");
+ return token(JSONToken::Error);
+ }
+ if (!builder.append(c)) {
+ return token(JSONToken::OOM);
+ }
+
+ start = current;
+ for (; current < end; current++) {
+ if (*current == '"' || *current == '\\' || *current <= 0x001F) {
+ break;
+ }
+ }
+ } while (current < end);
+
+ error("unterminated string");
+ return token(JSONToken::Error);
+}
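+
+// Worked example for the \uXXXX case above: the escape sequence "\u00e9"
+// decodes to U+00E9 as (0 << 12) | (0 << 8) | (0xe << 4) | 0x9 == 0xe9.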
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+JSONToken JSONTokenizer<CharT, ParserT, StringBuilderT>::readNumber() {
+ MOZ_ASSERT(current < end);
+ MOZ_ASSERT(IsAsciiDigit(*current) || *current == '-');
+
+ /*
+ * JSONNumber:
+ * /^-?(0|[1-9][0-9]+)(\.[0-9]+)?([eE][\+\-]?[0-9]+)?$/
+ */
+
+ bool negative = *current == '-';
+
+ /* -? */
+ if (negative && ++current == end) {
+ error("no number after minus sign");
+ return token(JSONToken::Error);
+ }
+
+ const CharPtr digitStart = current;
+
+ /* 0|[1-9][0-9]+ */
+ if (!IsAsciiDigit(*current)) {
+ error("unexpected non-digit");
+ return token(JSONToken::Error);
+ }
+ if (*current++ != '0') {
+ for (; current < end; current++) {
+ if (!IsAsciiDigit(*current)) {
+ break;
+ }
+ }
+ }
+
+ /* Fast path: no fractional or exponent part. */
+ if (current == end ||
+ (*current != '.' && *current != 'e' && *current != 'E')) {
+ mozilla::Range<const CharT> chars(digitStart.get(), current - digitStart);
+ if (chars.length() < strlen("9007199254740992")) {
+      // If the decimal number is shorter than the decimal representation of
+      // 2**53 (the largest integer a double can represent with integral
+      // precision), parse it using a decimal-only parser. This comparison is
+      // conservative but faster than a fully-precise check.
+ double d = ParseDecimalNumber(chars);
+ return numberToken(negative ? -d : d);
+ }
+
+ double d;
+ if (!GetFullInteger(digitStart.get(), current.get(), 10,
+ IntegerSeparatorHandling::None, &d)) {
+ parser->outOfMemory();
+ return token(JSONToken::OOM);
+ }
+ return numberToken(negative ? -d : d);
+ }
+
+ /* (\.[0-9]+)? */
+ if (current < end && *current == '.') {
+ if (++current == end) {
+ error("missing digits after decimal point");
+ return token(JSONToken::Error);
+ }
+ if (!IsAsciiDigit(*current)) {
+ error("unterminated fractional number");
+ return token(JSONToken::Error);
+ }
+ while (++current < end) {
+ if (!IsAsciiDigit(*current)) {
+ break;
+ }
+ }
+ }
+
+ /* ([eE][\+\-]?[0-9]+)? */
+ if (current < end && (*current == 'e' || *current == 'E')) {
+ if (++current == end) {
+ error("missing digits after exponent indicator");
+ return token(JSONToken::Error);
+ }
+ if (*current == '+' || *current == '-') {
+ if (++current == end) {
+ error("missing digits after exponent sign");
+ return token(JSONToken::Error);
+ }
+ }
+ if (!IsAsciiDigit(*current)) {
+ error("exponent part is missing a number");
+ return token(JSONToken::Error);
+ }
+ while (++current < end) {
+ if (!IsAsciiDigit(*current)) {
+ break;
+ }
+ }
+ }
+
+ double d = FullStringToDouble(digitStart.get(), current.get());
+ return numberToken(negative ? -d : d);
+}
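+
+// Worked example: for the input "-12.5e2" the code above consumes the sign,
+// the integer digits "12", the fraction ".5", and the exponent "e2", then
+// FullStringToDouble("12.5e2") yields 1250 and negation gives -1250.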
+
+static inline bool IsJSONWhitespace(char16_t c) {
+ return c == '\t' || c == '\r' || c == '\n' || c == ' ';
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+bool JSONTokenizer<CharT, ParserT,
+ StringBuilderT>::consumeTrailingWhitespaces() {
+ for (; current < end; current++) {
+ if (!IsJSONWhitespace(*current)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+JSONToken JSONTokenizer<CharT, ParserT, StringBuilderT>::advance() {
+ while (current < end && IsJSONWhitespace(*current)) {
+ current++;
+ }
+ if (current >= end) {
+ error("unexpected end of data");
+ return token(JSONToken::Error);
+ }
+
+ switch (*current) {
+ case '"':
+ return readString<JSONStringType::LiteralValue>();
+
+ case '-':
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ return readNumber();
+
+ case 't':
+ if (end - current < 4 || current[1] != 'r' || current[2] != 'u' ||
+ current[3] != 'e') {
+ error("unexpected keyword");
+ return token(JSONToken::Error);
+ }
+ current += 4;
+ return token(JSONToken::True);
+
+ case 'f':
+ if (end - current < 5 || current[1] != 'a' || current[2] != 'l' ||
+ current[3] != 's' || current[4] != 'e') {
+ error("unexpected keyword");
+ return token(JSONToken::Error);
+ }
+ current += 5;
+ return token(JSONToken::False);
+
+ case 'n':
+ if (end - current < 4 || current[1] != 'u' || current[2] != 'l' ||
+ current[3] != 'l') {
+ error("unexpected keyword");
+ return token(JSONToken::Error);
+ }
+ current += 4;
+ return token(JSONToken::Null);
+
+ case '[':
+ current++;
+ return token(JSONToken::ArrayOpen);
+ case ']':
+ current++;
+ return token(JSONToken::ArrayClose);
+
+ case '{':
+ current++;
+ return token(JSONToken::ObjectOpen);
+ case '}':
+ current++;
+ return token(JSONToken::ObjectClose);
+
+ case ',':
+ current++;
+ return token(JSONToken::Comma);
+
+ case ':':
+ current++;
+ return token(JSONToken::Colon);
+
+ default:
+ error("unexpected character");
+ return token(JSONToken::Error);
+ }
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+JSONToken JSONTokenizer<CharT, ParserT, StringBuilderT>::advancePropertyName() {
+ MOZ_ASSERT(current[-1] == ',');
+
+ while (current < end && IsJSONWhitespace(*current)) {
+ current++;
+ }
+ if (current >= end) {
+ error("end of data when property name was expected");
+ return token(JSONToken::Error);
+ }
+
+ if (*current == '"') {
+ return readString<JSONStringType::PropertyName>();
+ }
+
+ error("expected double-quoted property name");
+ return token(JSONToken::Error);
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+JSONToken
+JSONTokenizer<CharT, ParserT, StringBuilderT>::advancePropertyColon() {
+ MOZ_ASSERT(current[-1] == '"');
+
+ while (current < end && IsJSONWhitespace(*current)) {
+ current++;
+ }
+ if (current >= end) {
+ error("end of data after property name when ':' was expected");
+ return token(JSONToken::Error);
+ }
+
+ if (*current == ':') {
+ current++;
+ return token(JSONToken::Colon);
+ }
+
+ error("expected ':' after property name in object");
+ return token(JSONToken::Error);
+}
+
+template <typename CharT>
+static inline void AssertPastValue(const RangedPtr<const CharT> current) {
+ /*
+ * We're past an arbitrary JSON value, so the previous character is
+ * *somewhat* constrained, even if this assertion is pretty broad. Don't
+   * knock it till you've tried it: this assertion *did* catch a bug once.
+ */
+ MOZ_ASSERT((current[-1] == 'l' && current[-2] == 'l' && current[-3] == 'u' &&
+ current[-4] == 'n') ||
+ (current[-1] == 'e' && current[-2] == 'u' && current[-3] == 'r' &&
+ current[-4] == 't') ||
+ (current[-1] == 'e' && current[-2] == 's' && current[-3] == 'l' &&
+ current[-4] == 'a' && current[-5] == 'f') ||
+ current[-1] == '}' || current[-1] == ']' || current[-1] == '"' ||
+ IsAsciiDigit(current[-1]));
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+JSONToken
+JSONTokenizer<CharT, ParserT, StringBuilderT>::advanceAfterProperty() {
+ AssertPastValue(current);
+
+ while (current < end && IsJSONWhitespace(*current)) {
+ current++;
+ }
+ if (current >= end) {
+ error("end of data after property value in object");
+ return token(JSONToken::Error);
+ }
+
+ if (*current == ',') {
+ current++;
+ return token(JSONToken::Comma);
+ }
+
+ if (*current == '}') {
+ current++;
+ return token(JSONToken::ObjectClose);
+ }
+
+ error("expected ',' or '}' after property value in object");
+ return token(JSONToken::Error);
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+JSONToken
+JSONTokenizer<CharT, ParserT, StringBuilderT>::advanceAfterObjectOpen() {
+ MOZ_ASSERT(current[-1] == '{');
+
+ while (current < end && IsJSONWhitespace(*current)) {
+ current++;
+ }
+ if (current >= end) {
+ error("end of data while reading object contents");
+ return token(JSONToken::Error);
+ }
+
+ if (*current == '"') {
+ return readString<JSONStringType::PropertyName>();
+ }
+
+ if (*current == '}') {
+ current++;
+ return token(JSONToken::ObjectClose);
+ }
+
+ error("expected property name or '}'");
+ return token(JSONToken::Error);
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+JSONToken
+JSONTokenizer<CharT, ParserT, StringBuilderT>::advanceAfterArrayElement() {
+ AssertPastValue(current);
+
+ while (current < end && IsJSONWhitespace(*current)) {
+ current++;
+ }
+ if (current >= end) {
+ error("end of data when ',' or ']' was expected");
+ return token(JSONToken::Error);
+ }
+
+ if (*current == ',') {
+ current++;
+ return token(JSONToken::Comma);
+ }
+
+ if (*current == ']') {
+ current++;
+ return token(JSONToken::ArrayClose);
+ }
+
+ error("expected ',' or ']' after array element");
+ return token(JSONToken::Error);
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+void JSONTokenizer<CharT, ParserT, StringBuilderT>::error(const char* msg) {
+ parser->error(msg);
+}
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+void JSONTokenizer<CharT, ParserT, StringBuilderT>::getTextPosition(
+ uint32_t* column, uint32_t* line) {
+ CharPtr ptr = begin;
+ uint32_t col = 1;
+ uint32_t row = 1;
+ for (; ptr < current; ptr++) {
+ if (*ptr == '\n' || *ptr == '\r') {
+ ++row;
+ col = 1;
+ // \r\n is treated as a single newline.
+ if (ptr + 1 < current && *ptr == '\r' && *(ptr + 1) == '\n') {
+ ++ptr;
+ }
+ } else {
+ ++col;
+ }
+ }
+ *column = col;
+ *line = row;
+}
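+
+// Example: for the input "[1,\n2,x" with `current` pointing at the offending
+// 'x', this computes line 2, column 3 (both 1-based; "\r\n" counts as a
+// single newline).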
+
+JSONFullParseHandlerAnyChar::~JSONFullParseHandlerAnyChar() {
+ for (size_t i = 0; i < freeElements.length(); i++) {
+ js_delete(freeElements[i]);
+ }
+
+ for (size_t i = 0; i < freeProperties.length(); i++) {
+ js_delete(freeProperties[i]);
+ }
+}
+
+void JSONFullParseHandlerAnyChar::trace(JSTracer* trc) {
+ JS::TraceRoot(trc, &v, "JSONFullParseHandlerAnyChar current value");
+}
+
+inline void JSONFullParseHandlerAnyChar::freeStackEntry(StackEntry& entry) {
+ if (entry.state == JSONParserState::FinishArrayElement) {
+ js_delete(&entry.elements());
+ } else {
+ js_delete(&entry.properties());
+ }
+}
+
+template <typename CharT>
+void JSONParser<CharT>::trace(JSTracer* trc) {
+ this->handler.trace(trc);
+
+ for (auto& elem : this->stack) {
+ if (elem.state == JSONParserState::FinishArrayElement) {
+ elem.elements().trace(trc);
+ } else {
+ elem.properties().trace(trc);
+ }
+ }
+}
+
+inline void JSONFullParseHandlerAnyChar::setNumberValue(double d) {
+ v = JS::NumberValue(d);
+}
+
+template <typename CharT>
+template <JSONStringType ST>
+inline bool JSONFullParseHandler<CharT>::setStringValue(CharPtr start,
+ size_t length) {
+ JSLinearString* str = (ST == JSONStringType::PropertyName)
+ ? AtomizeChars(cx, start.get(), length)
+ : NewStringCopyN<CanGC>(cx, start.get(), length);
+ if (!str) {
+ return false;
+ }
+ v = JS::StringValue(str);
+ return true;
+}
+
+template <typename CharT>
+template <JSONStringType ST>
+inline bool JSONFullParseHandler<CharT>::setStringValue(
+ StringBuilder& builder) {
+ JSLinearString* str = (ST == JSONStringType::PropertyName)
+ ? builder.buffer.finishAtom()
+ : builder.buffer.finishString();
+ if (!str) {
+ return false;
+ }
+ v = JS::StringValue(str);
+ return true;
+}
+
+inline bool JSONFullParseHandlerAnyChar::objectOpen(
+ Vector<StackEntry, 10>& stack, PropertyVector** properties) {
+ if (!freeProperties.empty()) {
+ *properties = freeProperties.popCopy();
+ (*properties)->clear();
+ } else {
+ (*properties) = cx->new_<PropertyVector>(cx);
+ if (!*properties) {
+ return false;
+ }
+ }
+ if (!stack.append(*properties)) {
+ js_delete(*properties);
+ return false;
+ }
+
+ return true;
+}
+
+inline bool JSONFullParseHandlerAnyChar::objectPropertyName(
+ Vector<StackEntry, 10>& stack, bool* isProtoInEval) {
+ *isProtoInEval = false;
+ jsid id = AtomToId(atomValue());
+ if (parseType == ParseType::AttemptForEval) {
+ // In |JSON.parse|, "__proto__" is a property like any other and may
+ // appear multiple times. In object literal syntax, "__proto__" is
+ // prototype mutation and can appear at most once. |JSONParser| only
+ // supports the former semantics, so if this parse attempt is for
+ // |eval|, return true (without reporting an error) to indicate the
+ // JSON parse attempt was unsuccessful.
+ if (id == NameToId(cx->names().proto)) {
+ *isProtoInEval = true;
+ return true;
+ }
+ }
+ PropertyVector& properties = stack.back().properties();
+ if (!properties.emplaceBack(id)) {
+ return false;
+ }
+
+ return true;
+}
+
+inline void JSONFullParseHandlerAnyChar::finishObjectMember(
+ Vector<StackEntry, 10>& stack, JS::Handle<JS::Value> value,
+ PropertyVector** properties) {
+ *properties = &stack.back().properties();
+ (*properties)->back().value = value;
+}
+
+inline bool JSONFullParseHandlerAnyChar::finishObject(
+ Vector<StackEntry, 10>& stack, JS::MutableHandle<JS::Value> vp,
+ PropertyVector& properties) {
+ MOZ_ASSERT(&properties == &stack.back().properties());
+
+ JSObject* obj = NewPlainObjectWithMaybeDuplicateKeys(cx, properties.begin(),
+ properties.length());
+ if (!obj) {
+ return false;
+ }
+
+ vp.setObject(*obj);
+ if (!freeProperties.append(&properties)) {
+ return false;
+ }
+ stack.popBack();
+ return true;
+}
+
+inline bool JSONFullParseHandlerAnyChar::arrayOpen(
+ Vector<StackEntry, 10>& stack, ElementVector** elements) {
+ if (!freeElements.empty()) {
+ *elements = freeElements.popCopy();
+ (*elements)->clear();
+ } else {
+ (*elements) = cx->new_<ElementVector>(cx);
+ if (!*elements) {
+ return false;
+ }
+ }
+ if (!stack.append(*elements)) {
+ js_delete(*elements);
+ return false;
+ }
+
+ return true;
+}
+
+inline bool JSONFullParseHandlerAnyChar::arrayElement(
+ Vector<StackEntry, 10>& stack, JS::Handle<JS::Value> value,
+ ElementVector** elements) {
+ *elements = &stack.back().elements();
+ return (*elements)->append(value.get());
+}
+
+inline bool JSONFullParseHandlerAnyChar::finishArray(
+ Vector<StackEntry, 10>& stack, JS::MutableHandle<JS::Value> vp,
+ ElementVector& elements) {
+ MOZ_ASSERT(&elements == &stack.back().elements());
+
+ ArrayObject* obj =
+ NewDenseCopiedArray(cx, elements.length(), elements.begin());
+ if (!obj) {
+ return false;
+ }
+
+ vp.setObject(*obj);
+ if (!freeElements.append(&elements)) {
+ return false;
+ }
+ stack.popBack();
+ return true;
+}
+
+template <typename CharT>
+void JSONFullParseHandler<CharT>::reportError(const char* msg,
+ const char* lineString,
+ const char* columnString) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_JSON_BAD_PARSE,
+ msg, lineString, columnString);
+}
+
+template <typename CharT, typename HandlerT>
+void JSONPerHandlerParser<CharT, HandlerT>::outOfMemory() {
+ ReportOutOfMemory(handler.context());
+}
+
+template <typename CharT, typename HandlerT>
+void JSONPerHandlerParser<CharT, HandlerT>::error(const char* msg) {
+ if (handler.ignoreError()) {
+ return;
+ }
+
+ uint32_t column = 1, line = 1;
+ tokenizer.getTextPosition(&column, &line);
+
+ const size_t MaxWidth = sizeof("4294967295");
+ char columnNumber[MaxWidth];
+ SprintfLiteral(columnNumber, "%" PRIu32, column);
+ char lineNumber[MaxWidth];
+ SprintfLiteral(lineNumber, "%" PRIu32, line);
+
+ handler.reportError(msg, lineNumber, columnNumber);
+}
+
+template <typename CharT>
+bool JSONFullParseHandler<CharT>::StringBuilder::append(char16_t c) {
+ return buffer.append(c);
+}
+
+template <typename CharT>
+bool JSONFullParseHandler<CharT>::StringBuilder::append(const CharT* begin,
+ const CharT* end) {
+ return buffer.append(begin, end);
+}
+
+template <typename CharT, typename HandlerT>
+JSONPerHandlerParser<CharT, HandlerT>::~JSONPerHandlerParser() {
+ for (size_t i = 0; i < stack.length(); i++) {
+ handler.freeStackEntry(stack[i]);
+ }
+}
+
+template class js::JSONPerHandlerParser<Latin1Char,
+ js::JSONFullParseHandler<Latin1Char>>;
+template class js::JSONPerHandlerParser<char16_t,
+ js::JSONFullParseHandler<char16_t>>;
+
+template <typename CharT, typename HandlerT>
+template <typename TempValueT, typename ResultSetter>
+bool JSONPerHandlerParser<CharT, HandlerT>::parseImpl(TempValueT& value,
+ ResultSetter setResult) {
+ MOZ_ASSERT(stack.empty());
+
+ JSONToken token;
+ JSONParserState state = JSONParserState::JSONValue;
+ while (true) {
+ switch (state) {
+ case JSONParserState::FinishObjectMember: {
+ typename HandlerT::PropertyVector* properties;
+ handler.finishObjectMember(stack, value, &properties);
+
+ token = tokenizer.advanceAfterProperty();
+ if (token == JSONToken::ObjectClose) {
+ if (!handler.finishObject(stack, &value, *properties)) {
+ return false;
+ }
+ break;
+ }
+ if (token != JSONToken::Comma) {
+ if (token == JSONToken::OOM) {
+ return false;
+ }
+ if (token != JSONToken::Error) {
+ error(
+ "expected ',' or '}' after property-value pair in object "
+ "literal");
+ }
+ return handler.errorReturn();
+ }
+ token = tokenizer.advancePropertyName();
+ /* FALL THROUGH */
+ }
+
+ JSONMember:
+ if (token == JSONToken::String) {
+ bool isProtoInEval;
+ if (!handler.objectPropertyName(stack, &isProtoInEval)) {
+ return false;
+ }
+ if (isProtoInEval) {
+ // See JSONFullParseHandlerAnyChar::objectPropertyName.
+ return true;
+ }
+ token = tokenizer.advancePropertyColon();
+ if (token != JSONToken::Colon) {
+ MOZ_ASSERT(token == JSONToken::Error);
+ return handler.errorReturn();
+ }
+ goto JSONValue;
+ }
+ if (token == JSONToken::OOM) {
+ return false;
+ }
+ if (token != JSONToken::Error) {
+ error("property names must be double-quoted strings");
+ }
+ return handler.errorReturn();
+
+ case JSONParserState::FinishArrayElement: {
+ typename HandlerT::ElementVector* elements;
+ if (!handler.arrayElement(stack, value, &elements)) {
+ return false;
+ }
+ token = tokenizer.advanceAfterArrayElement();
+ if (token == JSONToken::Comma) {
+ goto JSONValue;
+ }
+ if (token == JSONToken::ArrayClose) {
+ if (!handler.finishArray(stack, &value, *elements)) {
+ return false;
+ }
+ break;
+ }
+ MOZ_ASSERT(token == JSONToken::Error);
+ return handler.errorReturn();
+ }
+
+ JSONValue:
+ case JSONParserState::JSONValue:
+ token = tokenizer.advance();
+ JSONValueSwitch:
+ switch (token) {
+ case JSONToken::String:
+ value = handler.stringValue();
+ break;
+ case JSONToken::Number:
+ value = handler.numberValue();
+ break;
+ case JSONToken::True:
+ value = handler.booleanValue(true);
+ break;
+ case JSONToken::False:
+ value = handler.booleanValue(false);
+ break;
+ case JSONToken::Null:
+ value = handler.nullValue();
+ break;
+
+ case JSONToken::ArrayOpen: {
+ typename HandlerT::ElementVector* elements;
+ if (!handler.arrayOpen(stack, &elements)) {
+ return false;
+ }
+
+ token = tokenizer.advance();
+ if (token == JSONToken::ArrayClose) {
+ if (!handler.finishArray(stack, &value, *elements)) {
+ return false;
+ }
+ break;
+ }
+ goto JSONValueSwitch;
+ }
+
+ case JSONToken::ObjectOpen: {
+ typename HandlerT::PropertyVector* properties;
+ if (!handler.objectOpen(stack, &properties)) {
+ return false;
+ }
+
+ token = tokenizer.advanceAfterObjectOpen();
+ if (token == JSONToken::ObjectClose) {
+ if (!handler.finishObject(stack, &value, *properties)) {
+ return false;
+ }
+ break;
+ }
+ goto JSONMember;
+ }
+
+ case JSONToken::ArrayClose:
+ case JSONToken::ObjectClose:
+ case JSONToken::Colon:
+ case JSONToken::Comma:
+ // Move the current pointer backwards so that the position
+ // reported in the error message is correct.
+ tokenizer.unget();
+ error("unexpected character");
+ return handler.errorReturn();
+
+ case JSONToken::OOM:
+ return false;
+
+ case JSONToken::Error:
+ return handler.errorReturn();
+ }
+ break;
+ }
+
+ if (stack.empty()) {
+ break;
+ }
+ state = stack.back().state;
+ }
+
+ if (!tokenizer.consumeTrailingWhitespaces()) {
+ error("unexpected non-whitespace character after JSON data");
+ return handler.errorReturn();
+ }
+
+ MOZ_ASSERT(tokenizer.finished());
+ MOZ_ASSERT(stack.empty());
+
+ setResult(value);
+ return true;
+}
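+
+// Worked trace (sketch): parsing `[1, {"a": 2}]` proceeds roughly as:
+//
+//   JSONValue: ArrayOpen, then Number(1)
+//   FinishArrayElement: Comma -> JSONValue: ObjectOpen, then String("a"),
+//     Colon, Number(2)
+//   FinishObjectMember: ObjectClose (object built, stack popped)
+//   FinishArrayElement: ArrayClose (array built, stack popped; stack empty,
+//     so the loop exits and trailing whitespace is consumed)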
+
+template <typename CharT>
+bool JSONParser<CharT>::parse(JS::MutableHandle<JS::Value> vp) {
+ JS::Rooted<JS::Value> tempValue(this->handler.cx);
+
+ vp.setUndefined();
+
+ return this->parseImpl(tempValue,
+ [&](JS::Handle<JS::Value> value) { vp.set(value); });
+}
+
+template class js::JSONParser<Latin1Char>;
+template class js::JSONParser<char16_t>;
+
+template <typename CharT>
+inline bool JSONSyntaxParseHandler<CharT>::objectOpen(
+ Vector<StackEntry, 10>& stack, PropertyVector** properties) {
+ StackEntry entry{JSONParserState::FinishObjectMember};
+ if (!stack.append(entry)) {
+ return false;
+ }
+ return true;
+}
+
+template <typename CharT>
+inline bool JSONSyntaxParseHandler<CharT>::finishObject(
+ Vector<StackEntry, 10>& stack, DummyValue* vp, PropertyVector& properties) {
+ stack.popBack();
+ return true;
+}
+
+template <typename CharT>
+inline bool JSONSyntaxParseHandler<CharT>::arrayOpen(
+ Vector<StackEntry, 10>& stack, ElementVector** elements) {
+ StackEntry entry{JSONParserState::FinishArrayElement};
+ if (!stack.append(entry)) {
+ return false;
+ }
+ return true;
+}
+
+template <typename CharT>
+inline bool JSONSyntaxParseHandler<CharT>::finishArray(
+ Vector<StackEntry, 10>& stack, DummyValue* vp, ElementVector& elements) {
+ stack.popBack();
+ return true;
+}
+
+static void ReportJSONSyntaxError(FrontendContext* fc, ErrorMetadata&& metadata,
+ unsigned errorNumber, ...) {
+ va_list args;
+ va_start(args, errorNumber);
+
+ js::ReportCompileErrorLatin1(fc, std::move(metadata), nullptr, errorNumber,
+ &args);
+
+ va_end(args);
+}
+
+template <typename CharT>
+void JSONSyntaxParseHandler<CharT>::reportError(const char* msg,
+ const char* lineString,
+ const char* columnString) {
+ ErrorMetadata metadata;
+ metadata.isMuted = false;
+ metadata.filename = "";
+ metadata.lineNumber = 0;
+ metadata.columnNumber = 0;
+
+ ReportJSONSyntaxError(fc, std::move(metadata), JSMSG_JSON_BAD_PARSE, msg,
+ lineString, columnString);
+}
+
+template class js::JSONSyntaxParseHandler<Latin1Char>;
+template class js::JSONSyntaxParseHandler<char16_t>;
+
+template <typename CharT>
+bool JSONSyntaxParser<CharT>::parse() {
+ typename HandlerT::DummyValue unused;
+
+ if (!this->parseImpl(unused,
+ [&](const typename HandlerT::DummyValue& unused) {})) {
+ return false;
+ }
+
+ return true;
+}
+
+template class js::JSONPerHandlerParser<Latin1Char,
+ js::JSONSyntaxParseHandler<Latin1Char>>;
+template class js::JSONPerHandlerParser<char16_t,
+ js::JSONSyntaxParseHandler<char16_t>>;
+
+template class js::JSONSyntaxParser<Latin1Char>;
+template class js::JSONSyntaxParser<char16_t>;
+
+template <typename CharT>
+static bool IsValidJSONImpl(const CharT* chars, uint32_t len) {
+ FrontendContext fc;
+ // NOTE: We don't set stack quota here because JSON parser doesn't use it.
+
+ JSONSyntaxParser<CharT> parser(&fc, mozilla::Range(chars, len));
+ if (!parser.parse()) {
+ MOZ_ASSERT(fc.hadErrors());
+ return false;
+ }
+ MOZ_ASSERT(!fc.hadErrors());
+
+ return true;
+}
+
+JS_PUBLIC_API bool JS::IsValidJSON(const JS::Latin1Char* chars, uint32_t len) {
+ return IsValidJSONImpl(chars, len);
+}
+
+JS_PUBLIC_API bool JS::IsValidJSON(const char16_t* chars, uint32_t len) {
+ return IsValidJSONImpl(chars, len);
+}
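+
+// Usage sketch (text is caller-supplied):
+//
+//   const char16_t text[] = u"{\"key\": [1, 2, 3]}";
+//   bool ok =
+//       JS::IsValidJSON(text, std::char_traits<char16_t>::length(text));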
diff --git a/js/src/vm/JSONParser.h b/js/src/vm/JSONParser.h
new file mode 100644
index 0000000000..7c86d3e087
--- /dev/null
+++ b/js/src/vm/JSONParser.h
@@ -0,0 +1,517 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSONParser_h
+#define vm_JSONParser_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Attributes.h" // MOZ_STACK_CLASS
+#include "mozilla/Range.h" // mozilla::Range
+#include "mozilla/RangedPtr.h" // mozilla::RangedPtr
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint32_t
+#include <utility> // std::move
+
+#include "ds/IdValuePair.h" // IdValuePair
+#include "js/GCVector.h" // JS::GCVector
+#include "js/RootingAPI.h" // JS::Handle, JS::MutableHandle, MutableWrappedPtrOperations
+#include "js/Value.h" // JS::Value, JS::BooleanValue, JS::NullValue
+#include "js/Vector.h" // Vector
+#include "util/StringBuffer.h" // JSStringBuilder
+#include "vm/StringType.h" // JSString, JSAtom
+
+struct JSContext;
+class JSTracer;
+
+namespace js {
+
+class FrontendContext;
+
+enum class JSONToken {
+ String,
+ Number,
+ True,
+ False,
+ Null,
+ ArrayOpen,
+ ArrayClose,
+ ObjectOpen,
+ ObjectClose,
+ Colon,
+ Comma,
+ OOM,
+ Error
+};
+
+enum class JSONStringType { PropertyName, LiteralValue };
+
+template <typename CharT, typename ParserT, typename StringBuilderT>
+class MOZ_STACK_CLASS JSONTokenizer {
+ public:
+ using CharPtr = mozilla::RangedPtr<const CharT>;
+
+ protected:
+ CharPtr current;
+ const CharPtr begin, end;
+
+ ParserT* parser = nullptr;
+
+ public:
+ JSONTokenizer(CharPtr current, const CharPtr begin, const CharPtr end,
+ ParserT* parser)
+ : current(current), begin(begin), end(end), parser(parser) {
+ MOZ_ASSERT(current <= end);
+ MOZ_ASSERT(parser);
+ }
+
+ explicit JSONTokenizer(mozilla::Range<const CharT> data, ParserT* parser)
+ : JSONTokenizer(data.begin(), data.begin(), data.end(), parser) {}
+
+ JSONTokenizer(JSONTokenizer<CharT, ParserT, StringBuilderT>&& other) noexcept
+ : JSONTokenizer(other.current, other.begin, other.end, other.parser) {}
+
+ JSONTokenizer(const JSONTokenizer<CharT, ParserT, StringBuilderT>& other) =
+ delete;
+ void operator=(const JSONTokenizer<CharT, ParserT, StringBuilderT>& other) =
+ delete;
+
+ void fixupParser(ParserT* newParser) { parser = newParser; }
+
+ void getTextPosition(uint32_t* column, uint32_t* line);
+
+ bool consumeTrailingWhitespaces();
+
+ JSONToken advance();
+ JSONToken advancePropertyName();
+ JSONToken advancePropertyColon();
+ JSONToken advanceAfterProperty();
+ JSONToken advanceAfterObjectOpen();
+ JSONToken advanceAfterArrayElement();
+
+ void unget() { --current; }
+
+#ifdef DEBUG
+ bool finished() { return end == current; }
+#endif
+
+ JSONToken token(JSONToken t) {
+ MOZ_ASSERT(t != JSONToken::String);
+ MOZ_ASSERT(t != JSONToken::Number);
+ return t;
+ }
+
+ template <JSONStringType ST>
+ JSONToken stringToken(const CharPtr start, size_t length);
+ template <JSONStringType ST>
+ JSONToken stringToken(StringBuilderT& builder);
+
+ JSONToken numberToken(double d);
+
+ template <JSONStringType ST>
+ JSONToken readString();
+
+ JSONToken readNumber();
+
+ void error(const char* msg);
+};
+
+// Possible states the parser can be in between values.
+enum class JSONParserState {
+  // An array element has just been parsed.
+ FinishArrayElement,
+
+ // An object property has just been parsed.
+ FinishObjectMember,
+
+ // At the start of the parse, before any values have been processed.
+ JSONValue
+};
+
+// Character-type-agnostic base class for JSONFullParseHandler.
+// Because JSONParser is templatized to work on either Latin1 or TwoByte
+// input strings, JSONFullParseHandlerAnyChar holds all state and methods
+// that can be shared between the two encodings.
+class MOZ_STACK_CLASS JSONFullParseHandlerAnyChar {
+ public:
+ // State related to the parser's current position. At all points in the
+ // parse this keeps track of the stack of arrays and objects which have
+ // been started but not finished yet. The actual JS object is not
+ // allocated until the literal is closed, so that the result can be sized
+ // according to its contents and have its type and shape filled in using
+ // caches.
+
+ // State for an array that is currently being parsed. This includes all
+ // elements that have been seen so far.
+ using ElementVector = JS::GCVector<JS::Value, 20>;
+
+ // State for an object that is currently being parsed. This includes all
+ // the key/value pairs that have been seen so far.
+ using PropertyVector = JS::GCVector<IdValuePair, 10>;
+
+ enum class ParseType {
+ // Parsing a string as if by JSON.parse.
+ JSONParse,
+ // Parsing what may or may not be JSON in a string of eval code.
+ // In this case, a failure to parse indicates either syntax that isn't JSON,
+ // or syntax that has different semantics in eval code than in JSON.
+ AttemptForEval,
+ };
+
+  // Stack element for an in-progress array or object.
+ struct StackEntry {
+ ElementVector& elements() {
+ MOZ_ASSERT(state == JSONParserState::FinishArrayElement);
+ return *static_cast<ElementVector*>(vector);
+ }
+
+ PropertyVector& properties() {
+ MOZ_ASSERT(state == JSONParserState::FinishObjectMember);
+ return *static_cast<PropertyVector*>(vector);
+ }
+
+ explicit StackEntry(ElementVector* elements)
+ : state(JSONParserState::FinishArrayElement), vector(elements) {}
+
+ explicit StackEntry(PropertyVector* properties)
+ : state(JSONParserState::FinishObjectMember), vector(properties) {}
+
+ JSONParserState state;
+
+ private:
+ void* vector;
+ };
+
+ public:
+ /* Data members */
+
+ JSContext* cx;
+
+ JS::Value v;
+
+ ParseType parseType = ParseType::JSONParse;
+
+ private:
+  // Unused element and property vectors from previously in-progress arrays
+  // and objects. These vectors are not freed until the end of the parse, to
+  // avoid unnecessary freeing and reallocation.
+ Vector<ElementVector*, 5> freeElements;
+ Vector<PropertyVector*, 5> freeProperties;
+
+ public:
+ explicit JSONFullParseHandlerAnyChar(JSContext* cx)
+ : cx(cx), freeElements(cx), freeProperties(cx) {}
+ ~JSONFullParseHandlerAnyChar();
+
+ // Allow move construction for use with Rooted.
+ JSONFullParseHandlerAnyChar(JSONFullParseHandlerAnyChar&& other) noexcept
+ : cx(other.cx),
+ v(other.v),
+ parseType(other.parseType),
+ freeElements(std::move(other.freeElements)),
+ freeProperties(std::move(other.freeProperties)) {}
+
+ JSONFullParseHandlerAnyChar(const JSONFullParseHandlerAnyChar& other) =
+ delete;
+ void operator=(const JSONFullParseHandlerAnyChar& other) = delete;
+
+ JSContext* context() { return cx; }
+
+ JS::Value numberValue() const {
+ MOZ_ASSERT(v.isNumber());
+ return v;
+ }
+
+ inline void setNumberValue(double d);
+
+ JS::Value stringValue() const {
+ MOZ_ASSERT(v.isString());
+ return v;
+ }
+
+ JSAtom* atomValue() const {
+ JS::Value strval = stringValue();
+ return &strval.toString()->asAtom();
+ }
+
+ inline JS::Value booleanValue(bool value) { return JS::BooleanValue(value); }
+ inline JS::Value nullValue() { return JS::NullValue(); }
+
+ inline bool objectOpen(Vector<StackEntry, 10>& stack,
+ PropertyVector** properties);
+ inline bool objectPropertyName(Vector<StackEntry, 10>& stack,
+ bool* isProtoInEval);
+ inline void finishObjectMember(Vector<StackEntry, 10>& stack,
+ JS::Handle<JS::Value> value,
+ PropertyVector** properties);
+ inline bool finishObject(Vector<StackEntry, 10>& stack,
+ JS::MutableHandle<JS::Value> vp,
+ PropertyVector& properties);
+
+ inline bool arrayOpen(Vector<StackEntry, 10>& stack,
+ ElementVector** elements);
+ inline bool arrayElement(Vector<StackEntry, 10>& stack,
+ JS::Handle<JS::Value> value,
+ ElementVector** elements);
+ inline bool finishArray(Vector<StackEntry, 10>& stack,
+ JS::MutableHandle<JS::Value> vp,
+ ElementVector& elements);
+
+ inline bool errorReturn() const {
+ return parseType == ParseType::AttemptForEval;
+ }
+
+ inline bool ignoreError() const {
+ return parseType == ParseType::AttemptForEval;
+ }
+
+ inline void freeStackEntry(StackEntry& entry);
+
+ void trace(JSTracer* trc);
+};
+
+template <typename CharT>
+class MOZ_STACK_CLASS JSONFullParseHandler
+ : public JSONFullParseHandlerAnyChar {
+ using Base = JSONFullParseHandlerAnyChar;
+ using CharPtr = mozilla::RangedPtr<const CharT>;
+
+ public:
+ using ContextT = JSContext;
+
+ class StringBuilder {
+ public:
+ JSStringBuilder buffer;
+
+ explicit StringBuilder(JSContext* cx) : buffer(cx) {}
+
+ bool append(char16_t c);
+ bool append(const CharT* begin, const CharT* end);
+ };
+
+ explicit JSONFullParseHandler(JSContext* cx) : Base(cx) {}
+
+ JSONFullParseHandler(JSONFullParseHandler&& other) noexcept
+ : Base(std::move(other)) {}
+
+ JSONFullParseHandler(const JSONFullParseHandler& other) = delete;
+ void operator=(const JSONFullParseHandler& other) = delete;
+
+ template <JSONStringType ST>
+ inline bool setStringValue(CharPtr start, size_t length);
+ template <JSONStringType ST>
+ inline bool setStringValue(StringBuilder& builder);
+
+ void reportError(const char* msg, const char* lineString,
+ const char* columnString);
+};
+
+template <typename CharT>
+class MOZ_STACK_CLASS JSONSyntaxParseHandler {
+ private:
+ using CharPtr = mozilla::RangedPtr<const CharT>;
+
+ public:
+ /* Types for templatized parser. */
+
+ using ContextT = FrontendContext;
+
+ class DummyValue {};
+
+ struct ElementVector {};
+ struct PropertyVector {};
+
+ class StringBuilder {
+ public:
+ explicit StringBuilder(FrontendContext* fc) {}
+
+ bool append(char16_t c) { return true; }
+ bool append(const CharT* begin, const CharT* end) { return true; }
+ };
+
+ struct StackEntry {
+ JSONParserState state;
+ };
+
+ public:
+ FrontendContext* fc;
+
+ /* Public API */
+
+ /* Create a parser for the provided JSON data. */
+ explicit JSONSyntaxParseHandler(FrontendContext* fc) : fc(fc) {}
+
+ JSONSyntaxParseHandler(JSONSyntaxParseHandler&& other) noexcept
+ : fc(other.fc) {}
+
+ JSONSyntaxParseHandler(const JSONSyntaxParseHandler& other) = delete;
+ void operator=(const JSONSyntaxParseHandler& other) = delete;
+
+ FrontendContext* context() { return fc; }
+
+ template <JSONStringType ST>
+ inline bool setStringValue(CharPtr start, size_t length) {
+ return true;
+ }
+
+ template <JSONStringType ST>
+ inline bool setStringValue(StringBuilder& builder) {
+ return true;
+ }
+
+ inline void setNumberValue(double d) {}
+
+ inline DummyValue numberValue() const { return DummyValue(); }
+
+ inline DummyValue stringValue() const { return DummyValue(); }
+
+ inline DummyValue booleanValue(bool value) { return DummyValue(); }
+ inline DummyValue nullValue() { return DummyValue(); }
+
+ inline bool objectOpen(Vector<StackEntry, 10>& stack,
+ PropertyVector** properties);
+ inline bool objectPropertyName(Vector<StackEntry, 10>& stack,
+ bool* isProtoInEval) {
+ *isProtoInEval = false;
+ return true;
+ }
+ inline void finishObjectMember(Vector<StackEntry, 10>& stack,
+ DummyValue& value,
+ PropertyVector** properties) {}
+ inline bool finishObject(Vector<StackEntry, 10>& stack, DummyValue* vp,
+ PropertyVector& properties);
+
+ inline bool arrayOpen(Vector<StackEntry, 10>& stack,
+ ElementVector** elements);
+ inline bool arrayElement(Vector<StackEntry, 10>& stack, DummyValue& value,
+ ElementVector** elements) {
+ return true;
+ }
+ inline bool finishArray(Vector<StackEntry, 10>& stack, DummyValue* vp,
+ ElementVector& elements);
+
+ inline bool errorReturn() const { return false; }
+
+ inline bool ignoreError() const { return false; }
+
+ inline void freeStackEntry(StackEntry& entry) {}
+
+ void reportError(const char* msg, const char* lineString,
+ const char* columnString);
+};
+
+template <typename CharT, typename HandlerT>
+class MOZ_STACK_CLASS JSONPerHandlerParser {
+ using ContextT = typename HandlerT::ContextT;
+
+ using Tokenizer = JSONTokenizer<CharT, JSONPerHandlerParser<CharT, HandlerT>,
+ typename HandlerT::StringBuilder>;
+
+ public:
+ using StringBuilder = typename HandlerT::StringBuilder;
+
+ public:
+ HandlerT handler;
+ Tokenizer tokenizer;
+
+  // All in-progress arrays and objects being parsed, ordered from outermost
+  // to innermost.
+ Vector<typename HandlerT::StackEntry, 10> stack;
+
+ public:
+ JSONPerHandlerParser(ContextT* context, mozilla::Range<const CharT> data)
+ : handler(context), tokenizer(data, this), stack(context) {}
+
+ JSONPerHandlerParser(JSONPerHandlerParser&& other) noexcept
+ : handler(std::move(other.handler)),
+ tokenizer(std::move(other.tokenizer)),
+ stack(handler.context()) {
+ tokenizer.fixupParser(this);
+ }
+
+ ~JSONPerHandlerParser();
+
+ JSONPerHandlerParser(const JSONPerHandlerParser<CharT, HandlerT>& other) =
+ delete;
+ void operator=(const JSONPerHandlerParser<CharT, HandlerT>& other) = delete;
+
+ template <typename TempValueT, typename ResultSetter>
+ inline bool parseImpl(TempValueT& value, ResultSetter setResult);
+
+ void outOfMemory();
+
+ void error(const char* msg);
+};
+
+template <typename CharT>
+class MOZ_STACK_CLASS JSONParser
+ : JSONPerHandlerParser<CharT, JSONFullParseHandler<CharT>> {
+ using Base = JSONPerHandlerParser<CharT, JSONFullParseHandler<CharT>>;
+
+ public:
+ using ParseType = JSONFullParseHandlerAnyChar::ParseType;
+
+ /* Public API */
+
+ /* Create a parser for the provided JSON data. */
+ JSONParser(JSContext* cx, mozilla::Range<const CharT> data,
+ ParseType parseType)
+ : Base(cx, data) {
+ this->handler.parseType = parseType;
+ }
+
+ /* Allow move construction for use with Rooted. */
+ JSONParser(JSONParser&& other) noexcept : Base(std::move(other)) {}
+
+ JSONParser(const JSONParser& other) = delete;
+ void operator=(const JSONParser& other) = delete;
+
+  /*
+   * Parse the JSON data specified at construction time. If it parses
+   * successfully, store the prescribed value in *vp and return true. If an
+   * internal error (e.g. OOM) occurs during parsing, return false.
+   * Otherwise, if invalid input was specified but no internal error occurred,
+   * behavior depends upon the parse type specified at construction: for
+   * ParseType::JSONParse, throw a SyntaxError and return false; for
+   * ParseType::AttemptForEval, return true and set *vp to |undefined|. (JSON
+   * syntax can't represent |undefined|, so the JSON data couldn't have
+   * specified it.)
+   */
+ bool parse(JS::MutableHandle<JS::Value> vp);
+
+ void trace(JSTracer* trc);
+};
+
+template <typename CharT, typename Wrapper>
+class MutableWrappedPtrOperations<JSONParser<CharT>, Wrapper>
+ : public WrappedPtrOperations<JSONParser<CharT>, Wrapper> {
+ public:
+ bool parse(JS::MutableHandle<JS::Value> vp) {
+ return static_cast<Wrapper*>(this)->get().parse(vp);
+ }
+};
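+
+// Illustrative usage sketch (assumptions: |cx| is a JSContext and |range| is
+// a mozilla::Range<const char16_t> over the input; both are hypothetical
+// here). Rooted plus the wrapper above is the intended calling pattern:
+//
+//   Rooted<JSONParser<char16_t>> parser(
+//       cx, JSONParser<char16_t>(cx, range,
+//                                JSONParser<char16_t>::ParseType::JSONParse));
+//   RootedValue result(cx);
+//   if (!parser.parse(&result)) {
+//     return false;
+//   }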
+
+template <typename CharT>
+class MOZ_STACK_CLASS JSONSyntaxParser
+ : JSONPerHandlerParser<CharT, JSONSyntaxParseHandler<CharT>> {
+ using HandlerT = JSONSyntaxParseHandler<CharT>;
+ using Base = JSONPerHandlerParser<CharT, HandlerT>;
+
+ public:
+ JSONSyntaxParser(FrontendContext* fc, mozilla::Range<const CharT> data)
+ : Base(fc, data) {}
+
+ JSONSyntaxParser(JSONSyntaxParser<CharT>&& other) noexcept
+ : Base(std::move(other)) {}
+
+ JSONSyntaxParser(const JSONSyntaxParser& other) = delete;
+ void operator=(const JSONSyntaxParser& other) = delete;
+
+ bool parse();
+};
+
+} /* namespace js */
+
+#endif /* vm_JSONParser_h */
diff --git a/js/src/vm/JSONPrinter.cpp b/js/src/vm/JSONPrinter.cpp
new file mode 100644
index 0000000000..d028e0926e
--- /dev/null
+++ b/js/src/vm/JSONPrinter.cpp
@@ -0,0 +1,273 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/JSONPrinter.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+#include <stdarg.h>
+
+#include "jsnum.h"
+
+using namespace js;
+
+void JSONPrinter::indent() {
+ MOZ_ASSERT(indentLevel_ >= 0);
+ if (indent_) {
+ out_.putChar('\n');
+ for (int i = 0; i < indentLevel_; i++) {
+ out_.put(" ");
+ }
+ }
+}
+
+void JSONPrinter::propertyName(const char* name) {
+ if (!first_) {
+ out_.putChar(',');
+ }
+ indent();
+ out_.printf("\"%s\":", name);
+ if (indent_) {
+ out_.put(" ");
+ }
+ first_ = false;
+}
+
+void JSONPrinter::beginObject() {
+ if (!first_) {
+ out_.putChar(',');
+ }
+ indent();
+ out_.putChar('{');
+ indentLevel_++;
+ first_ = true;
+}
+
+void JSONPrinter::beginList() {
+ if (!first_) {
+ out_.putChar(',');
+ }
+ indent();
+ out_.putChar('[');
+ indentLevel_++;
+ first_ = true;
+}
+
+void JSONPrinter::beginObjectProperty(const char* name) {
+ propertyName(name);
+ out_.putChar('{');
+ indentLevel_++;
+ first_ = true;
+}
+
+void JSONPrinter::beginListProperty(const char* name) {
+ propertyName(name);
+ out_.putChar('[');
+ indentLevel_++;
+ first_ = true;
+}
+
+GenericPrinter& JSONPrinter::beginStringProperty(const char* name) {
+ propertyName(name);
+ out_.putChar('"');
+ return out_;
+}
+
+void JSONPrinter::endStringProperty() {
+ endString();
+ first_ = false;
+}
+
+GenericPrinter& JSONPrinter::beginString() {
+ if (!first_) {
+ out_.putChar(',');
+ }
+ indent();
+ out_.putChar('"');
+ return out_;
+}
+
+void JSONPrinter::endString() { out_.putChar('"'); }
+
+void JSONPrinter::boolProperty(const char* name, bool value) {
+ propertyName(name);
+ out_.put(value ? "true" : "false");
+}
+
+template <typename CharT>
+static void JSONString(GenericPrinter& out, const CharT* s, size_t length) {
+ const CharT* end = s + length;
+ for (const CharT* t = s; t < end; s = ++t) {
+    // This quoting implementation is probably correct, but it emits \u
+    // escapes even when not strictly necessary.
+ char16_t c = *t;
+ if (c == '"' || c == '\\') {
+ out.printf("\\");
+ out.printf("%c", char(c));
+ } else if (!IsAsciiPrintable(c)) {
+ out.printf("\\u%04x", c);
+ } else {
+ out.printf("%c", char(c));
+ }
+ }
+}
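+
+// For example (hypothetical input): the string |say "hi"| followed by a
+// newline is emitted as |say \"hi\"\u000a| -- the quote gets a backslash
+// escape, and the newline, not being printable ASCII, becomes a \u escape.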
+
+void JSONPrinter::property(const char* name, JSLinearString* str) {
+ JS::AutoCheckCannotGC nogc;
+ beginStringProperty(name);
+
+ // Limit the string length to reduce the JSON file size.
+ size_t length = std::min(str->length(), size_t(128));
+ if (str->hasLatin1Chars()) {
+ JSONString(out_, str->latin1Chars(nogc), length);
+ } else {
+ JSONString(out_, str->twoByteChars(nogc), length);
+ }
+ endStringProperty();
+}
+
+void JSONPrinter::property(const char* name, const char* value) {
+ beginStringProperty(name);
+ out_.put(value);
+ endStringProperty();
+}
+
+void JSONPrinter::formatProperty(const char* name, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+
+ beginStringProperty(name);
+ out_.vprintf(format, ap);
+ endStringProperty();
+
+ va_end(ap);
+}
+
+void JSONPrinter::formatProperty(const char* name, const char* format,
+ va_list ap) {
+ beginStringProperty(name);
+ out_.vprintf(format, ap);
+ endStringProperty();
+}
+
+void JSONPrinter::value(const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+
+ if (!first_) {
+ out_.putChar(',');
+ }
+ indent();
+ out_.putChar('"');
+ out_.vprintf(format, ap);
+ out_.putChar('"');
+
+ va_end(ap);
+ first_ = false;
+}
+
+void JSONPrinter::property(const char* name, int32_t value) {
+ propertyName(name);
+ out_.printf("%" PRId32, value);
+}
+
+void JSONPrinter::value(int val) {
+ if (!first_) {
+ out_.putChar(',');
+ }
+ indent();
+ out_.printf("%d", val);
+ first_ = false;
+}
+
+void JSONPrinter::property(const char* name, uint32_t value) {
+ propertyName(name);
+ out_.printf("%" PRIu32, value);
+}
+
+void JSONPrinter::property(const char* name, int64_t value) {
+ propertyName(name);
+ out_.printf("%" PRId64, value);
+}
+
+void JSONPrinter::property(const char* name, uint64_t value) {
+ propertyName(name);
+ out_.printf("%" PRIu64, value);
+}
+
+#if defined(XP_DARWIN) || defined(__OpenBSD__) || defined(__wasi__)
+void JSONPrinter::property(const char* name, size_t value) {
+ propertyName(name);
+ out_.printf("%zu", value);
+}
+#endif
+
+void JSONPrinter::floatProperty(const char* name, double value,
+ size_t precision) {
+ if (!std::isfinite(value)) {
+ propertyName(name);
+ out_.put("null");
+ return;
+ }
+
+ ToCStringBuf cbuf;
+ const char* str = NumberToCString(&cbuf, value);
+ MOZ_ASSERT(str);
+
+ property(name, str);
+}
+
+void JSONPrinter::property(const char* name, const mozilla::TimeDuration& dur,
+ TimePrecision precision) {
+ if (precision == MICROSECONDS) {
+ property(name, static_cast<int64_t>(dur.ToMicroseconds()));
+ return;
+ }
+
+ propertyName(name);
+ lldiv_t split;
+ switch (precision) {
+ case SECONDS:
+ split = lldiv(static_cast<int64_t>(dur.ToMilliseconds()), 1000);
+ break;
+ case MILLISECONDS:
+ split = lldiv(static_cast<int64_t>(dur.ToMicroseconds()), 1000);
+ break;
+    case MICROSECONDS:
+      MOZ_ASSERT_UNREACHABLE("MICROSECONDS is handled by the early return");
+  }
+ out_.printf("%lld.%03lld", split.quot, split.rem);
+}
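+
+// Worked example (hypothetical value): for a duration of 1234.567
+// milliseconds, SECONDS prints "1.234", MILLISECONDS prints "1234.567", and
+// MICROSECONDS takes the early return above and prints the integer 1234567.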
+
+void JSONPrinter::nullProperty(const char* name) {
+ propertyName(name);
+ out_.put("null");
+}
+
+void JSONPrinter::nullValue() {
+ if (!first_) {
+ out_.putChar(',');
+ }
+ indent();
+ out_.put("null");
+ first_ = false;
+}
+
+void JSONPrinter::endObject() {
+ indentLevel_--;
+ indent();
+ out_.putChar('}');
+ first_ = false;
+}
+
+void JSONPrinter::endList() {
+ indentLevel_--;
+ indent();
+ out_.putChar(']');
+ first_ = false;
+}
diff --git a/js/src/vm/JSONPrinter.h b/js/src/vm/JSONPrinter.h
new file mode 100644
index 0000000000..cdfac127ae
--- /dev/null
+++ b/js/src/vm/JSONPrinter.h
@@ -0,0 +1,93 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSONPrinter_h
+#define vm_JSONPrinter_h
+
+#include "mozilla/TimeStamp.h"
+
+#include <stdio.h>
+
+#include "js/Printer.h"
+#include "js/TypeDecls.h"
+
+class JSLinearString;
+
+namespace js {
+
+class JSONPrinter {
+ protected:
+ int indentLevel_;
+ bool indent_;
+ bool first_;
+ GenericPrinter& out_;
+
+ void indent();
+
+ public:
+ explicit JSONPrinter(GenericPrinter& out, bool indent = true)
+ : indentLevel_(0), indent_(indent), first_(true), out_(out) {}
+
+ void setIndentLevel(int indentLevel) { indentLevel_ = indentLevel; }
+
+ void beginObject();
+ void beginList();
+ void beginObjectProperty(const char* name);
+ void beginListProperty(const char* name);
+
+ void value(const char* format, ...) MOZ_FORMAT_PRINTF(2, 3);
+ void value(int value);
+
+ void boolProperty(const char* name, bool value);
+
+ void property(const char* name, JSLinearString* value);
+ void property(const char* name, const char* value);
+ void property(const char* name, int32_t value);
+ void property(const char* name, uint32_t value);
+ void property(const char* name, int64_t value);
+ void property(const char* name, uint64_t value);
+#if defined(XP_DARWIN) || defined(__OpenBSD__) || defined(__wasi__)
+ // On OSX and OpenBSD, size_t is long unsigned, uint32_t is unsigned, and
+ // uint64_t is long long unsigned. Everywhere else, size_t matches either
+ // uint32_t or uint64_t.
+ void property(const char* name, size_t value);
+#endif
+
+ void formatProperty(const char* name, const char* format, ...)
+ MOZ_FORMAT_PRINTF(3, 4);
+ void formatProperty(const char* name, const char* format, va_list ap);
+
+ // JSON requires decimals to be separated by periods, but the LC_NUMERIC
+ // setting may cause printf to use commas in some locales.
+ enum TimePrecision { SECONDS, MILLISECONDS, MICROSECONDS };
+ void property(const char* name, const mozilla::TimeDuration& dur,
+ TimePrecision precision);
+
+ void floatProperty(const char* name, double value, size_t precision);
+
+ GenericPrinter& beginStringProperty(const char* name);
+ void endStringProperty();
+
+ GenericPrinter& beginString();
+ void endString();
+
+ void nullProperty(const char* name);
+ void nullValue();
+
+ void endObject();
+ void endList();
+
+  // Notify the underlying printer that the caller has detected OOM, so that
+  // it transitions to its saw-OOM state.
+ void outOfMemory() { out_.reportOutOfMemory(); }
+
+ protected:
+ void propertyName(const char* name);
+};
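+
+// Illustrative usage sketch (assumes a js::Sprinter from js/Printer.h as the
+// GenericPrinter; the property names and values are hypothetical):
+//
+//   Sprinter out(cx);
+//   if (!out.init()) {
+//     return false;
+//   }
+//   JSONPrinter json(out, /* indent = */ false);
+//   json.beginObject();
+//   json.property("name", "tick");
+//   json.property("ms", int32_t(5));
+//   json.endObject();
+//
+// This emits {"name":"tick","ms":5} to |out|.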
+
+} // namespace js
+
+#endif /* vm_JSONPrinter_h */
diff --git a/js/src/vm/JSObject-inl.h b/js/src/vm/JSObject-inl.h
new file mode 100644
index 0000000000..ea626eed3d
--- /dev/null
+++ b/js/src/vm/JSObject-inl.h
@@ -0,0 +1,597 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSObject_inl_h
+#define vm_JSObject_inl_h
+
+#include "vm/JSObject.h"
+
+#include "js/Object.h" // JS::GetBuiltinClass
+#include "vm/ArrayObject.h"
+#include "vm/BoundFunctionObject.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/JSFunction.h"
+#include "vm/Probes.h"
+#include "vm/PropertyResult.h"
+#include "vm/TypedArrayObject.h"
+
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/TupleType.h"
+#endif
+
+#include "gc/GCContext-inl.h"
+#include "gc/ObjectKind-inl.h"
+#include "vm/ObjectOperations-inl.h" // js::MaybeHasInterestingSymbolProperty
+
+namespace js {
+
+#ifdef ENABLE_RECORD_TUPLE
+// Defined in vm/RecordTupleShared.{h,cpp}. We cannot include that file
+// because it causes circular dependencies.
+extern bool IsExtendedPrimitiveWrapper(const JSObject& obj);
+#endif
+
+// Get the GC kind to use for scripted 'new', empty object literals ({}), and
+// the |Object| constructor.
+static inline gc::AllocKind NewObjectGCKind() { return gc::AllocKind::OBJECT4; }
+
+} // namespace js
+
+MOZ_ALWAYS_INLINE uint32_t js::NativeObject::numDynamicSlots() const {
+ uint32_t slots = getSlotsHeader()->capacity();
+ MOZ_ASSERT(slots == calculateDynamicSlots());
+ MOZ_ASSERT_IF(hasDynamicSlots() && !hasUniqueId(), slots != 0);
+
+ return slots;
+}
+
+MOZ_ALWAYS_INLINE uint32_t js::NativeObject::calculateDynamicSlots() const {
+ return calculateDynamicSlots(numFixedSlots(), slotSpan(), getClass());
+}
+
+/* static */ MOZ_ALWAYS_INLINE uint32_t js::NativeObject::calculateDynamicSlots(
+ uint32_t nfixed, uint32_t span, const JSClass* clasp) {
+ if (span <= nfixed) {
+ return 0;
+ }
+
+ uint32_t ndynamic = span - nfixed;
+
+  // Increase the slot count to SLOT_CAPACITY_MIN to decrease the likelihood
+  // that the dynamic slots will need to be grown again. ArrayObjects skip
+  // this because dynamic slots are uncommon for arrays.
+ if (clasp != &ArrayObject::class_ && ndynamic <= SLOT_CAPACITY_MIN) {
+ return SLOT_CAPACITY_MIN;
+ }
+
+ uint32_t count =
+ mozilla::RoundUpPow2(ndynamic + ObjectSlots::VALUES_PER_HEADER);
+
+ uint32_t slots = count - ObjectSlots::VALUES_PER_HEADER;
+ MOZ_ASSERT(slots >= ndynamic);
+ return slots;
+}
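+
+// Worked example (hypothetical numbers, assuming SLOT_CAPACITY_MIN == 8 and
+// ObjectSlots::VALUES_PER_HEADER == 2): for a plain object with nfixed == 4
+// and span == 20, ndynamic == 16, so 16 + 2 == 18 rounds up to 32 and 30
+// slots are allocated. With span == 10, ndynamic == 6 <= SLOT_CAPACITY_MIN,
+// so 8 slots are allocated up front.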
+
+/* static */ MOZ_ALWAYS_INLINE uint32_t
+js::NativeObject::calculateDynamicSlots(SharedShape* shape) {
+ return calculateDynamicSlots(shape->numFixedSlots(), shape->slotSpan(),
+ shape->getObjectClass());
+}
+
+inline void JSObject::finalize(JS::GCContext* gcx) {
+ js::probes::FinalizeObject(this);
+
+#ifdef DEBUG
+ MOZ_ASSERT(isTenured());
+ if (!IsBackgroundFinalized(asTenured().getAllocKind())) {
+ /* Assert we're on the main thread. */
+ MOZ_ASSERT(CurrentThreadCanAccessZone(zone()));
+ }
+#endif
+
+ js::Shape* objShape = shape();
+
+ const JSClass* clasp = objShape->getObjectClass();
+ if (clasp->hasFinalize()) {
+ clasp->doFinalize(gcx, this);
+ }
+
+ if (!objShape->isNative()) {
+ return;
+ }
+
+ js::NativeObject* nobj = &as<js::NativeObject>();
+ if (nobj->hasDynamicSlots()) {
+ js::ObjectSlots* slotsHeader = nobj->getSlotsHeader();
+ size_t size = js::ObjectSlots::allocSize(slotsHeader->capacity());
+ gcx->free_(this, slotsHeader, size, js::MemoryUse::ObjectSlots);
+ }
+
+ if (nobj->hasDynamicElements()) {
+ js::ObjectElements* elements = nobj->getElementsHeader();
+ size_t size = elements->numAllocatedElements() * sizeof(js::HeapSlot);
+ gcx->free_(this, nobj->getUnshiftedElementsHeader(), size,
+ js::MemoryUse::ObjectElements);
+ }
+}
+
+inline bool JSObject::isQualifiedVarObj() const {
+ if (is<js::DebugEnvironmentProxy>()) {
+ return as<js::DebugEnvironmentProxy>().environment().isQualifiedVarObj();
+ }
+ bool rv = hasFlag(js::ObjectFlag::QualifiedVarObj);
+ MOZ_ASSERT_IF(rv, is<js::GlobalObject>() || is<js::CallObject>() ||
+ is<js::VarEnvironmentObject>() ||
+ is<js::ModuleEnvironmentObject>() ||
+ is<js::NonSyntacticVariablesObject>() ||
+ (is<js::WithEnvironmentObject>() &&
+ !as<js::WithEnvironmentObject>().isSyntactic()));
+ return rv;
+}
+
+inline bool JSObject::isUnqualifiedVarObj() const {
+ if (is<js::DebugEnvironmentProxy>()) {
+ return as<js::DebugEnvironmentProxy>().environment().isUnqualifiedVarObj();
+ }
+ return is<js::GlobalObject>() || is<js::NonSyntacticVariablesObject>();
+}
+
+inline bool JSObject::canHaveFixedElements() const {
+ return (is<js::ArrayObject>() || IF_RECORD_TUPLE(is<js::TupleType>(), false));
+}
+
+namespace js {
+
+#ifdef DEBUG
+inline bool ClassCanHaveFixedData(const JSClass* clasp) {
+  // Normally, the number of fixed slots given to an object is the maximum
+ // permitted for its size class. For array buffers and non-shared typed
+ // arrays we only use enough to cover the class reserved slots, so that
+ // the remaining space in the object's allocation is available for the
+ // buffer's data.
+ return !clasp->isNativeObject() || clasp == &js::ArrayBufferObject::class_ ||
+ js::IsTypedArrayClass(clasp);
+}
+#endif
+
+class MOZ_RAII AutoSuppressAllocationMetadataBuilder {
+ JS::Zone* zone;
+ bool saved;
+
+ public:
+ explicit AutoSuppressAllocationMetadataBuilder(JSContext* cx)
+ : zone(cx->zone()), saved(zone->suppressAllocationMetadataBuilder) {
+ zone->suppressAllocationMetadataBuilder = true;
+ }
+
+ ~AutoSuppressAllocationMetadataBuilder() {
+ zone->suppressAllocationMetadataBuilder = saved;
+ }
+};
+
+// This function is meant to be called from allocation fast paths.
+//
+// If we do have an allocation metadata builder, it can cause a GC, so the
+// object must be rooted. The usual way to do this would be to make our callers
+// pass a HandleObject, but that would require them to pay the cost of rooting
+// the object unconditionally, even though collecting metadata is rare. Instead,
+// SetNewObjectMetadata's contract is that the caller must use the pointer
+// returned in place of the pointer passed. If a GC occurs, the returned pointer
+// may be the passed pointer, relocated by GC. If no GC could occur, it's just
+// passed through. We root nothing unless necessary.
+template <typename T>
+[[nodiscard]] static inline T* SetNewObjectMetadata(JSContext* cx, T* obj) {
+ MOZ_ASSERT(cx->isMainThreadContext());
+ MOZ_ASSERT(cx->realm()->hasAllocationMetadataBuilder());
+ MOZ_ASSERT(!cx->realm()->hasObjectPendingMetadata());
+
+ // The metadata builder is invoked for each object created on the main thread,
+ // except when it's suppressed.
+ if (!cx->zone()->suppressAllocationMetadataBuilder) {
+ // Don't collect metadata on objects that represent metadata, to avoid
+ // recursion.
+ AutoSuppressAllocationMetadataBuilder suppressMetadata(cx);
+
+ Rooted<T*> rooted(cx, obj);
+ cx->realm()->setNewObjectMetadata(cx, rooted);
+ return rooted;
+ }
+
+ return obj;
+}
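+
+// Usage sketch (hypothetical caller): per the contract above, callers must
+// replace their pointer with the return value, since the metadata builder
+// may trigger a GC that relocates the object:
+//
+//   obj = SetNewObjectMetadata(cx, obj);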
+
+} // namespace js
+
+inline js::GlobalObject& JSObject::nonCCWGlobal() const {
+ /*
+ * The global is read-barriered so that it is kept live by access through
+   * the Realm. When accessed through a JSObject, however, the global will
+   * already be kept live by the black JSObject's group pointer, so it does
+   * not need to be read-barriered.
+ */
+ return *nonCCWRealm()->unsafeUnbarrieredMaybeGlobal();
+}
+
+inline bool JSObject::nonProxyIsExtensible() const {
+ MOZ_ASSERT(!uninlinedIsProxyObject());
+
+#ifdef ENABLE_RECORD_TUPLE
+ if (js::IsExtendedPrimitiveWrapper(*this)) {
+ return false;
+ }
+#endif
+ // [[Extensible]] for ordinary non-proxy objects is an object flag.
+ return !hasFlag(js::ObjectFlag::NotExtensible);
+}
+
+inline bool JSObject::hasInvalidatedTeleporting() const {
+ return hasFlag(js::ObjectFlag::InvalidatedTeleporting);
+}
+
+MOZ_ALWAYS_INLINE bool JSObject::maybeHasInterestingSymbolProperty() const {
+ if (is<js::NativeObject>()) {
+ return as<js::NativeObject>().hasInterestingSymbol();
+ }
+ return true;
+}
+
+inline bool JSObject::staticPrototypeIsImmutable() const {
+ MOZ_ASSERT(hasStaticPrototype());
+ return hasFlag(js::ObjectFlag::ImmutablePrototype);
+}
+
+namespace js {
+
+static MOZ_ALWAYS_INLINE bool IsFunctionObject(const js::Value& v) {
+ return v.isObject() && v.toObject().is<JSFunction>();
+}
+
+static MOZ_ALWAYS_INLINE bool IsFunctionObject(const js::Value& v,
+ JSFunction** fun) {
+ if (v.isObject() && v.toObject().is<JSFunction>()) {
+ *fun = &v.toObject().as<JSFunction>();
+ return true;
+ }
+ return false;
+}
+
+static MOZ_ALWAYS_INLINE bool IsNativeFunction(const js::Value& v,
+ JSNative native) {
+ JSFunction* fun;
+ return IsFunctionObject(v, &fun) && fun->maybeNative() == native;
+}
+
+static MOZ_ALWAYS_INLINE bool IsNativeFunction(const JSObject* obj,
+ JSNative native) {
+ return obj->is<JSFunction>() && obj->as<JSFunction>().maybeNative() == native;
+}
+
+// Return whether looking up a method on 'obj' definitely resolves to the
+// original specified native function. The method may conservatively return
+// 'false' in the case of proxies or other non-native objects.
+static MOZ_ALWAYS_INLINE bool HasNativeMethodPure(JSObject* obj,
+ PropertyName* name,
+ JSNative native,
+ JSContext* cx) {
+ Value v;
+ if (!GetPropertyPure(cx, obj, NameToId(name), &v)) {
+ return false;
+ }
+
+ return IsNativeFunction(v, native);
+}
+
+// Return whether 'obj' definitely has no @@toPrimitive method.
+static MOZ_ALWAYS_INLINE bool HasNoToPrimitiveMethodPure(JSObject* obj,
+ JSContext* cx) {
+ JS::Symbol* toPrimitive = cx->wellKnownSymbols().toPrimitive;
+ JSObject* holder;
+ if (!MaybeHasInterestingSymbolProperty(cx, obj, toPrimitive, &holder)) {
+#ifdef DEBUG
+ NativeObject* pobj;
+ PropertyResult prop;
+ MOZ_ASSERT(LookupPropertyPure(cx, obj, PropertyKey::Symbol(toPrimitive),
+ &pobj, &prop));
+ MOZ_ASSERT(prop.isNotFound());
+#endif
+ return true;
+ }
+
+ NativeObject* pobj;
+ PropertyResult prop;
+ if (!LookupPropertyPure(cx, holder, PropertyKey::Symbol(toPrimitive), &pobj,
+ &prop)) {
+ return false;
+ }
+
+ return prop.isNotFound();
+}
+
+extern bool ToPropertyKeySlow(JSContext* cx, HandleValue argument,
+ MutableHandleId result);
+
+/* ES6 draft rev 28 (2014 Oct 14) 7.1.14 */
+MOZ_ALWAYS_INLINE bool ToPropertyKey(JSContext* cx, HandleValue argument,
+ MutableHandleId result) {
+ if (MOZ_LIKELY(argument.isPrimitive())) {
+ return PrimitiveValueToId<CanGC>(cx, argument, result);
+ }
+
+ return ToPropertyKeySlow(cx, argument, result);
+}
+
+/*
+ * Return true if this is a compiler-created internal function accessed by
+ * its own object. Such a function object must not be accessible to script
+ * or embedding code.
+ */
+inline bool IsInternalFunctionObject(JSObject& funobj) {
+ JSFunction& fun = funobj.as<JSFunction>();
+ return fun.isInterpreted() && !fun.environment();
+}
+
+inline gc::Heap GetInitialHeap(NewObjectKind newKind, const JSClass* clasp,
+ gc::AllocSite* site = nullptr) {
+ if (newKind != GenericObject) {
+ return gc::Heap::Tenured;
+ }
+ if (clasp->hasFinalize() && !CanNurseryAllocateFinalizedClass(clasp)) {
+ return gc::Heap::Tenured;
+ }
+ if (site) {
+ return site->initialHeap();
+ }
+ return gc::Heap::Default;
+}
+
+/*
+ * Make an object with the specified tagged prototype.
+ */
+NativeObject* NewObjectWithGivenTaggedProto(JSContext* cx, const JSClass* clasp,
+ Handle<TaggedProto> proto,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind);
+
+template <NewObjectKind NewKind>
+inline NativeObject* NewObjectWithGivenTaggedProto(JSContext* cx,
+ const JSClass* clasp,
+ Handle<TaggedProto> proto) {
+ gc::AllocKind allocKind = gc::GetGCObjectKind(clasp);
+ return NewObjectWithGivenTaggedProto(cx, clasp, proto, allocKind, NewKind);
+}
+
+namespace detail {
+
+template <typename T, NewObjectKind NewKind>
+inline T* NewObjectWithGivenTaggedProtoForKind(JSContext* cx,
+ Handle<TaggedProto> proto) {
+ JSObject* obj = NewObjectWithGivenTaggedProto<NewKind>(cx, &T::class_, proto);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+} // namespace detail
+
+template <typename T>
+inline T* NewObjectWithGivenTaggedProto(JSContext* cx,
+ Handle<TaggedProto> proto) {
+ return detail::NewObjectWithGivenTaggedProtoForKind<T, GenericObject>(cx,
+ proto);
+}
+
+inline NativeObject* NewObjectWithGivenProto(
+ JSContext* cx, const JSClass* clasp, HandleObject proto,
+ gc::AllocKind allocKind, NewObjectKind newKind = GenericObject) {
+ return NewObjectWithGivenTaggedProto(cx, clasp, AsTaggedProto(proto),
+ allocKind, newKind);
+}
+
+inline NativeObject* NewObjectWithGivenProto(JSContext* cx,
+ const JSClass* clasp,
+ HandleObject proto) {
+ return NewObjectWithGivenTaggedProto<GenericObject>(cx, clasp,
+ AsTaggedProto(proto));
+}
+
+inline NativeObject* NewTenuredObjectWithGivenProto(JSContext* cx,
+ const JSClass* clasp,
+ HandleObject proto) {
+ return NewObjectWithGivenTaggedProto<TenuredObject>(cx, clasp,
+ AsTaggedProto(proto));
+}
+
+template <typename T>
+inline T* NewObjectWithGivenProto(JSContext* cx, HandleObject proto) {
+ return detail::NewObjectWithGivenTaggedProtoForKind<T, GenericObject>(
+ cx, AsTaggedProto(proto));
+}
+
+template <typename T>
+inline T* NewTenuredObjectWithGivenProto(JSContext* cx, HandleObject proto) {
+ return detail::NewObjectWithGivenTaggedProtoForKind<T, TenuredObject>(
+ cx, AsTaggedProto(proto));
+}
+
+template <typename T>
+inline T* NewObjectWithGivenProtoAndKinds(JSContext* cx, HandleObject proto,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind) {
+ JSObject* obj = NewObjectWithGivenTaggedProto(
+ cx, &T::class_, AsTaggedProto(proto), allocKind, newKind);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+// Make an object with the prototype set according to the cached prototype or
+// Object.prototype.
+NativeObject* NewObjectWithClassProto(JSContext* cx, const JSClass* clasp,
+ HandleObject proto,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind = GenericObject);
+
+inline NativeObject* NewObjectWithClassProto(
+ JSContext* cx, const JSClass* clasp, HandleObject proto,
+ NewObjectKind newKind = GenericObject) {
+ gc::AllocKind allocKind = gc::GetGCObjectKind(clasp);
+ return NewObjectWithClassProto(cx, clasp, proto, allocKind, newKind);
+}
+
+template <class T>
+inline T* NewObjectWithClassProto(JSContext* cx, HandleObject proto) {
+ JSObject* obj = NewObjectWithClassProto(cx, &T::class_, proto, GenericObject);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+template <class T>
+inline T* NewObjectWithClassProtoAndKind(JSContext* cx, HandleObject proto,
+ NewObjectKind newKind) {
+ JSObject* obj = NewObjectWithClassProto(cx, &T::class_, proto, newKind);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+template <class T>
+inline T* NewObjectWithClassProto(JSContext* cx, HandleObject proto,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind = GenericObject) {
+ NativeObject* obj =
+ NewObjectWithClassProto(cx, &T::class_, proto, allocKind, newKind);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+/*
+ * Create a native instance of the given class, with the prototype set
+ * according to the context's active global.
+ */
+inline NativeObject* NewBuiltinClassInstance(
+ JSContext* cx, const JSClass* clasp, gc::AllocKind allocKind,
+ NewObjectKind newKind = GenericObject) {
+ return NewObjectWithClassProto(cx, clasp, nullptr, allocKind, newKind);
+}
+
+inline NativeObject* NewBuiltinClassInstance(
+ JSContext* cx, const JSClass* clasp,
+ NewObjectKind newKind = GenericObject) {
+ gc::AllocKind allocKind = gc::GetGCObjectKind(clasp);
+ return NewBuiltinClassInstance(cx, clasp, allocKind, newKind);
+}
+
+template <typename T>
+inline T* NewBuiltinClassInstance(JSContext* cx) {
+ JSObject* obj = NewBuiltinClassInstance(cx, &T::class_, GenericObject);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+template <typename T>
+inline T* NewTenuredBuiltinClassInstance(JSContext* cx) {
+ JSObject* obj = NewBuiltinClassInstance(cx, &T::class_, TenuredObject);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+template <typename T>
+inline T* NewBuiltinClassInstanceWithKind(JSContext* cx,
+ NewObjectKind newKind) {
+ JSObject* obj = NewBuiltinClassInstance(cx, &T::class_, newKind);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+template <typename T>
+inline T* NewBuiltinClassInstance(JSContext* cx, gc::AllocKind allocKind,
+ NewObjectKind newKind = GenericObject) {
+ JSObject* obj = NewBuiltinClassInstance(cx, &T::class_, allocKind, newKind);
+ return obj ? &obj->as<T>() : nullptr;
+}
+
+static inline gc::AllocKind GuessArrayGCKind(size_t numElements) {
+ if (numElements) {
+ return gc::GetGCArrayKind(numElements);
+ }
+ return gc::AllocKind::OBJECT8;
+}
+
+// Sets *cls to ESClass::Other if the value isn't an object, or if the object
+// isn't of one of the enumerated classes. Otherwise sets *cls to the
+// appropriate class.
+inline bool GetClassOfValue(JSContext* cx, HandleValue v, ESClass* cls) {
+ if (!v.isObject()) {
+ *cls = ESClass::Other;
+ return true;
+ }
+
+ RootedObject obj(cx, &v.toObject());
+ return JS::GetBuiltinClass(cx, obj, cls);
+}
+
+extern NativeObject* InitClass(
+ JSContext* cx, HandleObject obj, const JSClass* protoClass,
+ HandleObject protoProto, const char* name, JSNative constructor,
+ unsigned nargs, const JSPropertySpec* ps, const JSFunctionSpec* fs,
+ const JSPropertySpec* static_ps, const JSFunctionSpec* static_fs,
+ NativeObject** ctorp = nullptr);
+
+MOZ_ALWAYS_INLINE const char* GetObjectClassName(JSContext* cx,
+ HandleObject obj) {
+ if (obj->is<ProxyObject>()) {
+ return Proxy::className(cx, obj);
+ }
+
+ return obj->getClass()->name;
+}
+
+inline bool IsCallable(const Value& v) {
+ return v.isObject() && v.toObject().isCallable();
+}
+
+// ES6 rev 24 (2014 April 27) 7.2.5 IsConstructor
+inline bool IsConstructor(const Value& v) {
+ return v.isObject() && v.toObject().isConstructor();
+}
+
+static inline bool MaybePreserveDOMWrapper(JSContext* cx, HandleObject obj) {
+ if (!obj->getClass()->isDOMClass()) {
+ return true;
+ }
+
+ MOZ_ASSERT(cx->runtime()->preserveWrapperCallback);
+ return cx->runtime()->preserveWrapperCallback(cx, obj);
+}
+
+} /* namespace js */
+
+MOZ_ALWAYS_INLINE bool JSObject::isCallable() const {
+ if (is<JSFunction>()) {
+ return true;
+ }
+ if (is<js::ProxyObject>()) {
+ const js::ProxyObject& p = as<js::ProxyObject>();
+ return p.handler()->isCallable(const_cast<JSObject*>(this));
+ }
+ return callHook() != nullptr;
+}
+
+MOZ_ALWAYS_INLINE bool JSObject::isConstructor() const {
+ if (is<JSFunction>()) {
+ const JSFunction& fun = as<JSFunction>();
+ return fun.isConstructor();
+ }
+ if (is<js::BoundFunctionObject>()) {
+ const js::BoundFunctionObject& bound = as<js::BoundFunctionObject>();
+ return bound.isConstructor();
+ }
+ if (is<js::ProxyObject>()) {
+ const js::ProxyObject& p = as<js::ProxyObject>();
+ return p.handler()->isConstructor(const_cast<JSObject*>(this));
+ }
+ return constructHook() != nullptr;
+}
+
+MOZ_ALWAYS_INLINE JSNative JSObject::callHook() const {
+ return getClass()->getCall();
+}
+
+MOZ_ALWAYS_INLINE JSNative JSObject::constructHook() const {
+ return getClass()->getConstruct();
+}
+
+#endif /* vm_JSObject_inl_h */
diff --git a/js/src/vm/JSObject.cpp b/js/src/vm/JSObject.cpp
new file mode 100644
index 0000000000..292971cf3e
--- /dev/null
+++ b/js/src/vm/JSObject.cpp
@@ -0,0 +1,3649 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS object implementation.
+ */
+
+#include "vm/JSObject-inl.h"
+
+#include "mozilla/MemoryReporting.h"
+
+#include <string.h>
+
+#include "jsapi.h"
+#include "jsexn.h"
+#include "jsfriendapi.h"
+#include "jsnum.h"
+#include "jstypes.h"
+
+#include "builtin/BigInt.h"
+#include "builtin/MapObject.h"
+#include "builtin/Object.h"
+#include "builtin/String.h"
+#include "builtin/Symbol.h"
+#include "builtin/WeakSetObject.h"
+#include "gc/AllocKind.h"
+#include "gc/GC.h"
+#include "js/CharacterEncoding.h"
+#include "js/friend/DumpFunctions.h" // js::DumpObject
+#include "js/friend/ErrorMessages.h" // JSErrNum, js::GetErrorMessage, JSMSG_*
+#include "js/friend/WindowProxy.h" // js::IsWindow, js::ToWindowProxyIfWindow
+#include "js/MemoryMetrics.h"
+#include "js/PropertyDescriptor.h" // JS::FromPropertyDescriptor
+#include "js/PropertySpec.h" // JSPropertySpec
+#include "js/Proxy.h"
+#include "js/Result.h"
+#include "js/UbiNode.h"
+#include "js/Wrapper.h"
+#include "proxy/DeadObjectProxy.h"
+#include "util/Memory.h"
+#include "util/Text.h"
+#include "util/WindowsWrapper.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/Compartment.h"
+#include "vm/DateObject.h"
+#include "vm/Interpreter.h"
+#include "vm/Iteration.h"
+#include "vm/JSAtom.h"
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+#include "vm/ProxyObject.h"
+#include "vm/RegExpObject.h"
+#include "vm/Shape.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/Watchtower.h"
+#include "vm/WellKnownAtom.h" // js_*_str
+#include "vm/WrapperObject.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "builtin/RecordObject.h"
+# include "builtin/TupleObject.h"
+# include "vm/RecordType.h"
+# include "vm/TupleType.h"
+#endif
+#include "wasm/WasmGcObject.h"
+
+#include "gc/StableCellHasher-inl.h"
+#include "vm/BooleanObject-inl.h"
+#include "vm/EnvironmentObject-inl.h"
+#include "vm/Interpreter-inl.h"
+#include "vm/JSAtom-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/NumberObject-inl.h"
+#include "vm/ObjectFlags-inl.h"
+#include "vm/Realm-inl.h"
+#include "vm/StringObject-inl.h"
+#include "vm/TypedArrayObject-inl.h"
+
+using namespace js;
+
+using mozilla::Maybe;
+
+void js::ReportNotObject(JSContext* cx, JSErrNum err, int spindex,
+ HandleValue v) {
+ MOZ_ASSERT(!v.isObject());
+ ReportValueError(cx, err, spindex, v, nullptr);
+}
+
+void js::ReportNotObject(JSContext* cx, JSErrNum err, HandleValue v) {
+ ReportNotObject(cx, err, JSDVG_SEARCH_STACK, v);
+}
+
+void js::ReportNotObject(JSContext* cx, const Value& v) {
+ RootedValue value(cx, v);
+ ReportNotObject(cx, JSMSG_OBJECT_REQUIRED, value);
+}
+
+void js::ReportNotObjectArg(JSContext* cx, const char* nth, const char* fun,
+ HandleValue v) {
+ MOZ_ASSERT(!v.isObject());
+
+ UniqueChars bytes;
+ if (const char* chars = ValueToSourceForError(cx, v, bytes)) {
+ JS_ReportErrorNumberLatin1(cx, GetErrorMessage, nullptr,
+ JSMSG_OBJECT_REQUIRED_ARG, nth, fun, chars);
+ }
+}
+
+JS_PUBLIC_API const char* JS::InformalValueTypeName(const Value& v) {
+ switch (v.type()) {
+ case ValueType::Double:
+ case ValueType::Int32:
+ return "number";
+ case ValueType::Boolean:
+ return "boolean";
+ case ValueType::Undefined:
+ return "undefined";
+ case ValueType::Null:
+ return "null";
+ case ValueType::String:
+ return "string";
+ case ValueType::Symbol:
+ return "symbol";
+ case ValueType::BigInt:
+ return "bigint";
+ case ValueType::Object:
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+#endif
+ return v.getObjectPayload().getClass()->name;
+ case ValueType::Magic:
+ return "magic";
+ case ValueType::PrivateGCThing:
+ break;
+ }
+
+ MOZ_CRASH("unexpected type");
+}
+
+// ES6 draft rev37 6.2.4.4 FromPropertyDescriptor
+JS_PUBLIC_API bool JS::FromPropertyDescriptor(
+ JSContext* cx, Handle<Maybe<PropertyDescriptor>> desc_,
+ MutableHandleValue vp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(desc_);
+
+ // Step 1.
+ if (desc_.isNothing()) {
+ vp.setUndefined();
+ return true;
+ }
+
+ Rooted<PropertyDescriptor> desc(cx, *desc_);
+ return FromPropertyDescriptorToObject(cx, desc, vp);
+}
+
+bool js::FromPropertyDescriptorToObject(JSContext* cx,
+ Handle<PropertyDescriptor> desc,
+ MutableHandleValue vp) {
+ // Step 2-3.
+ RootedObject obj(cx, NewPlainObject(cx));
+ if (!obj) {
+ return false;
+ }
+
+ const JSAtomState& names = cx->names();
+
+ // Step 4.
+ if (desc.hasValue()) {
+ if (!DefineDataProperty(cx, obj, names.value, desc.value())) {
+ return false;
+ }
+ }
+
+ // Step 5.
+ RootedValue v(cx);
+ if (desc.hasWritable()) {
+ v.setBoolean(desc.writable());
+ if (!DefineDataProperty(cx, obj, names.writable, v)) {
+ return false;
+ }
+ }
+
+ // Step 6.
+ if (desc.hasGetter()) {
+ if (JSObject* get = desc.getter()) {
+ v.setObject(*get);
+ } else {
+ v.setUndefined();
+ }
+ if (!DefineDataProperty(cx, obj, names.get, v)) {
+ return false;
+ }
+ }
+
+ // Step 7.
+ if (desc.hasSetter()) {
+ if (JSObject* set = desc.setter()) {
+ v.setObject(*set);
+ } else {
+ v.setUndefined();
+ }
+ if (!DefineDataProperty(cx, obj, names.set, v)) {
+ return false;
+ }
+ }
+
+ // Step 8.
+ if (desc.hasEnumerable()) {
+ v.setBoolean(desc.enumerable());
+ if (!DefineDataProperty(cx, obj, names.enumerable, v)) {
+ return false;
+ }
+ }
+
+ // Step 9.
+ if (desc.hasConfigurable()) {
+ v.setBoolean(desc.configurable());
+ if (!DefineDataProperty(cx, obj, names.configurable, v)) {
+ return false;
+ }
+ }
+
+ vp.setObject(*obj);
+ return true;
+}
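+
+// For example (hypothetical descriptor): a complete data descriptor with
+// value 1 converts to the plain object
+//   { value: 1, writable: true, enumerable: true, configurable: true }
+// while an accessor descriptor produces |get|/|set| properties instead.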
+
+bool js::GetFirstArgumentAsObject(JSContext* cx, const CallArgs& args,
+ const char* method,
+ MutableHandleObject objp) {
+ if (!args.requireAtLeast(cx, method, 1)) {
+ return false;
+ }
+
+ HandleValue v = args[0];
+ if (!v.isObject()) {
+ UniqueChars bytes =
+ DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, v, nullptr);
+ if (!bytes) {
+ return false;
+ }
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_UNEXPECTED_TYPE, bytes.get(),
+ "not an object");
+ return false;
+ }
+
+ objp.set(&v.toObject());
+ return true;
+}
+
+static bool GetPropertyIfPresent(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleValue vp, bool* foundp) {
+ if (!HasProperty(cx, obj, id, foundp)) {
+ return false;
+ }
+ if (!*foundp) {
+ vp.setUndefined();
+ return true;
+ }
+
+ return GetProperty(cx, obj, obj, id, vp);
+}
+
+bool js::Throw(JSContext* cx, HandleId id, unsigned errorNumber,
+ const char* details) {
+ MOZ_ASSERT(js_ErrorFormatString[errorNumber].argCount == (details ? 2 : 1));
+ MOZ_ASSERT_IF(details, JS::StringIsASCII(details));
+
+ UniqueChars bytes =
+ IdToPrintableUTF8(cx, id, IdToPrintableBehavior::IdIsPropertyKey);
+ if (!bytes) {
+ return false;
+ }
+
+ if (details) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber,
+ bytes.get(), details);
+ } else {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber,
+ bytes.get());
+ }
+
+ return false;
+}
+
+/*** PropertyDescriptor operations and DefineProperties *********************/
+
+#ifndef ENABLE_DECORATORS
+// These are defined by CommonPropertyNames.h and WellKnownAtom.{cpp,h}
+// when decorators are enabled.
+static const char js_getter_str[] = "getter";
+static const char js_setter_str[] = "setter";
+#endif
+
+static Result<> CheckCallable(JSContext* cx, JSObject* obj,
+ const char* fieldName) {
+ if (obj && !obj->isCallable()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_GET_SET_FIELD, fieldName);
+ return cx->alreadyReportedError();
+ }
+ return Ok();
+}
+
+// 6.2.5.5 ToPropertyDescriptor(Obj)
+bool js::ToPropertyDescriptor(JSContext* cx, HandleValue descval,
+ bool checkAccessors,
+ MutableHandle<PropertyDescriptor> desc_) {
+ // Step 1.
+ RootedObject obj(cx,
+ RequireObject(cx, JSMSG_OBJECT_REQUIRED_PROP_DESC, descval));
+ if (!obj) {
+ return false;
+ }
+
+ // Step 2.
+ Rooted<PropertyDescriptor> desc(cx, PropertyDescriptor::Empty());
+
+ RootedId id(cx);
+ RootedValue v(cx);
+
+ // Steps 3-4.
+ id = NameToId(cx->names().enumerable);
+ bool hasEnumerable = false;
+ if (!GetPropertyIfPresent(cx, obj, id, &v, &hasEnumerable)) {
+ return false;
+ }
+ if (hasEnumerable) {
+ desc.setEnumerable(ToBoolean(v));
+ }
+
+ // Steps 5-6.
+ id = NameToId(cx->names().configurable);
+ bool hasConfigurable = false;
+ if (!GetPropertyIfPresent(cx, obj, id, &v, &hasConfigurable)) {
+ return false;
+ }
+ if (hasConfigurable) {
+ desc.setConfigurable(ToBoolean(v));
+ }
+
+ // Steps 7-8.
+ id = NameToId(cx->names().value);
+ bool hasValue = false;
+ if (!GetPropertyIfPresent(cx, obj, id, &v, &hasValue)) {
+ return false;
+ }
+ if (hasValue) {
+ desc.setValue(v);
+ }
+
+ // Steps 9-10.
+ id = NameToId(cx->names().writable);
+ bool hasWritable = false;
+ if (!GetPropertyIfPresent(cx, obj, id, &v, &hasWritable)) {
+ return false;
+ }
+ if (hasWritable) {
+ desc.setWritable(ToBoolean(v));
+ }
+
+ // Steps 11-12.
+ id = NameToId(cx->names().get);
+ bool hasGet = false;
+ if (!GetPropertyIfPresent(cx, obj, id, &v, &hasGet)) {
+ return false;
+ }
+ RootedObject getter(cx);
+ if (hasGet) {
+ if (v.isObject()) {
+ if (checkAccessors) {
+ JS_TRY_OR_RETURN_FALSE(cx,
+ CheckCallable(cx, &v.toObject(), js_getter_str));
+ }
+ getter = &v.toObject();
+ } else if (v.isUndefined()) {
+ getter = nullptr;
+ } else {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_GET_SET_FIELD, js_getter_str);
+ return false;
+ }
+ }
+
+ // Steps 13-14.
+ id = NameToId(cx->names().set);
+ bool hasSet = false;
+ if (!GetPropertyIfPresent(cx, obj, id, &v, &hasSet)) {
+ return false;
+ }
+ RootedObject setter(cx);
+ if (hasSet) {
+ if (v.isObject()) {
+ if (checkAccessors) {
+ JS_TRY_OR_RETURN_FALSE(cx,
+ CheckCallable(cx, &v.toObject(), js_setter_str));
+ }
+ setter = &v.toObject();
+ } else if (v.isUndefined()) {
+ setter = nullptr;
+ } else {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_GET_SET_FIELD, js_setter_str);
+ return false;
+ }
+ }
+
+ // Step 15.
+ if (hasGet || hasSet) {
+ if (hasValue || hasWritable) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_INVALID_DESCRIPTOR);
+ return false;
+ }
+
+    // We delay setGetter/setSetter until after the previous check because
+    // otherwise we would assert.
+ if (hasGet) {
+ desc.setGetter(getter);
+ }
+ if (hasSet) {
+ desc.setSetter(setter);
+ }
+ }
+
+ desc.assertValid();
+ desc_.set(desc);
+ return true;
+}
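+
+// For example (hypothetical input): |{ get() {}, enumerable: true }| yields
+// an accessor descriptor with [[Get]] set and [[Enumerable]] true, while an
+// object combining |value| with |get| or |set| is rejected in step 15 above.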
+
+Result<> js::CheckPropertyDescriptorAccessors(JSContext* cx,
+ Handle<PropertyDescriptor> desc) {
+ if (desc.hasGetter()) {
+ MOZ_TRY(CheckCallable(cx, desc.getter(), js_getter_str));
+ }
+
+ if (desc.hasSetter()) {
+ MOZ_TRY(CheckCallable(cx, desc.setter(), js_setter_str));
+ }
+
+ return Ok();
+}
+
+// 6.2.5.6 CompletePropertyDescriptor(Desc)
+void js::CompletePropertyDescriptor(MutableHandle<PropertyDescriptor> desc) {
+ // Step 1.
+ desc.assertValid();
+
+ // Step 2.
+ // Let like be the Record { [[Value]]: undefined, [[Writable]]: false,
+ // [[Get]]: undefined, [[Set]]: undefined,
+ // [[Enumerable]]: false, [[Configurable]]: false }.
+
+ // Step 3.
+ if (desc.isGenericDescriptor() || desc.isDataDescriptor()) {
+ // Step 3.a.
+ if (!desc.hasValue()) {
+ desc.setValue(UndefinedHandleValue);
+ }
+ // Step 3.b.
+ if (!desc.hasWritable()) {
+ desc.setWritable(false);
+ }
+ } else {
+ // Step 4.a.
+ if (!desc.hasGetter()) {
+ desc.setGetter(nullptr);
+ }
+ // Step 4.b.
+ if (!desc.hasSetter()) {
+ desc.setSetter(nullptr);
+ }
+ }
+
+ // Step 5.
+ if (!desc.hasEnumerable()) {
+ desc.setEnumerable(false);
+ }
+
+ // Step 6.
+ if (!desc.hasConfigurable()) {
+ desc.setConfigurable(false);
+ }
+
+ desc.assertComplete();
+}
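+
+// For example (hypothetical input): completing an empty descriptor yields
+//   { value: undefined, writable: false, enumerable: false,
+//     configurable: false }
+// since a generic descriptor is completed as a data descriptor.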
+
+bool js::ReadPropertyDescriptors(
+ JSContext* cx, HandleObject props, bool checkAccessors,
+ MutableHandleIdVector ids, MutableHandle<PropertyDescriptorVector> descs) {
+ if (!GetPropertyKeys(cx, props, JSITER_OWNONLY | JSITER_SYMBOLS, ids)) {
+ return false;
+ }
+
+ RootedId id(cx);
+ for (size_t i = 0, len = ids.length(); i < len; i++) {
+ id = ids[i];
+ Rooted<PropertyDescriptor> desc(cx);
+ RootedValue v(cx);
+ if (!GetProperty(cx, props, props, id, &v) ||
+ !ToPropertyDescriptor(cx, v, checkAccessors, &desc) ||
+ !descs.append(desc)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/*** Seal and freeze ********************************************************/
+
+/* ES6 draft rev 29 (6 Dec 2014) 7.3.13. */
+bool js::SetIntegrityLevel(JSContext* cx, HandleObject obj,
+ IntegrityLevel level) {
+ cx->check(obj);
+
+ // Steps 3-5. (Steps 1-2 are redundant assertions.)
+ if (!PreventExtensions(cx, obj)) {
+ return false;
+ }
+
+ // Steps 6-9, loosely interpreted.
+ if (obj->is<NativeObject>() && !obj->is<TypedArrayObject>() &&
+ !obj->is<MappedArgumentsObject>()) {
+ Handle<NativeObject*> nobj = obj.as<NativeObject>();
+
+ // Use a fast path to seal/freeze properties. This has the benefit of
+ // creating shared property maps if possible, whereas the slower/generic
+ // implementation below ends up converting non-empty objects to dictionary
+ // mode.
+ if (nobj->shape()->propMapLength() > 0) {
+ if (!NativeObject::freezeOrSealProperties(cx, nobj, level)) {
+ return false;
+ }
+ }
+
+ // Ordinarily ArraySetLength handles this, but we're going behind its back
+ // right now, so we must do this manually.
+ if (level == IntegrityLevel::Frozen && obj->is<ArrayObject>()) {
+ obj->as<ArrayObject>().setNonWritableLength(cx);
+ }
+ } else {
+ // Steps 6-7.
+ RootedIdVector keys(cx);
+ if (!GetPropertyKeys(
+ cx, obj, JSITER_HIDDEN | JSITER_OWNONLY | JSITER_SYMBOLS, &keys)) {
+ return false;
+ }
+
+ RootedId id(cx);
+ Rooted<PropertyDescriptor> desc(cx, PropertyDescriptor::Empty());
+
+ // 8.a/9.a. The two different loops are merged here.
+ for (size_t i = 0; i < keys.length(); i++) {
+ id = keys[i];
+
+ if (level == IntegrityLevel::Sealed) {
+ // 8.a.i.
+ desc.setConfigurable(false);
+ } else {
+ // 9.a.i-ii.
+ Rooted<Maybe<PropertyDescriptor>> currentDesc(cx);
+ if (!GetOwnPropertyDescriptor(cx, obj, id, &currentDesc)) {
+ return false;
+ }
+
+ // 9.a.iii.
+ if (currentDesc.isNothing()) {
+ continue;
+ }
+
+ // 9.a.iii.1-2
+ desc = PropertyDescriptor::Empty();
+ if (currentDesc->isAccessorDescriptor()) {
+ desc.setConfigurable(false);
+ } else {
+ desc.setConfigurable(false);
+ desc.setWritable(false);
+ }
+ }
+
+ // 8.a.i-ii. / 9.a.iii.3-4
+ if (!DefineProperty(cx, obj, id, desc)) {
+ return false;
+ }
+ }
+ }
+
+ // Finally, freeze or seal the dense elements.
+ if (obj->is<NativeObject>()) {
+ if (!ObjectElements::FreezeOrSeal(cx, obj.as<NativeObject>(), level)) {
+ return false;
+ }
+ }
+
+ return true;
+}
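+
+ // Typical entry points (sketch): |Object.seal(obj)| corresponds to
+ // SetIntegrityLevel(cx, obj, IntegrityLevel::Sealed) and
+ // |Object.freeze(obj)| to IntegrityLevel::Frozen; both make |obj|
+ // non-extensible first and then tighten the attributes of every own
+ // property as above.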
+
+static bool ResolveLazyProperties(JSContext* cx, Handle<NativeObject*> obj) {
+ const JSClass* clasp = obj->getClass();
+ if (JSEnumerateOp enumerate = clasp->getEnumerate()) {
+ if (!enumerate(cx, obj)) {
+ return false;
+ }
+ }
+ if (clasp->getNewEnumerate() && clasp->getResolve()) {
+ RootedIdVector properties(cx);
+ if (!clasp->getNewEnumerate()(cx, obj, &properties,
+ /* enumerableOnly = */ false)) {
+ return false;
+ }
+
+ RootedId id(cx);
+ for (size_t i = 0; i < properties.length(); i++) {
+ id = properties[i];
+ bool found;
+ if (!HasOwnProperty(cx, obj, id, &found)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+ // ES6 draft rev 33 (12 Feb 2015) 7.3.15.
+bool js::TestIntegrityLevel(JSContext* cx, HandleObject obj,
+ IntegrityLevel level, bool* result) {
+ // Steps 3-6. (Steps 1-2 are redundant assertions.)
+ bool status;
+ if (!IsExtensible(cx, obj, &status)) {
+ return false;
+ }
+ if (status) {
+ *result = false;
+ return true;
+ }
+
+ // Fast path for native objects.
+ if (obj->is<NativeObject>()) {
+ Handle<NativeObject*> nobj = obj.as<NativeObject>();
+
+ // Force lazy properties to be resolved.
+ if (!ResolveLazyProperties(cx, nobj)) {
+ return false;
+ }
+
+ // Typed array elements are configurable, writable properties, so if any
+ // elements are present, the typed array can neither be sealed nor frozen.
+ if (nobj->is<TypedArrayObject>() &&
+ nobj->as<TypedArrayObject>().length() > 0) {
+ *result = false;
+ return true;
+ }
+
+ bool hasDenseElements = false;
+ for (size_t i = 0; i < nobj->getDenseInitializedLength(); i++) {
+ if (nobj->containsDenseElement(i)) {
+ hasDenseElements = true;
+ break;
+ }
+ }
+
+ if (hasDenseElements) {
+ // Unless the sealed flag is set, dense elements are configurable.
+ if (!nobj->denseElementsAreSealed()) {
+ *result = false;
+ return true;
+ }
+
+ // Unless the frozen flag is set, dense elements are writable.
+ if (level == IntegrityLevel::Frozen && !nobj->denseElementsAreFrozen()) {
+ *result = false;
+ return true;
+ }
+ }
+
+ // Steps 7-9.
+ for (ShapePropertyIter<NoGC> iter(nobj->shape()); !iter.done(); iter++) {
+ // Steps 9.c.i-ii.
+ if (iter->configurable() ||
+ (level == IntegrityLevel::Frozen && iter->isDataDescriptor() &&
+ iter->writable())) {
+ // Private fields on objects don't participate in the frozen state, so
+ // they are elided from this check.
+ if (iter->key().isPrivateName()) {
+ continue;
+ }
+
+ *result = false;
+ return true;
+ }
+ }
+ } else {
+ // Steps 7-8.
+ RootedIdVector props(cx);
+ if (!GetPropertyKeys(
+ cx, obj, JSITER_HIDDEN | JSITER_OWNONLY | JSITER_SYMBOLS, &props)) {
+ return false;
+ }
+
+ // Step 9.
+ RootedId id(cx);
+ Rooted<Maybe<PropertyDescriptor>> desc(cx);
+ for (size_t i = 0, len = props.length(); i < len; i++) {
+ id = props[i];
+
+ // Steps 9.a-b.
+ if (!GetOwnPropertyDescriptor(cx, obj, id, &desc)) {
+ return false;
+ }
+
+ // Step 9.c.
+ if (desc.isNothing()) {
+ continue;
+ }
+
+ // Steps 9.c.i-ii.
+ if (desc->configurable() ||
+ (level == IntegrityLevel::Frozen && desc->isDataDescriptor() &&
+ desc->writable())) {
+ // Since we don't request JSITER_PRIVATE in GetPropertyKeys above, we
+ // should never see a private name here.
+ MOZ_ASSERT(!id.isPrivateName());
+ *result = false;
+ return true;
+ }
+ }
+ }
+
+ // Step 10.
+ *result = true;
+ return true;
+}
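+
+ // Illustrative behavior (sketch): for |let a = Object.seal([1])|, the
+ // Sealed check reports true, but the Frozen check reports false because
+ // element 0 is still writable; Object.isSealed and Object.isFrozen
+ // observe exactly this.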
+
+/* * */
+
+static MOZ_ALWAYS_INLINE NativeObject* NewObject(JSContext* cx,
+ const JSClass* clasp,
+ Handle<TaggedProto> proto,
+ gc::AllocKind kind,
+ NewObjectKind newKind) {
+ MOZ_ASSERT(clasp->isNativeObject());
+
+ // Some classes have specialized allocation functions and shouldn't end up
+ // here.
+ MOZ_ASSERT(clasp != &ArrayObject::class_);
+ MOZ_ASSERT(clasp != &PlainObject::class_);
+ MOZ_ASSERT(!clasp->isJSFunction());
+
+ // Computing nfixed based on the AllocKind isn't right for objects which can
+ // store fixed data inline (TypedArrays and ArrayBuffers), so for simplicity
+ // and performance reasons we don't support such objects here.
+ MOZ_ASSERT(!ClassCanHaveFixedData(clasp));
+ size_t nfixed = GetGCKindSlots(kind);
+
+ if (CanChangeToBackgroundAllocKind(kind, clasp)) {
+ kind = ForegroundToBackgroundAllocKind(kind);
+ }
+
+ Rooted<SharedShape*> shape(
+ cx, SharedShape::getInitialShape(cx, clasp, cx->realm(), proto, nfixed,
+ ObjectFlags()));
+ if (!shape) {
+ return nullptr;
+ }
+
+ gc::Heap heap = GetInitialHeap(newKind, clasp);
+ NativeObject* obj = NativeObject::create(cx, kind, heap, shape);
+ if (!obj) {
+ return nullptr;
+ }
+
+ probes::CreateObject(cx, obj);
+ return obj;
+}
+
+NativeObject* js::NewObjectWithGivenTaggedProto(JSContext* cx,
+ const JSClass* clasp,
+ Handle<TaggedProto> proto,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind) {
+ return NewObject(cx, clasp, proto, allocKind, newKind);
+}
+
+NativeObject* js::NewObjectWithClassProto(JSContext* cx, const JSClass* clasp,
+ HandleObject protoArg,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind) {
+ if (protoArg) {
+ return NewObjectWithGivenTaggedProto(cx, clasp, AsTaggedProto(protoArg),
+ allocKind, newKind);
+ }
+
+ // Find the appropriate proto for clasp. Built-in classes have a cached
+ // proto on cx->global(); all others get %ObjectPrototype%.
+ JSProtoKey protoKey = JSCLASS_CACHED_PROTO_KEY(clasp);
+ if (protoKey == JSProto_Null) {
+ protoKey = JSProto_Object;
+ }
+
+ JSObject* proto = GlobalObject::getOrCreatePrototype(cx, protoKey);
+ if (!proto) {
+ return nullptr;
+ }
+
+ Rooted<TaggedProto> taggedProto(cx, TaggedProto(proto));
+ return NewObject(cx, clasp, taggedProto, allocKind, newKind);
+}
+
+bool js::GetPrototypeFromConstructor(JSContext* cx, HandleObject newTarget,
+ JSProtoKey intrinsicDefaultProto,
+ MutableHandleObject proto) {
+ RootedValue protov(cx);
+ if (!GetProperty(cx, newTarget, newTarget, cx->names().prototype, &protov)) {
+ return false;
+ }
+ if (protov.isObject()) {
+ proto.set(&protov.toObject());
+ } else if (newTarget->is<JSFunction>() &&
+ newTarget->as<JSFunction>().realm() == cx->realm()) {
+ // Steps 4.a-b fetch the builtin prototype of the current realm, which we
+ // represent as nullptr.
+ proto.set(nullptr);
+ } else if (intrinsicDefaultProto == JSProto_Null) {
+ // Bug 1317416. The caller did not pass a reasonable JSProtoKey, so let the
+ // caller select a prototype object. Most likely they will choose one from
+ // the wrong realm.
+ proto.set(nullptr);
+ } else {
+ // Step 4.a: Let realm be ? GetFunctionRealm(constructor);
+ Realm* realm = JS::GetFunctionRealm(cx, newTarget);
+ if (!realm) {
+ return false;
+ }
+
+ // Step 4.b: Set proto to realm's intrinsic object named
+ // intrinsicDefaultProto.
+ {
+ Maybe<AutoRealm> ar;
+ if (cx->realm() != realm) {
+ ar.emplace(cx, realm->maybeGlobal());
+ }
+ proto.set(GlobalObject::getOrCreatePrototype(cx, intrinsicDefaultProto));
+ }
+ if (!proto) {
+ return false;
+ }
+ if (!cx->compartment()->wrap(cx, proto)) {
+ return false;
+ }
+ }
+ return true;
+}
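+
+ // Illustrative behavior (sketch): for |class D extends Date {}|, |new D|
+ // reaches here with D as newTarget; D.prototype is an object and is used
+ // directly. If script sets |D.prototype = 1|, the non-object branches
+ // apply and the caller falls back to the built-in prototype for
+ // intrinsicDefaultProto (here JSProto_Date), resolved against
+ // newTarget's realm.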
+
+/* static */
+bool JSObject::nonNativeSetProperty(JSContext* cx, HandleObject obj,
+ HandleId id, HandleValue v,
+ HandleValue receiver,
+ ObjectOpResult& result) {
+ return obj->getOpsSetProperty()(cx, obj, id, v, receiver, result);
+}
+
+/* static */
+bool JSObject::nonNativeSetElement(JSContext* cx, HandleObject obj,
+ uint32_t index, HandleValue v,
+ HandleValue receiver,
+ ObjectOpResult& result) {
+ RootedId id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+ return nonNativeSetProperty(cx, obj, id, v, receiver, result);
+}
+
+static bool CopyPropertyFrom(JSContext* cx, HandleId id, HandleObject target,
+ HandleObject obj) {
+ // |target| must not be a CCW because we need to enter its realm below and
+ // CCWs are not associated with a single realm.
+ MOZ_ASSERT(!IsCrossCompartmentWrapper(target));
+
+ // |obj| and |cx| are generally not same-compartment with |target| here.
+ cx->check(obj, id);
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+
+ if (!GetOwnPropertyDescriptor(cx, obj, id, &desc)) {
+ return false;
+ }
+ MOZ_ASSERT(desc.isSome());
+
+ JSAutoRealm ar(cx, target);
+ cx->markId(id);
+ RootedId wrappedId(cx, id);
+ if (!cx->compartment()->wrap(cx, &desc)) {
+ return false;
+ }
+
+ Rooted<PropertyDescriptor> desc_(cx, *desc);
+ return DefineProperty(cx, target, wrappedId, desc_);
+}
+
+JS_PUBLIC_API bool JS_CopyOwnPropertiesAndPrivateFields(JSContext* cx,
+ HandleObject target,
+ HandleObject obj) {
+ // Both |obj| and |target| must not be CCWs because we need to enter their
+ // realms below and CCWs are not associated with a single realm.
+ MOZ_ASSERT(!IsCrossCompartmentWrapper(obj));
+ MOZ_ASSERT(!IsCrossCompartmentWrapper(target));
+
+ JSAutoRealm ar(cx, obj);
+
+ RootedIdVector props(cx);
+ if (!GetPropertyKeys(
+ cx, obj,
+ JSITER_PRIVATE | JSITER_OWNONLY | JSITER_HIDDEN | JSITER_SYMBOLS,
+ &props)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < props.length(); ++i) {
+ if (!CopyPropertyFrom(cx, props[i], target, obj)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool InitializePropertiesFromCompatibleNativeObject(
+ JSContext* cx, Handle<NativeObject*> dst, Handle<NativeObject*> src) {
+ cx->check(src, dst);
+ MOZ_ASSERT(src->getClass() == dst->getClass());
+ MOZ_ASSERT(dst->shape()->objectFlags().isEmpty());
+ MOZ_ASSERT(src->numFixedSlots() == dst->numFixedSlots());
+ MOZ_ASSERT(!src->inDictionaryMode());
+ MOZ_ASSERT(!dst->inDictionaryMode());
+
+ if (!dst->ensureElements(cx, src->getDenseInitializedLength())) {
+ return false;
+ }
+
+ uint32_t initialized = src->getDenseInitializedLength();
+ for (uint32_t i = 0; i < initialized; ++i) {
+ dst->setDenseInitializedLength(i + 1);
+ dst->initDenseElement(i, src->getDenseElement(i));
+ }
+
+ // If there are no properties to copy, we're done.
+ if (!src->sharedShape()->propMap()) {
+ return true;
+ }
+
+ Rooted<SharedShape*> shape(cx);
+ if (src->staticPrototype() == dst->staticPrototype()) {
+ shape = src->sharedShape();
+ } else {
+ // We need to generate a new shape for dst that has dst's proto but all
+ // the property information from src. Note that we asserted above that
+ // dst's object flags are empty.
+ SharedShape* srcShape = src->sharedShape();
+ ObjectFlags objFlags;
+ objFlags = CopyPropMapObjectFlags(objFlags, srcShape->objectFlags());
+ Rooted<SharedPropMap*> map(cx, srcShape->propMap());
+ uint32_t mapLength = srcShape->propMapLength();
+ shape = SharedShape::getPropMapShape(cx, dst->shape()->base(),
+ dst->numFixedSlots(), map, mapLength,
+ objFlags);
+ if (!shape) {
+ return false;
+ }
+ }
+
+ uint32_t oldSpan = dst->sharedShape()->slotSpan();
+ uint32_t newSpan = shape->slotSpan();
+ if (!dst->setShapeAndAddNewSlots(cx, shape, oldSpan, newSpan)) {
+ return false;
+ }
+ for (size_t i = JSCLASS_RESERVED_SLOTS(src->getClass()); i < newSpan; i++) {
+ dst->setSlot(i, src->getSlot(i));
+ }
+
+ return true;
+}
+
+JS_PUBLIC_API bool JS_InitializePropertiesFromCompatibleNativeObject(
+ JSContext* cx, HandleObject dst, HandleObject src) {
+ return InitializePropertiesFromCompatibleNativeObject(
+ cx, dst.as<NativeObject>(), src.as<NativeObject>());
+}
+
+bool js::ObjectMayBeSwapped(const JSObject* obj) {
+ const JSClass* clasp = obj->getClass();
+
+ // We want to optimize Window/globals and Gecko doesn't require transplanting
+ // them (only the WindowProxy around them). A Window may be a DOMClass, so we
+ // explicitly check if this is a global.
+ if (clasp->isGlobal()) {
+ return false;
+ }
+
+ // WindowProxy, Wrapper, DeadProxyObject, DOMProxy, and DOMClass (non-global)
+ // types may be swapped. It is hard to detect DOMProxy from the shell, so we
+ // target proxies in general.
+ return clasp->isProxyObject() || clasp->isDOMClass();
+}
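+
+ // Illustrative consumer (sketch): cross-compartment transplants (e.g.
+ // JS_TransplantObject remapping a wrapper or a WindowProxy) are built on
+ // JSObject::swap below, which is why only these proxy/DOM classes need
+ // to be swappable.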
+
+bool NativeObject::prepareForSwap(JSContext* cx,
+ MutableHandleValueVector slotValuesOut) {
+ MOZ_ASSERT(slotValuesOut.empty());
+
+ for (size_t i = 0; i < slotSpan(); i++) {
+ if (!slotValuesOut.append(getSlot(i))) {
+ return false;
+ }
+ }
+
+ if (hasDynamicSlots()) {
+ ObjectSlots* slotsHeader = getSlotsHeader();
+ size_t size = ObjectSlots::allocSize(slotsHeader->capacity());
+ RemoveCellMemory(this, size, MemoryUse::ObjectSlots);
+ if (!cx->nursery().isInside(slotsHeader)) {
+ if (!isTenured()) {
+ cx->nursery().removeMallocedBuffer(slotsHeader, size);
+ }
+ js_free(slotsHeader);
+ }
+ setEmptyDynamicSlots(0);
+ }
+
+ if (hasDynamicElements()) {
+ ObjectElements* elements = getElementsHeader();
+ void* allocatedElements = getUnshiftedElementsHeader();
+ size_t count = elements->numAllocatedElements();
+ size_t size = count * sizeof(HeapSlot);
+
+ if (isTenured()) {
+ RemoveCellMemory(this, size, MemoryUse::ObjectElements);
+ } else if (cx->nursery().isInside(allocatedElements)) {
+ // Move nursery-allocated elements in case they end up in a tenured
+ // object.
+ ObjectElements* newElements =
+ reinterpret_cast<ObjectElements*>(js_pod_malloc<HeapSlot>(count));
+ if (!newElements) {
+ return false;
+ }
+
+ memmove(newElements, elements, size);
+ elements_ = newElements->elements();
+ } else {
+ cx->nursery().removeMallocedBuffer(allocatedElements, size);
+ }
+ MOZ_ASSERT(hasDynamicElements());
+ }
+
+ return true;
+}
+
+/* static */
+bool NativeObject::fixupAfterSwap(JSContext* cx, Handle<NativeObject*> obj,
+ gc::AllocKind kind,
+ HandleValueVector slotValues) {
+ // This object has just been swapped with some other object, and its shape
+ // no longer reflects its allocated size. Correct this information and
+ // fill the slots in with the specified values.
+ MOZ_ASSERT_IF(!obj->inDictionaryMode(),
+ obj->slotSpan() == slotValues.length());
+
+ // Make sure the shape's numFixedSlots() is correct.
+ size_t nfixed = gc::GetGCKindSlots(kind);
+ if (nfixed != obj->shape()->numFixedSlots()) {
+ if (!NativeObject::changeNumFixedSlotsAfterSwap(cx, obj, nfixed)) {
+ return false;
+ }
+ MOZ_ASSERT(obj->shape()->numFixedSlots() == nfixed);
+ }
+
+ uint32_t oldDictionarySlotSpan =
+ obj->inDictionaryMode() ? slotValues.length() : 0;
+
+ MOZ_ASSERT(!obj->hasUniqueId());
+ size_t ndynamic =
+ calculateDynamicSlots(nfixed, slotValues.length(), obj->getClass());
+ size_t currentSlots = obj->getSlotsHeader()->capacity();
+ MOZ_ASSERT(ndynamic >= currentSlots);
+ if (ndynamic > currentSlots) {
+ if (!obj->growSlots(cx, currentSlots, ndynamic)) {
+ return false;
+ }
+ }
+
+ if (obj->inDictionaryMode()) {
+ obj->setDictionaryModeSlotSpan(oldDictionarySlotSpan);
+ }
+
+ for (size_t i = 0, len = slotValues.length(); i < len; i++) {
+ obj->initSlotUnchecked(i, slotValues[i]);
+ }
+
+ if (obj->hasDynamicElements()) {
+ ObjectElements* elements = obj->getElementsHeader();
+ void* allocatedElements = obj->getUnshiftedElementsHeader();
+ MOZ_ASSERT(!cx->nursery().isInside(allocatedElements));
+ size_t size = elements->numAllocatedElements() * sizeof(HeapSlot);
+ if (obj->isTenured()) {
+ AddCellMemory(obj, size, MemoryUse::ObjectElements);
+ } else if (!cx->nursery().registerMallocedBuffer(allocatedElements, size)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+[[nodiscard]] bool ProxyObject::prepareForSwap(
+ JSContext* cx, MutableHandleValueVector valuesOut) {
+ MOZ_ASSERT(valuesOut.empty());
+
+ // Remove the GCPtr<Value>s we're about to swap from the store buffer, to
+ // ensure we don't trace bogus values.
+ gc::StoreBuffer& sb = cx->runtime()->gc.storeBuffer();
+
+ // Reserve space for the expando, private slot and the reserved slots.
+ if (!valuesOut.reserve(2 + numReservedSlots())) {
+ return false;
+ }
+
+ js::detail::ProxyValueArray* valArray = data.values();
+ sb.unputValue(&valArray->expandoSlot);
+ sb.unputValue(&valArray->privateSlot);
+ valuesOut.infallibleAppend(valArray->expandoSlot);
+ valuesOut.infallibleAppend(valArray->privateSlot);
+
+ for (size_t i = 0; i < numReservedSlots(); i++) {
+ sb.unputValue(&valArray->reservedSlots.slots[i]);
+ valuesOut.infallibleAppend(valArray->reservedSlots.slots[i]);
+ }
+
+ if (isTenured() && !usingInlineValueArray()) {
+ size_t count = detail::ProxyValueArray::allocCount(numReservedSlots());
+ RemoveCellMemory(this, count * sizeof(Value),
+ MemoryUse::ProxyExternalValueArray);
+ js_free(valArray);
+ data.reservedSlots = nullptr;
+ }
+
+ return true;
+}
+
+bool ProxyObject::fixupAfterSwap(JSContext* cx,
+ const HandleValueVector values) {
+ MOZ_ASSERT(getClass()->isProxyObject());
+
+ size_t nreserved = numReservedSlots();
+
+ // |values| contains the expando slot, private slot and the reserved slots.
+ MOZ_ASSERT(values.length() == 2 + nreserved);
+
+ // Allocate the external value array in malloc memory, even for nursery
+ // proxies.
+ size_t count = detail::ProxyValueArray::allocCount(nreserved);
+ auto* allocation = js_pod_malloc<JS::Value>(count);
+ if (!allocation) {
+ return false;
+ }
+
+ size_t size = count * sizeof(Value);
+ if (isTenured()) {
+ AddCellMemory(&asTenured(), size, MemoryUse::ProxyExternalValueArray);
+ } else if (!cx->nursery().registerMallocedBuffer(allocation, size)) {
+ js_free(allocation);
+ return false;
+ }
+
+ auto* valArray = reinterpret_cast<js::detail::ProxyValueArray*>(allocation);
+
+ valArray->expandoSlot = values[0];
+ valArray->privateSlot = values[1];
+
+ for (size_t i = 0; i < nreserved; i++) {
+ valArray->reservedSlots.slots[i] = values[i + 2];
+ }
+
+ data.reservedSlots = &valArray->reservedSlots;
+ MOZ_ASSERT(!usingInlineValueArray());
+ return true;
+}
+
+static gc::AllocKind SwappableObjectAllocKind(JSObject* obj) {
+ MOZ_ASSERT(ObjectMayBeSwapped(obj));
+
+ if (obj->isTenured()) {
+ return obj->asTenured().getAllocKind();
+ }
+
+ if (obj->is<NativeObject>()) {
+ return obj->as<NativeObject>().allocKindForTenure();
+ }
+
+ return obj->as<ProxyObject>().allocKindForTenure();
+}
+
+/* Use this method with extreme caution. It trades the guts of two objects. */
+void JSObject::swap(JSContext* cx, HandleObject a, HandleObject b,
+ AutoEnterOOMUnsafeRegion& oomUnsafe) {
+ // Ensure swap doesn't cause a finalizer to be run at the wrong time.
+ MOZ_ASSERT(a->isBackgroundFinalized() == b->isBackgroundFinalized());
+
+ MOZ_ASSERT(a->compartment() == b->compartment());
+
+ // You must have entered the objects' compartment before calling this.
+ MOZ_ASSERT(cx->compartment() == a->compartment());
+
+ // Only certain types of objects are allowed to be swapped. This allows the
+ // JITs to better optimize objects that can never swap and rules out most
+ // builtin objects that have special behaviour.
+ MOZ_RELEASE_ASSERT(js::ObjectMayBeSwapped(a));
+ MOZ_RELEASE_ASSERT(js::ObjectMayBeSwapped(b));
+
+ if (!Watchtower::watchObjectSwap(cx, a, b)) {
+ oomUnsafe.crash("watchObjectSwap");
+ }
+
+ // Ensure we update any embedded nursery pointers in either object.
+ gc::StoreBuffer& storeBuffer = cx->runtime()->gc.storeBuffer();
+ if (a->isTenured()) {
+ storeBuffer.putWholeCell(a);
+ }
+ if (b->isTenured()) {
+ storeBuffer.putWholeCell(b);
+ }
+ if (a->isTenured() || b->isTenured()) {
+ if (a->zone()->wasGCStarted()) {
+ storeBuffer.setMayHavePointersToDeadCells();
+ }
+ }
+
+ unsigned r = NotifyGCPreSwap(a, b);
+
+ ProxyObject* pa = a->is<ProxyObject>() ? &a->as<ProxyObject>() : nullptr;
+ ProxyObject* pb = b->is<ProxyObject>() ? &b->as<ProxyObject>() : nullptr;
+ bool aIsProxyWithInlineValues = pa && pa->usingInlineValueArray();
+ bool bIsProxyWithInlineValues = pb && pb->usingInlineValueArray();
+
+ bool aIsUsedAsPrototype = a->isUsedAsPrototype();
+ bool bIsUsedAsPrototype = b->isUsedAsPrototype();
+
+ // Both objects are in the same zone; cache it for swapping the per-cell
+ // memory associations (slots, elements) below.
+ Zone* zone = a->zone();
+
+ // Record any associated unique IDs and prepare for swap.
+ //
+ // Note that unique IDs are NOT swapped but remain associated with the
+ // original address.
+ uint64_t aid = 0;
+ uint64_t bid = 0;
+ (void)gc::MaybeGetUniqueId(a, &aid);
+ (void)gc::MaybeGetUniqueId(b, &bid);
+ NativeObject* na = a->is<NativeObject>() ? &a->as<NativeObject>() : nullptr;
+ NativeObject* nb = b->is<NativeObject>() ? &b->as<NativeObject>() : nullptr;
+ if ((aid || bid) && (na || nb)) {
+ // We can't remove unique IDs from native objects when they are swapped with
+ // objects without an ID. Instead ensure they both have IDs so we always
+ // have something to overwrite the old ID with.
+ if (!gc::GetOrCreateUniqueId(a, &aid) ||
+ !gc::GetOrCreateUniqueId(b, &bid)) {
+ oomUnsafe.crash("Failed to create unique ID during swap");
+ }
+
+ // IDs stored in NativeObjects could shadow those stored in the zone
+ // table. Remove any zone table IDs first.
+ if (pa && aid) {
+ gc::RemoveUniqueId(a);
+ }
+ if (pb && bid) {
+ gc::RemoveUniqueId(b);
+ }
+ }
+
+ gc::AllocKind ka = SwappableObjectAllocKind(a);
+ gc::AllocKind kb = SwappableObjectAllocKind(b);
+
+ size_t sa = gc::Arena::thingSize(ka);
+ size_t sb = gc::Arena::thingSize(kb);
+ if (sa == sb && a->isTenured() == b->isTenured()) {
+ // When both objects are the same size and in the same heap, just do a plain
+ // swap of their contents.
+
+ // Swap slot associations.
+ zone->swapCellMemory(a, b, MemoryUse::ObjectSlots);
+
+ size_t size = sa;
+ char tmp[sizeof(JSObject_Slots16)];
+ MOZ_ASSERT(size <= sizeof(tmp));
+
+ js_memcpy(tmp, a, size);
+ js_memcpy(a, b, size);
+ js_memcpy(b, tmp, size);
+
+ zone->swapCellMemory(a, b, MemoryUse::ObjectElements);
+ zone->swapCellMemory(a, b, MemoryUse::ProxyExternalValueArray);
+
+ if (aIsProxyWithInlineValues) {
+ b->as<ProxyObject>().setInlineValueArray();
+ }
+ if (bIsProxyWithInlineValues) {
+ a->as<ProxyObject>().setInlineValueArray();
+ }
+ } else {
+ // Avoid GC in here to avoid confusing the tracing code with our
+ // intermediate state.
+ gc::AutoSuppressGC suppress(cx);
+
+ // When the objects have different sizes, they will have different numbers
+ // of fixed slots before and after the swap, so the slots for native objects
+ // will need to be rearranged. Remember the original values from the
+ // objects.
+ RootedValueVector avals(cx);
+ RootedValueVector bvals(cx);
+ if (na && !na->prepareForSwap(cx, &avals)) {
+ oomUnsafe.crash("NativeObject::prepareForSwap");
+ }
+ if (nb && !nb->prepareForSwap(cx, &bvals)) {
+ oomUnsafe.crash("NativeObject::prepareForSwap");
+ }
+
+ // Do the same for proxy value arrays.
+ if (pa && !pa->prepareForSwap(cx, &avals)) {
+ oomUnsafe.crash("ProxyObject::prepareForSwap");
+ }
+ if (pb && !pb->prepareForSwap(cx, &bvals)) {
+ oomUnsafe.crash("ProxyObject::prepareForSwap");
+ }
+
+ // Swap the main fields of the objects, whether they are native objects or
+ // proxies.
+ char tmp[sizeof(JSObject_Slots0)];
+ js_memcpy(&tmp, a, sizeof tmp);
+ js_memcpy(a, b, sizeof tmp);
+ js_memcpy(b, &tmp, sizeof tmp);
+
+ if (na &&
+ !NativeObject::fixupAfterSwap(cx, b.as<NativeObject>(), kb, avals)) {
+ oomUnsafe.crash("NativeObject::fixupAfterSwap");
+ }
+ if (nb &&
+ !NativeObject::fixupAfterSwap(cx, a.as<NativeObject>(), ka, bvals)) {
+ oomUnsafe.crash("NativeObject::fixupAfterSwap");
+ }
+
+ if (pa && !b->as<ProxyObject>().fixupAfterSwap(cx, avals)) {
+ oomUnsafe.crash("ProxyObject::fixupAfterSwap");
+ }
+ if (pb && !a->as<ProxyObject>().fixupAfterSwap(cx, bvals)) {
+ oomUnsafe.crash("ProxyObject::fixupAfterSwap");
+ }
+ }
+
+ // Restore original unique IDs.
+ if ((aid || bid) && (na || nb)) {
+ if ((aid && !gc::SetOrUpdateUniqueId(cx, a, aid)) ||
+ (bid && !gc::SetOrUpdateUniqueId(cx, b, bid))) {
+ oomUnsafe.crash("Failed to set unique ID after swap");
+ }
+ }
+ MOZ_ASSERT_IF(aid, gc::GetUniqueIdInfallible(a) == aid);
+ MOZ_ASSERT_IF(bid, gc::GetUniqueIdInfallible(b) == bid);
+
+ // Preserve the IsUsedAsPrototype flag on the objects.
+ if (aIsUsedAsPrototype) {
+ if (!JSObject::setIsUsedAsPrototype(cx, a)) {
+ oomUnsafe.crash("setIsUsedAsPrototype");
+ }
+ }
+ if (bIsUsedAsPrototype) {
+ if (!JSObject::setIsUsedAsPrototype(cx, b)) {
+ oomUnsafe.crash("setIsUsedAsPrototype");
+ }
+ }
+
+ /*
+ * We need a write barrier here. If |a| was marked and |b| was not, then
+ * after the swap, |b|'s guts would never be marked. The write barrier
+ * solves this.
+ *
+ * Normally write barriers happen before the write. However, that's not
+ * necessary here because nothing is being destroyed. We're just swapping.
+ */
+ PreWriteBarrier(zone, a.get(), [](JSTracer* trc, JSObject* obj) {
+ obj->traceChildren(trc);
+ });
+ PreWriteBarrier(zone, b.get(), [](JSTracer* trc, JSObject* obj) {
+ obj->traceChildren(trc);
+ });
+
+ NotifyGCPostSwap(a, b, r);
+}
+
+static NativeObject* DefineConstructorAndPrototype(
+ JSContext* cx, HandleObject obj, Handle<JSAtom*> atom,
+ HandleObject protoProto, const JSClass* clasp, Native constructor,
+ unsigned nargs, const JSPropertySpec* ps, const JSFunctionSpec* fs,
+ const JSPropertySpec* static_ps, const JSFunctionSpec* static_fs,
+ NativeObject** ctorp) {
+ // Create the prototype object.
+ Rooted<NativeObject*> proto(
+ cx, GlobalObject::createBlankPrototypeInheriting(cx, clasp, protoProto));
+ if (!proto) {
+ return nullptr;
+ }
+
+ Rooted<NativeObject*> ctor(cx);
+ if (!constructor) {
+ ctor = proto;
+ } else {
+ ctor = NewNativeConstructor(cx, constructor, nargs, atom);
+ if (!ctor) {
+ return nullptr;
+ }
+
+ if (!LinkConstructorAndPrototype(cx, ctor, proto)) {
+ return nullptr;
+ }
+ }
+
+ if (!DefinePropertiesAndFunctions(cx, proto, ps, fs) ||
+ (ctor != proto &&
+ !DefinePropertiesAndFunctions(cx, ctor, static_ps, static_fs))) {
+ return nullptr;
+ }
+
+ RootedId id(cx, AtomToId(atom));
+ RootedValue value(cx, ObjectValue(*ctor));
+ if (!DefineDataProperty(cx, obj, id, value, 0)) {
+ return nullptr;
+ }
+
+ if (ctorp) {
+ *ctorp = ctor;
+ }
+ return proto;
+}
+
+NativeObject* js::InitClass(JSContext* cx, HandleObject obj,
+ const JSClass* protoClass, HandleObject protoProto_,
+ const char* name, Native constructor,
+ unsigned nargs, const JSPropertySpec* ps,
+ const JSFunctionSpec* fs,
+ const JSPropertySpec* static_ps,
+ const JSFunctionSpec* static_fs,
+ NativeObject** ctorp) {
+ Rooted<JSAtom*> atom(cx, Atomize(cx, name, strlen(name)));
+ if (!atom) {
+ return nullptr;
+ }
+
+ /*
+ * All instances of the class will inherit properties from the prototype
+ * object we are about to create (in DefineConstructorAndPrototype), which
+ * in turn will inherit from protoProto.
+ *
+ * If protoProto is nullptr, default to Object.prototype.
+ * If protoClass is nullptr, default to PlainObject.
+ */
+ RootedObject protoProto(cx, protoProto_);
+ if (!protoProto) {
+ protoProto = &cx->global()->getObjectPrototype();
+ }
+ if (!protoClass) {
+ protoClass = &PlainObject::class_;
+ }
+
+ return DefineConstructorAndPrototype(cx, obj, atom, protoProto, protoClass,
+ constructor, nargs, ps, fs, static_ps,
+ static_fs, ctorp);
+}
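+
+ // Hypothetical usage sketch (|MyConstruct| is a made-up JSNative; the
+ // nullptr class/proto arguments take the defaults documented above):
+ //
+ //   NativeObject* proto =
+ //       InitClass(cx, global, nullptr, nullptr, "MyThing", MyConstruct,
+ //                 0, nullptr, nullptr, nullptr, nullptr, nullptr);
+ //
+ // This creates MyThing.prototype (a PlainObject inheriting from
+ // Object.prototype), defines a |MyThing| constructor property on
+ // |global|, and links the pair via .prototype/.constructor.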
+
+/**
+ * Returns the original Object.prototype from the embedding-provided incumbent
+ * global.
+ *
+ * Really, we want the incumbent global itself so we can pass it to other
+ * embedding hooks which need it. Specifically, the enqueue promise hook
+ * takes an incumbent global so it can set that on the PromiseCallbackJob
+ * it creates.
+ *
+ * The reason for not just returning the global itself is that we'd need to
+ * wrap it into the current compartment, and later unwrap it. Unwrapping
+ * globals is tricky, though: we might accidentally unwrap through an inner
+ * to its outer window and end up with the wrong global. Plain objects don't
+ * have this problem, so we use the global's Object.prototype. The code using
+ * it - e.g. EnqueuePromiseReactionJob - can then unwrap the object and get
+ * its global without fear of unwrapping too far.
+ */
+bool js::GetObjectFromIncumbentGlobal(JSContext* cx, MutableHandleObject obj) {
+ Rooted<GlobalObject*> globalObj(cx, cx->runtime()->getIncumbentGlobal(cx));
+ if (!globalObj) {
+ obj.set(nullptr);
+ return true;
+ }
+
+ obj.set(&globalObj->getObjectPrototype());
+
+ // The object might be from a different compartment, so wrap it.
+ if (obj && !cx->compartment()->wrap(cx, obj)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsStandardPrototype(JSObject* obj, JSProtoKey key) {
+ return obj->nonCCWGlobal().maybeGetPrototype(key) == obj;
+}
+
+JSProtoKey JS::IdentifyStandardInstance(JSObject* obj) {
+ // Note: The prototype shares its JSClass with instances.
+ MOZ_ASSERT(!obj->is<CrossCompartmentWrapperObject>());
+ JSProtoKey key = StandardProtoKeyOrNull(obj);
+ if (key != JSProto_Null && !IsStandardPrototype(obj, key)) {
+ return key;
+ }
+ return JSProto_Null;
+}
+
+JSProtoKey JS::IdentifyStandardPrototype(JSObject* obj) {
+ // Note: The prototype shares its JSClass with instances.
+ MOZ_ASSERT(!obj->is<CrossCompartmentWrapperObject>());
+ JSProtoKey key = StandardProtoKeyOrNull(obj);
+ if (key != JSProto_Null && IsStandardPrototype(obj, key)) {
+ return key;
+ }
+ return JSProto_Null;
+}
+
+JSProtoKey JS::IdentifyStandardInstanceOrPrototype(JSObject* obj) {
+ return StandardProtoKeyOrNull(obj);
+}
+
+JSProtoKey JS::IdentifyStandardConstructor(JSObject* obj) {
+ // Note that isNativeConstructor does not imply that we are a standard
+ // constructor, but the converse is true (at least until we start having
+ // self-hosted constructors for standard classes). This lets us avoid a costly
+ // loop for many functions (which, depending on the call site, may be the
+ // common case).
+ if (!obj->is<JSFunction>() ||
+ !(obj->as<JSFunction>().flags().isNativeConstructor())) {
+ return JSProto_Null;
+ }
+
+ static_assert(JSProto_Null == 0,
+ "Loop below can start at 1 to skip JSProto_Null");
+
+ GlobalObject& global = obj->as<JSFunction>().global();
+ for (size_t k = 1; k < JSProto_LIMIT; ++k) {
+ JSProtoKey key = static_cast<JSProtoKey>(k);
+ if (global.maybeGetConstructor(key) == obj) {
+ return key;
+ }
+ }
+
+ return JSProto_Null;
+}
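+
+ // Illustrative results (sketch): handed the Array constructor of some
+ // global, this returns JSProto_Array; for an ordinary scripted function
+ // the isNativeConstructor flag check fails and JSProto_Null is returned
+ // without scanning the table.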
+
+bool js::LookupProperty(JSContext* cx, HandleObject obj, js::HandleId id,
+ MutableHandleObject objp, PropertyResult* propp) {
+ if (LookupPropertyOp op = obj->getOpsLookupProperty()) {
+ return op(cx, obj, id, objp, propp);
+ }
+ return NativeLookupPropertyInline<CanGC>(cx, obj.as<NativeObject>(), id, objp,
+ propp);
+}
+
+bool js::LookupName(JSContext* cx, Handle<PropertyName*> name,
+ HandleObject envChain, MutableHandleObject objp,
+ MutableHandleObject pobjp, PropertyResult* propp) {
+ RootedId id(cx, NameToId(name));
+
+ for (RootedObject env(cx, envChain); env; env = env->enclosingEnvironment()) {
+ if (!LookupProperty(cx, env, id, pobjp, propp)) {
+ return false;
+ }
+ if (propp->isFound()) {
+ objp.set(env);
+ return true;
+ }
+ }
+
+ objp.set(nullptr);
+ pobjp.set(nullptr);
+ propp->setNotFound();
+ return true;
+}
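+
+ // Illustrative walk (sketch): resolving a name |x| against a chain like
+ // block env -> function env -> global starts at the innermost
+ // environment and follows enclosingEnvironment() outward; the first
+ // environment on which the lookup finds the property is returned in
+ // |objp|, and the object actually holding the property in |pobjp|.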
+
+bool js::LookupNameNoGC(JSContext* cx, PropertyName* name, JSObject* envChain,
+ JSObject** objp, NativeObject** pobjp,
+ PropertyResult* propp) {
+ AutoAssertNoPendingException nogc(cx);
+
+ MOZ_ASSERT(!*objp && !*pobjp && propp->isNotFound());
+
+ for (JSObject* env = envChain; env; env = env->enclosingEnvironment()) {
+ if (env->getOpsLookupProperty()) {
+ return false;
+ }
+ if (!NativeLookupPropertyInline<NoGC>(cx, &env->as<NativeObject>(),
+ NameToId(name), pobjp, propp)) {
+ return false;
+ }
+ if (propp->isFound()) {
+ *objp = env;
+ return true;
+ }
+ }
+
+ return true;
+}
+
+bool js::LookupNameWithGlobalDefault(JSContext* cx, Handle<PropertyName*> name,
+ HandleObject envChain,
+ MutableHandleObject objp) {
+ RootedId id(cx, NameToId(name));
+
+ RootedObject pobj(cx);
+ PropertyResult prop;
+
+ RootedObject env(cx, envChain);
+ for (; !env->is<GlobalObject>(); env = env->enclosingEnvironment()) {
+ if (!LookupProperty(cx, env, id, &pobj, &prop)) {
+ return false;
+ }
+ if (prop.isFound()) {
+ break;
+ }
+ }
+
+ objp.set(env);
+ return true;
+}
+
+bool js::LookupNameUnqualified(JSContext* cx, Handle<PropertyName*> name,
+ HandleObject envChain,
+ MutableHandleObject objp) {
+ RootedId id(cx, NameToId(name));
+
+ RootedObject pobj(cx);
+ PropertyResult prop;
+
+ RootedObject env(cx, envChain);
+ for (; !env->isUnqualifiedVarObj(); env = env->enclosingEnvironment()) {
+ if (!LookupProperty(cx, env, id, &pobj, &prop)) {
+ return false;
+ }
+ if (prop.isFound()) {
+ break;
+ }
+ }
+
+ // See note above RuntimeLexicalErrorObject.
+ if (pobj == env) {
+ bool isTDZ = false;
+ if (prop.isFound() && name != cx->names().dotThis) {
+ // Treat Debugger environments specially for TDZ checks, as they
+ // look like non-native environments but in fact wrap native
+ // environments.
+ if (env->is<DebugEnvironmentProxy>()) {
+ RootedValue v(cx);
+ Rooted<DebugEnvironmentProxy*> envProxy(
+ cx, &env->as<DebugEnvironmentProxy>());
+ if (!DebugEnvironmentProxy::getMaybeSentinelValue(cx, envProxy, id,
+ &v)) {
+ return false;
+ }
+ isTDZ = IsUninitializedLexical(v);
+ } else {
+ isTDZ = IsUninitializedLexicalSlot(env, prop);
+ }
+ }
+
+ if (isTDZ) {
+ env = RuntimeLexicalErrorObject::create(cx, env,
+ JSMSG_UNINITIALIZED_LEXICAL);
+ if (!env) {
+ return false;
+ }
+ } else if (env->is<LexicalEnvironmentObject>() &&
+ !prop.propertyInfo().writable()) {
+ // Assigning to a named lambda callee name is a no-op in sloppy mode.
+ if (!(env->is<BlockLexicalEnvironmentObject>() &&
+ env->as<BlockLexicalEnvironmentObject>().scope().kind() ==
+ ScopeKind::NamedLambda)) {
+ MOZ_ASSERT(name != cx->names().dotThis);
+ env =
+ RuntimeLexicalErrorObject::create(cx, env, JSMSG_BAD_CONST_ASSIGN);
+ if (!env) {
+ return false;
+ }
+ }
+ }
+ }
+
+ objp.set(env);
+ return true;
+}
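+
+ // Illustrative outcome (sketch): an unqualified assignment |x = 1|
+ // evaluated while |let x| is still uninitialized finds the TDZ slot, so
+ // |env| is replaced by a RuntimeLexicalErrorObject that throws
+ // JSMSG_UNINITIALIZED_LEXICAL on use; assigning to a const binding
+ // similarly yields JSMSG_BAD_CONST_ASSIGN.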
+
+bool js::HasOwnProperty(JSContext* cx, HandleObject obj, HandleId id,
+ bool* result) {
+ if (obj->is<ProxyObject>()) {
+ return Proxy::hasOwn(cx, obj, id, result);
+ }
+
+ if (GetOwnPropertyOp op = obj->getOpsGetOwnPropertyDescriptor()) {
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ if (!op(cx, obj, id, &desc)) {
+ return false;
+ }
+ *result = desc.isSome();
+ return true;
+ }
+
+ PropertyResult prop;
+ if (!NativeLookupOwnProperty<CanGC>(cx, obj.as<NativeObject>(), id, &prop)) {
+ return false;
+ }
+ *result = prop.isFound();
+ return true;
+}
+
+bool js::LookupPropertyPure(JSContext* cx, JSObject* obj, jsid id,
+ NativeObject** objp, PropertyResult* propp) {
+ if (obj->getOpsLookupProperty()) {
+ return false;
+ }
+ return NativeLookupPropertyInline<NoGC, LookupResolveMode::CheckMayResolve>(
+ cx, &obj->as<NativeObject>(), id, objp, propp);
+}
+
+bool js::LookupOwnPropertyPure(JSContext* cx, JSObject* obj, jsid id,
+ PropertyResult* propp) {
+ if (obj->getOpsLookupProperty()) {
+ return false;
+ }
+ return NativeLookupOwnPropertyInline<NoGC,
+ LookupResolveMode::CheckMayResolve>(
+ cx, &obj->as<NativeObject>(), id, propp);
+}
+
+static inline bool NativeGetPureInline(NativeObject* pobj, jsid id,
+ PropertyResult prop, Value* vp,
+ JSContext* cx) {
+ if (prop.isDenseElement()) {
+ *vp = pobj->getDenseElement(prop.denseElementIndex());
+ return true;
+ }
+ if (prop.isTypedArrayElement()) {
+ size_t idx = prop.typedArrayElementIndex();
+ return pobj->as<TypedArrayObject>().getElement<NoGC>(cx, idx, vp);
+ }
+
+ // Fail if we have a custom getter.
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (!propInfo.isDataProperty()) {
+ return false;
+ }
+
+ *vp = pobj->getSlot(propInfo.slot());
+ MOZ_ASSERT(!vp->isMagic());
+ return true;
+}
+
+bool js::GetPropertyPure(JSContext* cx, JSObject* obj, jsid id, Value* vp) {
+ NativeObject* pobj;
+ PropertyResult prop;
+ if (!LookupPropertyPure(cx, obj, id, &pobj, &prop)) {
+ return false;
+ }
+
+ if (prop.isNotFound()) {
+ vp->setUndefined();
+ return true;
+ }
+
+ return NativeGetPureInline(pobj, id, prop, vp, cx);
+}
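+
+ // Illustrative contract (sketch): "pure" means no observable side
+ // effects may run. For a plain |{ x: 1 }| object this stores 1 in *vp
+ // and returns true; for a proxy, or a property with a getter, it returns
+ // false so the caller (typically the JITs) can fall back to the slow,
+ // effectful path.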
+
+bool js::GetOwnPropertyPure(JSContext* cx, JSObject* obj, jsid id, Value* vp,
+ bool* found) {
+ PropertyResult prop;
+ if (!LookupOwnPropertyPure(cx, obj, id, &prop)) {
+ return false;
+ }
+
+ if (prop.isNotFound()) {
+ *found = false;
+ vp->setUndefined();
+ return true;
+ }
+
+ *found = true;
+ return obj->is<NativeObject>() &&
+ NativeGetPureInline(&obj->as<NativeObject>(), id, prop, vp, cx);
+}
+
+static inline bool NativeGetGetterPureInline(NativeObject* holder,
+ PropertyResult prop,
+ JSFunction** fp) {
+ MOZ_ASSERT(prop.isNativeProperty());
+
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (holder->hasGetter(propInfo)) {
+ JSObject* getter = holder->getGetter(propInfo);
+ if (getter->is<JSFunction>()) {
+ *fp = &getter->as<JSFunction>();
+ return true;
+ }
+ }
+
+ *fp = nullptr;
+ return true;
+}
+
+bool js::GetGetterPure(JSContext* cx, JSObject* obj, jsid id, JSFunction** fp) {
+ /* Just like GetPropertyPure, but gets the getter function without
+ * invoking it. */
+ NativeObject* pobj;
+ PropertyResult prop;
+ if (!LookupPropertyPure(cx, obj, id, &pobj, &prop)) {
+ return false;
+ }
+
+ if (prop.isNotFound()) {
+ *fp = nullptr;
+ return true;
+ }
+
+ return prop.isNativeProperty() && NativeGetGetterPureInline(pobj, prop, fp);
+}
+
+bool js::GetOwnGetterPure(JSContext* cx, JSObject* obj, jsid id,
+ JSFunction** fp) {
+ JS::AutoCheckCannotGC nogc;
+ PropertyResult prop;
+ if (!LookupOwnPropertyPure(cx, obj, id, &prop)) {
+ return false;
+ }
+
+ if (prop.isNotFound()) {
+ *fp = nullptr;
+ return true;
+ }
+
+ return prop.isNativeProperty() &&
+ NativeGetGetterPureInline(&obj->as<NativeObject>(), prop, fp);
+}
+
+bool js::GetOwnNativeGetterPure(JSContext* cx, JSObject* obj, jsid id,
+ JSNative* native) {
+ JS::AutoCheckCannotGC nogc;
+ *native = nullptr;
+ PropertyResult prop;
+ if (!LookupOwnPropertyPure(cx, obj, id, &prop)) {
+ return false;
+ }
+
+ if (!prop.isNativeProperty()) {
+ return true;
+ }
+
+ PropertyInfo propInfo = prop.propertyInfo();
+
+ NativeObject* nobj = &obj->as<NativeObject>();
+ if (!nobj->hasGetter(propInfo)) {
+ return true;
+ }
+
+ JSObject* getterObj = nobj->getGetter(propInfo);
+ if (!getterObj->is<JSFunction>()) {
+ return true;
+ }
+
+ JSFunction* getter = &getterObj->as<JSFunction>();
+ if (!getter->isNativeFun()) {
+ return true;
+ }
+
+ *native = getter->native();
+ return true;
+}
+
+bool js::HasOwnDataPropertyPure(JSContext* cx, JSObject* obj, jsid id,
+ bool* result) {
+ PropertyResult prop;
+ if (!LookupOwnPropertyPure(cx, obj, id, &prop)) {
+ return false;
+ }
+
+ *result = prop.isNativeProperty() && prop.propertyInfo().isDataProperty();
+ return true;
+}
+
+bool js::GetPrototypeIfOrdinary(JSContext* cx, HandleObject obj,
+ bool* isOrdinary, MutableHandleObject protop) {
+ if (obj->is<js::ProxyObject>()) {
+ return js::Proxy::getPrototypeIfOrdinary(cx, obj, isOrdinary, protop);
+ }
+
+ *isOrdinary = true;
+ protop.set(obj->staticPrototype());
+ return true;
+}
+
+/*** ES6 standard internal methods ******************************************/
+
+bool js::SetPrototype(JSContext* cx, HandleObject obj, HandleObject proto,
+ JS::ObjectOpResult& result) {
+ // The proxy trap subsystem fully handles prototype-setting for proxies
+ // with dynamic [[Prototype]]s.
+ if (obj->hasDynamicPrototype()) {
+ MOZ_ASSERT(obj->is<ProxyObject>());
+ return Proxy::setPrototype(cx, obj, proto, result);
+ }
+
+ /*
+ * ES6 9.1.2 steps 3-4: if |obj.[[Prototype]]| has the same value as
+ * |proto|, return true. Since the values in question are objects, we can
+ * just compare pointers.
+ */
+ if (proto == obj->staticPrototype()) {
+ return result.succeed();
+ }
+
+ /* Disallow mutation of immutable [[Prototype]]s. */
+ if (obj->staticPrototypeIsImmutable()) {
+ return result.fail(JSMSG_CANT_SET_PROTO);
+ }
+
+ /*
+ * Disallow mutating the [[Prototype]] on Typed Objects, per the spec.
+ */
+ if (obj->is<WasmGcObject>()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CANT_SET_PROTO_OF,
+ "incompatible WebAssembly object");
+ return false;
+ }
+
+ /* ES6 9.1.2 step 5 forbids changing [[Prototype]] if not [[Extensible]]. */
+ bool extensible;
+ if (!IsExtensible(cx, obj, &extensible)) {
+ return false;
+ }
+ if (!extensible) {
+ return result.fail(JSMSG_CANT_SET_PROTO);
+ }
+
+ /*
+ * ES6 9.1.2 step 6 forbids generating cyclical prototype chains. But we
+ * have to do this comparison on the observable WindowProxy, not on the
+ * possibly-Window object we're setting the proto on.
+ */
+ RootedObject objMaybeWindowProxy(cx, ToWindowProxyIfWindow(obj));
+ RootedObject obj2(cx, proto);
+ while (obj2) {
+ MOZ_ASSERT(!IsWindow(obj2));
+ if (obj2 == objMaybeWindowProxy) {
+ return result.fail(JSMSG_CANT_SET_PROTO_CYCLE);
+ }
+
+ bool isOrdinary;
+ if (!GetPrototypeIfOrdinary(cx, obj2, &isOrdinary, &obj2)) {
+ return false;
+ }
+ if (!isOrdinary) {
+ break;
+ }
+ }
+
+ Rooted<TaggedProto> taggedProto(cx, TaggedProto(proto));
+ if (!JSObject::setProtoUnchecked(cx, obj, taggedProto)) {
+ return false;
+ }
+
+ return result.succeed();
+}
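+
+ // Illustrative failure modes (sketch): after |let o = Object.create(p)|,
+ // |Reflect.setPrototypeOf(p, o)| reports false here via
+ // JSMSG_CANT_SET_PROTO_CYCLE, and setting the prototype of a
+ // non-extensible object fails with JSMSG_CANT_SET_PROTO;
+ // Object.setPrototypeOf turns these failures into TypeErrors.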
+
+bool js::SetPrototype(JSContext* cx, HandleObject obj, HandleObject proto) {
+ ObjectOpResult result;
+ return SetPrototype(cx, obj, proto, result) && result.checkStrict(cx, obj);
+}
+
+bool js::PreventExtensions(JSContext* cx, HandleObject obj,
+ ObjectOpResult& result) {
+ if (obj->is<ProxyObject>()) {
+ return js::Proxy::preventExtensions(cx, obj, result);
+ }
+
+ if (!obj->nonProxyIsExtensible()) {
+ // If the following assertion fails, a call to
+ // shrinkCapacityToInitializedLength() is missing somewhere else and
+ // needs to be found and fixed.
+ MOZ_ASSERT_IF(obj->is<NativeObject>(),
+ obj->as<NativeObject>().getDenseInitializedLength() ==
+ obj->as<NativeObject>().getDenseCapacity());
+
+ return result.succeed();
+ }
+
+ if (obj->is<NativeObject>()) {
+ // Force lazy properties to be resolved.
+ Handle<NativeObject*> nobj = obj.as<NativeObject>();
+ if (!ResolveLazyProperties(cx, nobj)) {
+ return false;
+ }
+
+ // Prepare the elements. We have to do this before we mark the object
+ // non-extensible; that's fine because these changes are not observable.
+ ObjectElements::PrepareForPreventExtensions(cx, nobj);
+ }
+
+ // Finally, set the NotExtensible flag on the Shape and ObjectElements.
+ if (!JSObject::setFlag(cx, obj, ObjectFlag::NotExtensible)) {
+ return false;
+ }
+ if (obj->is<NativeObject>()) {
+ ObjectElements::PreventExtensions(&obj->as<NativeObject>());
+ }
+
+ return result.succeed();
+}
+
+bool js::PreventExtensions(JSContext* cx, HandleObject obj) {
+ ObjectOpResult result;
+ return PreventExtensions(cx, obj, result) && result.checkStrict(cx, obj);
+}
+
+bool js::GetOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<Maybe<PropertyDescriptor>> desc) {
+ if (GetOwnPropertyOp op = obj->getOpsGetOwnPropertyDescriptor()) {
+ bool ok = op(cx, obj, id, desc);
+ if (ok && desc.isSome()) {
+ desc->assertComplete();
+ }
+ return ok;
+ }
+
+ return NativeGetOwnPropertyDescriptor(cx, obj.as<NativeObject>(), id, desc);
+}
+
+bool js::DefineProperty(JSContext* cx, HandleObject obj, HandleId id,
+ Handle<PropertyDescriptor> desc) {
+ ObjectOpResult result;
+ return DefineProperty(cx, obj, id, desc, result) &&
+ result.checkStrict(cx, obj, id);
+}
+
+bool js::DefineProperty(JSContext* cx, HandleObject obj, HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result) {
+ desc.assertValid();
+ if (DefinePropertyOp op = obj->getOpsDefineProperty()) {
+ return op(cx, obj, id, desc, result);
+ }
+ return NativeDefineProperty(cx, obj.as<NativeObject>(), id, desc, result);
+}
+
+bool js::DefineAccessorProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleObject getter, HandleObject setter,
+ unsigned attrs, ObjectOpResult& result) {
+ Rooted<PropertyDescriptor> desc(
+ cx, PropertyDescriptor::Accessor(
+ getter ? mozilla::Some(getter) : mozilla::Nothing(),
+ setter ? mozilla::Some(setter) : mozilla::Nothing(), attrs));
+
+ if (DefinePropertyOp op = obj->getOpsDefineProperty()) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ return op(cx, obj, id, desc, result);
+ }
+ return NativeDefineProperty(cx, obj.as<NativeObject>(), id, desc, result);
+}
+
+bool js::DefineDataProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue value, unsigned attrs,
+ ObjectOpResult& result) {
+ Rooted<PropertyDescriptor> desc(cx, PropertyDescriptor::Data(value, attrs));
+ if (DefinePropertyOp op = obj->getOpsDefineProperty()) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ return op(cx, obj, id, desc, result);
+ }
+ return NativeDefineProperty(cx, obj.as<NativeObject>(), id, desc, result);
+}
+
+bool js::DefineAccessorProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleObject getter, HandleObject setter,
+ unsigned attrs) {
+ ObjectOpResult result;
+ if (!DefineAccessorProperty(cx, obj, id, getter, setter, attrs, result)) {
+ return false;
+ }
+ if (!result) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ result.reportError(cx, obj, id);
+ return false;
+ }
+ return true;
+}
+
+bool js::DefineDataProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue value, unsigned attrs) {
+ ObjectOpResult result;
+ if (!DefineDataProperty(cx, obj, id, value, attrs, result)) {
+ return false;
+ }
+ if (!result) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ result.reportError(cx, obj, id);
+ return false;
+ }
+ return true;
+}
+
+bool js::DefineDataProperty(JSContext* cx, HandleObject obj, PropertyName* name,
+ HandleValue value, unsigned attrs) {
+ RootedId id(cx, NameToId(name));
+ return DefineDataProperty(cx, obj, id, value, attrs);
+}
+
+bool js::DefineDataElement(JSContext* cx, HandleObject obj, uint32_t index,
+ HandleValue value, unsigned attrs) {
+ RootedId id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+ return DefineDataProperty(cx, obj, id, value, attrs);
+}
+
+/*** SpiderMonkey nonstandard internal methods ******************************/
+
+ // Mark an object as having an immutable prototype.
+//
+// NOTE: This does not correspond to the SetImmutablePrototype ECMAScript
+// method.
+bool js::SetImmutablePrototype(JSContext* cx, HandleObject obj,
+ bool* succeeded) {
+ if (obj->hasDynamicPrototype()) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ return Proxy::setImmutablePrototype(cx, obj, succeeded);
+ }
+
+ if (!JSObject::setFlag(cx, obj, ObjectFlag::ImmutablePrototype)) {
+ return false;
+ }
+ *succeeded = true;
+ return true;
+}
+
+bool js::GetPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc,
+ MutableHandleObject holder) {
+ RootedObject pobj(cx);
+ for (pobj = obj; pobj;) {
+ if (!GetOwnPropertyDescriptor(cx, pobj, id, desc)) {
+ return false;
+ }
+
+ if (desc.isSome()) {
+ holder.set(pobj);
+ return true;
+ }
+
+ if (!GetPrototype(cx, pobj, &pobj)) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(desc.isNothing());
+ holder.set(nullptr);
+ return true;
+}
+
+/* * */
+
+extern bool PropertySpecNameToId(JSContext* cx, JSPropertySpec::Name name,
+ MutableHandleId id);
+
+// If a property or method is part of an experimental feature that can be
+// disabled at run-time by a preference, we keep it in the JSFunctionSpec /
+// JSPropertySpec list, but omit the definition if the preference is off.
+JS_PUBLIC_API bool js::ShouldIgnorePropertyDefinition(JSContext* cx,
+ JSProtoKey key, jsid id) {
+ if (!cx->realm()->creationOptions().getToSourceEnabled() &&
+ (id == NameToId(cx->names().toSource) ||
+ id == NameToId(cx->names().uneval))) {
+ return true;
+ }
+
+ if (key == JSProto_FinalizationRegistry &&
+ cx->realm()->creationOptions().getWeakRefsEnabled() ==
+ JS::WeakRefSpecifier::EnabledWithoutCleanupSome &&
+ id == NameToId(cx->names().cleanupSome)) {
+ return true;
+ }
+
+#ifdef NIGHTLY_BUILD
+ if (key == JSProto_Array &&
+ !cx->realm()->creationOptions().getArrayGroupingEnabled() &&
+ (id == NameToId(cx->names().group) ||
+ id == NameToId(cx->names().groupToMap))) {
+ return true;
+ }
+#endif
+
+ // It's gently surprising that this is JSProto_Function, but the key
+ // point is that this is a -constructor function-, not a function on the
+ // prototype; and the proto of the constructor is JSProto_Function.
+ if (key == JSProto_Function &&
+ !cx->realm()->creationOptions().getArrayFromAsyncEnabled() &&
+ id == NameToId(cx->names().fromAsync)) {
+ return true;
+ }
+
+ if (key == JSProto_Array &&
+ !cx->realm()->creationOptions().getChangeArrayByCopyEnabled() &&
+ (id == NameToId(cx->names().with) ||
+ id == NameToId(cx->names().toReversed) ||
+ id == NameToId(cx->names().toSorted) ||
+ id == NameToId(cx->names().toSpliced))) {
+ return true;
+ }
+
+ if (key == JSProto_TypedArray &&
+ !cx->realm()->creationOptions().getChangeArrayByCopyEnabled() &&
+ (id == NameToId(cx->names().with) ||
+ id == NameToId(cx->names().toReversed) ||
+ id == NameToId(cx->names().toSorted))) {
+ return true;
+ }
+
+#ifdef ENABLE_NEW_SET_METHODS
+ if (key == JSProto_Set &&
+ !cx->realm()->creationOptions().getNewSetMethodsEnabled() &&
+ (id == NameToId(cx->names().union_) ||
+ id == NameToId(cx->names().difference) ||
+ id == NameToId(cx->names().intersection) ||
+ id == NameToId(cx->names().isSubsetOf) ||
+ id == NameToId(cx->names().isSupersetOf) ||
+ id == NameToId(cx->names().isDisjointFrom) ||
+ id == NameToId(cx->names().symmetricDifference))) {
+ return true;
+ }
+#endif
+
+ return false;
+}
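+
+ // Illustrative effect (sketch): with the toSource option disabled,
+ // |Object.prototype.toSource| is simply never defined, so
+ // |"toSource" in Object.prototype| is false rather than the property
+ // existing in a disabled form.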
+
+static bool DefineFunctionFromSpec(JSContext* cx, HandleObject obj,
+ const JSFunctionSpec* fs) {
+ RootedId id(cx);
+ if (!PropertySpecNameToId(cx, fs->name, &id)) {
+ return false;
+ }
+
+ if (ShouldIgnorePropertyDefinition(cx, StandardProtoKeyOrNull(obj), id)) {
+ return true;
+ }
+
+ JSFunction* fun = NewFunctionFromSpec(cx, fs, id);
+ if (!fun) {
+ return false;
+ }
+
+ RootedValue funVal(cx, ObjectValue(*fun));
+ return DefineDataProperty(cx, obj, id, funVal, fs->flags & ~JSFUN_FLAGS_MASK);
+}
+
+bool js::DefineFunctions(JSContext* cx, HandleObject obj,
+ const JSFunctionSpec* fs) {
+ for (; fs->name; fs++) {
+ if (!DefineFunctionFromSpec(cx, obj, fs)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/*** ToPrimitive ************************************************************/
+
+/*
+ * Gets |obj[id]|. If that value's not callable, returns true and stores
+ * |obj| as an object value in *vp. If it's callable, calls it with no
+ * arguments and |obj| as |this|, returning the result in *vp.
+ *
+ * This is a mini-abstraction for ES6 draft rev 36 (2015 Mar 17),
+ * 7.1.1, second algorithm (OrdinaryToPrimitive), steps 5.a-c.
+ */
+static bool MaybeCallMethod(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleValue vp) {
+ if (!GetProperty(cx, obj, obj, id, vp)) {
+ return false;
+ }
+ if (!IsCallable(vp)) {
+ vp.setObject(*obj);
+ return true;
+ }
+
+ return js::Call(cx, vp, obj, vp);
+}
+
+static bool ReportCantConvert(JSContext* cx, unsigned errorNumber,
+ HandleObject obj, JSType hint) {
+ const JSClass* clasp = obj->getClass();
+
+ // Avoid recursive death when decompiling in ReportValueError.
+ RootedString str(cx);
+ if (hint == JSTYPE_STRING) {
+ str = JS_AtomizeString(cx, clasp->name);
+ if (!str) {
+ return false;
+ }
+ } else {
+ str = nullptr;
+ }
+
+ RootedValue val(cx, ObjectValue(*obj));
+ ReportValueError(cx, errorNumber, JSDVG_SEARCH_STACK, val, str,
+ hint == JSTYPE_UNDEFINED ? "primitive type"
+ : hint == JSTYPE_STRING ? "string"
+ : "number");
+ return false;
+}
+
+bool JS::OrdinaryToPrimitive(JSContext* cx, HandleObject obj, JSType hint,
+ MutableHandleValue vp) {
+ MOZ_ASSERT(hint == JSTYPE_NUMBER || hint == JSTYPE_STRING ||
+ hint == JSTYPE_UNDEFINED);
+
+ Rooted<jsid> id(cx);
+
+ const JSClass* clasp = obj->getClass();
+ if (hint == JSTYPE_STRING) {
+ id = NameToId(cx->names().toString);
+
+ bool calledToString = false;
+ if (clasp == &StringObject::class_) {
+ // Optimize (new String(...)).toString().
+ StringObject* nobj = &obj->as<StringObject>();
+ if (HasNativeMethodPure(nobj, cx->names().toString, str_toString, cx)) {
+ vp.setString(nobj->unbox());
+ return true;
+ }
+ } else if (clasp == &PlainObject::class_) {
+ JSFunction* fun;
+ if (GetPropertyPure(cx, obj, id, vp.address()) &&
+ IsFunctionObject(vp, &fun)) {
+ // Common case: we have a toString function. Try to short-circuit if
+ // it's Object.prototype.toString and there's no @@toStringTag.
+ if (fun->maybeNative() == obj_toString &&
+ !MaybeHasInterestingSymbolProperty(
+ cx, obj, cx->wellKnownSymbols().toStringTag)) {
+ vp.setString(cx->names().objectObject);
+ return true;
+ }
+ if (!js::Call(cx, vp, obj, vp)) {
+ return false;
+ }
+ calledToString = true;
+ }
+ }
+
+ if (!calledToString) {
+ if (!MaybeCallMethod(cx, obj, id, vp)) {
+ return false;
+ }
+ }
+ if (vp.isPrimitive()) {
+ return true;
+ }
+
+ id = NameToId(cx->names().valueOf);
+ if (!MaybeCallMethod(cx, obj, id, vp)) {
+ return false;
+ }
+ if (vp.isPrimitive()) {
+ return true;
+ }
+ } else {
+ id = NameToId(cx->names().valueOf);
+
+ if (clasp == &StringObject::class_) {
+ // Optimize new String(...).valueOf().
+ StringObject* nobj = &obj->as<StringObject>();
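+ // String.prototype.valueOf shares the str_toString native with
+ // String.prototype.toString, so str_toString is the native checked
+ // below.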
+ if (HasNativeMethodPure(nobj, cx->names().valueOf, str_toString, cx)) {
+ vp.setString(nobj->unbox());
+ return true;
+ }
+ } else if (clasp == &NumberObject::class_) {
+ // Optimize new Number(...).valueOf().
+ NumberObject* nobj = &obj->as<NumberObject>();
+ if (HasNativeMethodPure(nobj, cx->names().valueOf, num_valueOf, cx)) {
+ vp.setNumber(nobj->unbox());
+ return true;
+ }
+ }
+
+ if (!MaybeCallMethod(cx, obj, id, vp)) {
+ return false;
+ }
+ if (vp.isPrimitive()) {
+ return true;
+ }
+
+ id = NameToId(cx->names().toString);
+ if (!MaybeCallMethod(cx, obj, id, vp)) {
+ return false;
+ }
+ if (vp.isPrimitive()) {
+ return true;
+ }
+ }
+
+ return ReportCantConvert(cx, JSMSG_CANT_CONVERT_TO, obj, hint);
+}
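+
+ // Worked example (sketch): |({ valueOf() { return 7; } }) * 2| arrives
+ // with a "number" hint, so valueOf is tried first and yields the
+ // primitive 7. |`${{}}`| arrives with a "string" hint; a plain object
+ // takes the obj_toString fast path above and produces "[object Object]"
+ // without an actual call.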
+
+bool js::ToPrimitiveSlow(JSContext* cx, JSType preferredType,
+ MutableHandleValue vp) {
+ // Step numbers refer to the first algorithm listed in ES6 draft rev 36
+ // (2015 Mar 17) 7.1.1 ToPrimitive.
+ MOZ_ASSERT(preferredType == JSTYPE_UNDEFINED ||
+ preferredType == JSTYPE_STRING || preferredType == JSTYPE_NUMBER);
+ RootedObject obj(cx, &vp.toObject());
+
+ // Steps 4-5.
+ RootedValue method(cx);
+ if (!GetInterestingSymbolProperty(cx, obj, cx->wellKnownSymbols().toPrimitive,
+ &method)) {
+ return false;
+ }
+
+ // Step 6.
+ if (!method.isNullOrUndefined()) {
+ // Step 6 of GetMethod. js::Call() below would do this check and throw a
+ // TypeError anyway, but this produces a better error message.
+ if (!IsCallable(method)) {
+ return ReportCantConvert(cx, JSMSG_TOPRIMITIVE_NOT_CALLABLE, obj,
+ preferredType);
+ }
+
+ // Steps 1-3, 6.a-b.
+ RootedValue arg0(
+ cx,
+ StringValue(preferredType == JSTYPE_STRING ? cx->names().string
+ : preferredType == JSTYPE_NUMBER ? cx->names().number
+ : cx->names().default_));
+
+ if (!js::Call(cx, method, vp, arg0, vp)) {
+ return false;
+ }
+
+ // Steps 6.c-d.
+ if (vp.isObject()) {
+ return ReportCantConvert(cx, JSMSG_TOPRIMITIVE_RETURNED_OBJECT, obj,
+ preferredType);
+ }
+ return true;
+ }
+
+ return OrdinaryToPrimitive(cx, obj, preferredType, vp);
+}
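+
+ // Illustrative precedence (sketch): an object with
+ // |[Symbol.toPrimitive](hint) { return hint === "string" ? "s" : 0; }|
+ // takes the @@toPrimitive branch above and never reaches
+ // OrdinaryToPrimitive; if that method returns an object, the
+ // JSMSG_TOPRIMITIVE_RETURNED_OBJECT error above is thrown.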
+
+/* ES6 draft rev 28 (2014 Oct 14) 7.1.14 */
+bool js::ToPropertyKeySlow(JSContext* cx, HandleValue argument,
+ MutableHandleId result) {
+ MOZ_ASSERT(argument.isObject());
+
+ // Steps 1-2.
+ RootedValue key(cx, argument);
+ if (!ToPrimitiveSlow(cx, JSTYPE_STRING, &key)) {
+ return false;
+ }
+
+ // Steps 3-4.
+ return PrimitiveValueToId<CanGC>(cx, key, result);
+}
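+
+ // Illustrative flow (sketch): |o[{ toString() { return "k"; } }]| first
+ // converts the key object with a string-hinted ToPrimitive, producing
+ // "k", which PrimitiveValueToId then interns as the property key.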
+
+/* * */
+
+bool js::IsPrototypeOf(JSContext* cx, HandleObject protoObj, JSObject* obj,
+ bool* result) {
+ RootedObject obj2(cx, obj);
+ for (;;) {
+ // The [[Prototype]] chain might be cyclic.
+ if (!CheckForInterrupt(cx)) {
+ return false;
+ }
+ if (!GetPrototype(cx, obj2, &obj2)) {
+ return false;
+ }
+ if (!obj2) {
+ *result = false;
+ return true;
+ }
+ if (obj2 == protoObj) {
+ *result = true;
+ return true;
+ }
+ }
+}
+
+JSObject* js::PrimitiveToObject(JSContext* cx, const Value& v) {
+ MOZ_ASSERT(v.isPrimitive());
+
+ switch (v.type()) {
+ case ValueType::String: {
+ Rooted<JSString*> str(cx, v.toString());
+ return StringObject::create(cx, str);
+ }
+ case ValueType::Double:
+ case ValueType::Int32:
+ return NumberObject::create(cx, v.toNumber());
+ case ValueType::Boolean:
+ return BooleanObject::create(cx, v.toBoolean());
+ case ValueType::Symbol: {
+ RootedSymbol symbol(cx, v.toSymbol());
+ return SymbolObject::create(cx, symbol);
+ }
+ case ValueType::BigInt: {
+ RootedBigInt bigInt(cx, v.toBigInt());
+ return BigIntObject::create(cx, bigInt);
+ }
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive: {
+ JSObject& obj = v.toExtendedPrimitive();
+
+ if (obj.is<RecordType>()) {
+ Rooted<RecordType*> rec(cx, &obj.as<RecordType>());
+ return RecordObject::create(cx, rec);
+ }
+ if (obj.is<TupleType>()) {
+ Rooted<TupleType*> tuple(cx, &obj.as<TupleType>());
+ return TupleObject::create(cx, tuple);
+ }
+
+ MOZ_CRASH("Unexpected ExtendedPrimitive type.");
+ }
+#endif
+ case ValueType::Undefined:
+ case ValueType::Null:
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ case ValueType::Object:
+ break;
+ }
+
+ MOZ_CRASH("unexpected type");
+}
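+
+ // Illustrative use (sketch): this is the ToObject boxing step behind
+ // property access on primitives, e.g. |(5).toFixed(2)| conceptually
+ // wraps 5 in a temporary NumberObject so the |toFixed| lookup can
+ // proceed on an object.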
+
+// Like PrimitiveToObject, but returns the JSProtoKey of the prototype that
+// would be used without actually creating the object.
+JSProtoKey js::PrimitiveToProtoKey(JSContext* cx, const Value& v) {
+ MOZ_ASSERT(v.isPrimitive());
+
+ switch (v.type()) {
+ case ValueType::String:
+ return JSProto_String;
+ case ValueType::Double:
+ case ValueType::Int32:
+ return JSProto_Number;
+ case ValueType::Boolean:
+ return JSProto_Boolean;
+ case ValueType::Symbol:
+ return JSProto_Symbol;
+ case ValueType::BigInt:
+ return JSProto_BigInt;
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive:
+ if (v.toExtendedPrimitive().is<TupleType>()) {
+ return JSProto_Tuple;
+ }
+ if (v.toExtendedPrimitive().is<RecordType>()) {
+ return JSProto_Null;
+ }
+ MOZ_CRASH("Unsupported ExtendedPrimitive");
+#endif
+ case ValueType::Undefined:
+ case ValueType::Null:
+ case ValueType::Magic:
+ case ValueType::PrivateGCThing:
+ case ValueType::Object:
+ break;
+ }
+
+ MOZ_CRASH("unexpected type");
+}
+
+/*
+ * Invokes the ES5 ToObject algorithm on val, returning the result. If val
+ * might already be an object, use ToObject instead. reportScanStack controls
+ * how null and undefined errors are reported.
+ *
+ * Callers must handle the already-object case.
+ */
+JSObject* js::ToObjectSlow(JSContext* cx, JS::HandleValue val,
+ bool reportScanStack) {
+ MOZ_ASSERT(!val.isMagic());
+ MOZ_ASSERT(!val.isObject());
+
+ if (val.isNullOrUndefined()) {
+ ReportIsNullOrUndefinedForPropertyAccess(
+ cx, val, reportScanStack ? JSDVG_SEARCH_STACK : JSDVG_IGNORE_STACK);
+ return nullptr;
+ }
+
+ return PrimitiveToObject(cx, val);
+}
+
+JSObject* js::ToObjectSlowForPropertyAccess(JSContext* cx, JS::HandleValue val,
+ int valIndex, HandleId key) {
+ MOZ_ASSERT(!val.isMagic());
+ MOZ_ASSERT(!val.isObject());
+
+ if (val.isNullOrUndefined()) {
+ ReportIsNullOrUndefinedForPropertyAccess(cx, val, valIndex, key);
+ return nullptr;
+ }
+
+ return PrimitiveToObject(cx, val);
+}
+
+JSObject* js::ToObjectSlowForPropertyAccess(JSContext* cx, JS::HandleValue val,
+ int valIndex,
+ Handle<PropertyName*> key) {
+ MOZ_ASSERT(!val.isMagic());
+ MOZ_ASSERT(!val.isObject());
+
+ if (val.isNullOrUndefined()) {
+ RootedId keyId(cx, NameToId(key));
+ ReportIsNullOrUndefinedForPropertyAccess(cx, val, valIndex, keyId);
+ return nullptr;
+ }
+
+ return PrimitiveToObject(cx, val);
+}
+
+JSObject* js::ToObjectSlowForPropertyAccess(JSContext* cx, JS::HandleValue val,
+ int valIndex,
+ HandleValue keyValue) {
+ MOZ_ASSERT(!val.isMagic());
+ MOZ_ASSERT(!val.isObject());
+
+ if (val.isNullOrUndefined()) {
+ RootedId key(cx);
+ if (keyValue.isPrimitive()) {
+ if (!PrimitiveValueToId<CanGC>(cx, keyValue, &key)) {
+ return nullptr;
+ }
+ ReportIsNullOrUndefinedForPropertyAccess(cx, val, valIndex, key);
+ } else {
+ ReportIsNullOrUndefinedForPropertyAccess(cx, val, valIndex);
+ }
+ return nullptr;
+ }
+
+ return PrimitiveToObject(cx, val);
+}
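+
+// Caller-side sketch of the fast/slow split these helpers assume: object
+// inputs never reach the slow paths (js::ToObject in vm/JSObject-inl.h
+// applies this pattern for arbitrary values).
+//
+//   JSObject* obj =
+//       val.isObject() ? &val.toObject() : ToObjectSlow(cx, val, false);
+//   if (!obj) {
+//     return false;  // null/undefined was reported, or we hit OOM
+//   }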
+
+JSObject* js::GetThisObject(JSObject* obj) {
+ // Use the WindowProxy if the global is a Window, as Window must never be
+ // exposed to script.
+ if (obj->is<GlobalObject>()) {
+ return ToWindowProxyIfWindow(obj);
+ }
+
+ // We should not expose any environments except NSVOs to script. The NSVO is
+ // pretending to be the global object in this case.
+ MOZ_ASSERT(obj->is<NonSyntacticVariablesObject>() ||
+ !obj->is<EnvironmentObject>());
+
+ return obj;
+}
+
+JSObject* js::GetThisObjectOfLexical(JSObject* env) {
+ return env->as<ExtensibleLexicalEnvironmentObject>().thisObject();
+}
+
+JSObject* js::GetThisObjectOfWith(JSObject* env) {
+ MOZ_ASSERT(env->is<WithEnvironmentObject>());
+ return GetThisObject(env->as<WithEnvironmentObject>().withThis());
+}
+
+class GetObjectSlotNameFunctor : public JS::TracingContext::Functor {
+ JSObject* obj;
+
+ public:
+ explicit GetObjectSlotNameFunctor(JSObject* ctx) : obj(ctx) {}
+ virtual void operator()(JS::TracingContext* trc, char* buf,
+ size_t bufsize) override;
+};
+
+void GetObjectSlotNameFunctor::operator()(JS::TracingContext* tcx, char* buf,
+ size_t bufsize) {
+ MOZ_ASSERT(tcx->index() != JS::TracingContext::InvalidIndex);
+
+ uint32_t slot = uint32_t(tcx->index());
+
+ Maybe<PropertyKey> key;
+ if (obj->is<NativeObject>()) {
+ NativeShape* shape = obj->as<NativeObject>().shape();
+ for (ShapePropertyIter<NoGC> iter(shape); !iter.done(); iter++) {
+ if (iter->hasSlot() && iter->slot() == slot) {
+ key.emplace(iter->key());
+ break;
+ }
+ }
+ }
+
+ if (key.isNothing()) {
+ do {
+ const char* slotname = nullptr;
+ const char* pattern = nullptr;
+ if (obj->is<GlobalObject>()) {
+ pattern = "CLASS_OBJECT(%s)";
+ if (false) {
+ ;
+ }
+#define TEST_SLOT_MATCHES_PROTOTYPE(name, clasp) \
+ else if ((JSProto_##name) == slot) { \
+ slotname = js_##name##_str; \
+ }
+ JS_FOR_EACH_PROTOTYPE(TEST_SLOT_MATCHES_PROTOTYPE)
+#undef TEST_SLOT_MATCHES_PROTOTYPE
+ } else {
+ pattern = "%s";
+ if (obj->is<EnvironmentObject>()) {
+ if (slot == EnvironmentObject::enclosingEnvironmentSlot()) {
+ slotname = "enclosing_environment";
+ } else if (obj->is<CallObject>()) {
+ if (slot == CallObject::calleeSlot()) {
+ slotname = "callee_slot";
+ }
+ } else if (obj->is<WithEnvironmentObject>()) {
+ if (slot == WithEnvironmentObject::objectSlot()) {
+ slotname = "with_object";
+ } else if (slot == WithEnvironmentObject::thisSlot()) {
+ slotname = "with_this";
+ }
+ }
+ }
+ }
+
+ if (slotname) {
+ snprintf(buf, bufsize, pattern, slotname);
+ } else {
+ snprintf(buf, bufsize, "**UNKNOWN SLOT %" PRIu32 "**", slot);
+ }
+ } while (false);
+ } else {
+ if (key->isInt()) {
+ snprintf(buf, bufsize, "%" PRId32, key->toInt());
+ } else if (key->isAtom()) {
+ PutEscapedString(buf, bufsize, key->toAtom(), 0);
+ } else if (key->isSymbol()) {
+ snprintf(buf, bufsize, "**SYMBOL KEY**");
+ } else {
+ snprintf(buf, bufsize, "**FINALIZED ATOM KEY**");
+ }
+ }
+}
+
+/*** Debugging routines *****************************************************/
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+
+/*
+ * Routines to print out values during debugging. These are FRIEND_API to help
+ * the debugger find them and to support temporarily hacking js::Dump* calls
+ * into other code.
+ */
+
+static void dumpValue(const Value& v, js::GenericPrinter& out) {
+ switch (v.type()) {
+ case ValueType::Null:
+ out.put("null");
+ break;
+ case ValueType::Undefined:
+ out.put("undefined");
+ break;
+ case ValueType::Int32:
+ out.printf("%d", v.toInt32());
+ break;
+ case ValueType::Double:
+ out.printf("%g", v.toDouble());
+ break;
+ case ValueType::String:
+ v.toString()->dumpNoNewline(out);
+ break;
+ case ValueType::Symbol:
+ v.toSymbol()->dump(out);
+ break;
+ case ValueType::BigInt:
+ v.toBigInt()->dump(out);
+ break;
+ case ValueType::Object:
+ if (v.toObject().is<JSFunction>()) {
+ JSFunction* fun = &v.toObject().as<JSFunction>();
+ if (fun->displayAtom()) {
+ out.put("<function ");
+ EscapedStringPrinter(out, fun->displayAtom(), 0);
+ } else {
+ out.put("<unnamed function");
+ }
+ if (fun->hasBaseScript()) {
+ BaseScript* script = fun->baseScript();
+ out.printf(" (%s:%u)", script->filename() ? script->filename() : "",
+ script->lineno());
+ }
+ out.printf(" at %p>", (void*)fun);
+ } else {
+ JSObject* obj = &v.toObject();
+ const JSClass* clasp = obj->getClass();
+ out.printf("<%s%s at %p>", clasp->name,
+ (clasp == &PlainObject::class_) ? "" : " object",
+ (void*)obj);
+ }
+ break;
+# ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive: {
+ JSObject* obj = &v.toExtendedPrimitive();
+ out.printf("<%s at %p>", obj->getClass()->name, (void*)obj);
+ break;
+ }
+# endif
+ case ValueType::Boolean:
+ if (v.toBoolean()) {
+ out.put("true");
+ } else {
+ out.put("false");
+ }
+ break;
+ case ValueType::Magic:
+ out.put("<magic");
+ switch (v.whyMagic()) {
+ case JS_ELEMENTS_HOLE:
+ out.put(" elements hole");
+ break;
+ case JS_NO_ITER_VALUE:
+ out.put(" no iter value");
+ break;
+ case JS_GENERATOR_CLOSING:
+ out.put(" generator closing");
+ break;
+ case JS_OPTIMIZED_OUT:
+ out.put(" optimized out");
+ break;
+ default:
+ out.put(" ?!");
+ break;
+ }
+ out.putChar('>');
+ break;
+ case ValueType::PrivateGCThing:
+ out.printf("<PrivateGCThing %p>", v.toGCThing());
+ break;
+ }
+}
+
+namespace js {
+
+// We don't want jsfriendapi.h to depend on GenericPrinter,
+// so these functions are declared directly in the cpp.
+
+JS_PUBLIC_API void DumpValue(const JS::Value& val, js::GenericPrinter& out);
+
+JS_PUBLIC_API void DumpId(jsid id, js::GenericPrinter& out);
+
+JS_PUBLIC_API void DumpInterpreterFrame(JSContext* cx, js::GenericPrinter& out,
+ InterpreterFrame* start = nullptr);
+
+} // namespace js
+
+JS_PUBLIC_API void js::DumpValue(const Value& val, js::GenericPrinter& out) {
+ dumpValue(val, out);
+ out.putChar('\n');
+}
+
+JS_PUBLIC_API void js::DumpId(jsid id, js::GenericPrinter& out) {
+ out.printf("jsid %p = ", (void*)id.asRawBits());
+ dumpValue(IdToValue(id), out);
+ out.putChar('\n');
+}
+
+static void DumpProperty(const NativeObject* obj, PropMap* map, uint32_t index,
+ js::GenericPrinter& out) {
+ PropertyInfoWithKey prop = map->getPropertyInfoWithKey(index);
+ jsid id = prop.key();
+ if (id.isAtom()) {
+ id.toAtom()->dumpCharsNoNewline(out);
+ } else if (id.isInt()) {
+ out.printf("%d", id.toInt());
+ } else if (id.isSymbol()) {
+ id.toSymbol()->dump(out);
+ } else {
+ out.printf("id %p", reinterpret_cast<void*>(id.asRawBits()));
+ }
+
+ if (prop.isDataProperty()) {
+ out.printf(": ");
+ dumpValue(obj->getSlot(prop.slot()), out);
+ } else if (prop.isAccessorProperty()) {
+ out.printf(": getter %p setter %p", obj->getGetter(prop),
+ obj->getSetter(prop));
+ }
+
+ out.printf(" (map %p/%u", map, index);
+
+ if (prop.enumerable()) {
+ out.put(" enumerable");
+ }
+ if (prop.configurable()) {
+ out.put(" configurable");
+ }
+ if (prop.isDataDescriptor() && prop.writable()) {
+ out.put(" writable");
+ }
+
+ if (prop.isCustomDataProperty()) {
+ out.printf(" <custom-data-prop>");
+ }
+
+ if (prop.hasSlot()) {
+ out.printf(" slot %u", prop.slot());
+ }
+
+ out.printf(")\n");
+}
+
+bool JSObject::hasSameRealmAs(JSContext* cx) const {
+ return nonCCWRealm() == cx->realm();
+}
+
+bool JSObject::uninlinedIsProxyObject() const { return is<ProxyObject>(); }
+
+bool JSObject::uninlinedNonProxyIsExtensible() const {
+ return nonProxyIsExtensible();
+}
+
+void JSObject::dump(js::GenericPrinter& out) const {
+ const JSObject* obj = this;
+ out.printf("object %p\n", obj);
+
+ if (IsCrossCompartmentWrapper(this)) {
+ out.printf(" compartment %p\n", compartment());
+ } else {
+ JSObject* globalObj = &nonCCWGlobal();
+ out.printf(" global %p [%s]\n", globalObj, globalObj->getClass()->name);
+ }
+
+ const JSClass* clasp = obj->getClass();
+ out.printf(" class %p %s\n", clasp, clasp->name);
+
+ if (IsProxy(obj)) {
+ auto* handler = GetProxyHandler(obj);
+ out.printf(" handler %p", handler);
+ if (IsDeadProxyObject(obj)) {
+ out.printf(" (DeadObjectProxy)");
+ } else if (IsCrossCompartmentWrapper(obj)) {
+ out.printf(" (CCW)");
+ }
+ out.putChar('\n');
+
+ Value priv = GetProxyPrivate(obj);
+ if (!priv.isUndefined()) {
+ out.printf(" private ");
+ dumpValue(priv, out);
+ out.putChar('\n');
+ }
+
+ Value expando = GetProxyExpando(obj);
+ if (!expando.isNull()) {
+ out.printf(" expando ");
+ dumpValue(expando, out);
+ out.putChar('\n');
+ }
+ }
+
+ const Shape* shape = obj->shape();
+ out.printf(" shape %p\n", shape);
+
+ out.put(" flags:");
+ if (obj->isUsedAsPrototype()) {
+ out.put(" used_as_prototype");
+ }
+ if (!obj->is<ProxyObject>() && !obj->nonProxyIsExtensible()) {
+ out.put(" not_extensible");
+ }
+ if (obj->maybeHasInterestingSymbolProperty()) {
+ out.put(" maybe_has_interesting_symbol");
+ }
+ if (obj->isQualifiedVarObj()) {
+ out.put(" varobj");
+ }
+ if (obj->isUnqualifiedVarObj()) {
+ out.put(" unqualified_varobj");
+ }
+ if (obj->hasInvalidatedTeleporting()) {
+ out.put(" invalidated_teleporting");
+ }
+ if (obj->hasStaticPrototype() && obj->staticPrototypeIsImmutable()) {
+ out.put(" immutable_prototype");
+ }
+
+ const NativeObject* nobj =
+ obj->is<NativeObject>() ? &obj->as<NativeObject>() : nullptr;
+ if (nobj) {
+ if (nobj->inDictionaryMode()) {
+ out.put(" inDictionaryMode");
+ }
+ if (nobj->hadGetterSetterChange()) {
+ out.put(" had_getter_setter_change");
+ }
+ if (nobj->isIndexed()) {
+ out.put(" indexed");
+ }
+ if (nobj->hasEnumerableProperty()) {
+ out.put(" has_enumerable");
+ }
+ if (nobj->is<PlainObject>() &&
+ nobj->as<PlainObject>().hasNonWritableOrAccessorPropExclProto()) {
+ out.put(" has_non_writable_or_accessor_prop_excl_proto");
+ }
+ if (!nobj->denseElementsArePacked()) {
+ out.put(" non_packed_elements");
+ }
+ if (nobj->getElementsHeader()->isNotExtensible()) {
+ out.put(" not_extensible");
+ }
+ if (nobj->getElementsHeader()->isSealed()) {
+ out.put(" sealed_elements");
+ }
+ if (nobj->getElementsHeader()->isFrozen()) {
+ out.put(" frozen_elements");
+ }
+ if (nobj->getElementsHeader()->maybeInIteration()) {
+ out.put(" elements_maybe_in_iteration");
+ }
+ } else {
+ out.put(" not_native");
+ }
+ out.putChar('\n');
+
+ out.put(" proto ");
+ TaggedProto proto = obj->taggedProto();
+ if (proto.isDynamic()) {
+ out.put("<dynamic>");
+ } else {
+ dumpValue(ObjectOrNullValue(proto.toObjectOrNull()), out);
+ }
+ out.putChar('\n');
+
+ if (nobj) {
+ uint32_t reserved = JSCLASS_RESERVED_SLOTS(clasp);
+ if (reserved) {
+ out.printf(" reserved slots:\n");
+ for (uint32_t i = 0; i < reserved; i++) {
+ out.printf(" %3u ", i);
+ out.put(": ");
+ dumpValue(nobj->getSlot(i), out);
+ out.putChar('\n');
+ }
+ }
+
+ out.put(" properties:\n");
+
+ if (PropMap* map = nobj->shape()->propMap()) {
+ Vector<PropMap*, 8, SystemAllocPolicy> maps;
+ while (true) {
+ if (!maps.append(map)) {
+ out.printf("(OOM while appending maps)\n");
+ break;
+ }
+ if (!map->hasPrevious()) {
+ break;
+ }
+ map = map->asLinked()->previous();
+ }
+
+ for (size_t i = maps.length(); i > 0; i--) {
+ size_t index = i - 1;
+ uint32_t len =
+ (index == 0) ? nobj->shape()->propMapLength() : PropMap::Capacity;
+ for (uint32_t j = 0; j < len; j++) {
+ PropMap* map = maps[index];
+ if (!map->hasKey(j)) {
+ MOZ_ASSERT(map->isDictionary());
+ continue;
+ }
+ out.printf(" ");
+ DumpProperty(nobj, map, j, out);
+ }
+ }
+ }
+
+ uint32_t len = nobj->getDenseInitializedLength();
+ if (len) {
+ out.put(" elements:\n");
+ for (uint32_t i = 0; i < len; i++) {
+ out.printf(" %3u: ", i);
+ dumpValue(nobj->getDenseElement(i), out);
+ out.putChar('\n');
+ }
+ }
+ }
+}
+
+// For debuggers.
+void JSObject::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+}
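+
+// Debugger usage sketch: evaluate `obj->dump()` from gdb or lldb to print to
+// stderr, or route the output elsewhere with an explicit printer:
+//
+//   js::Fprinter out(stdout);
+//   obj->dump(out);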
+
+static void MaybeDumpScope(Scope* scope, js::GenericPrinter& out) {
+ if (scope) {
+ out.printf(" scope: %s\n", ScopeKindString(scope->kind()));
+ for (BindingIter bi(scope); bi; bi++) {
+ out.put(" ");
+ dumpValue(StringValue(bi.name()), out);
+ out.putChar('\n');
+ }
+ }
+}
+
+static void MaybeDumpValue(const char* name, const Value& v,
+ js::GenericPrinter& out) {
+ if (!v.isNull()) {
+ out.printf(" %s: ", name);
+ dumpValue(v, out);
+ out.putChar('\n');
+ }
+}
+
+JS_PUBLIC_API void js::DumpInterpreterFrame(JSContext* cx,
+ js::GenericPrinter& out,
+ InterpreterFrame* start) {
+ /* This should only be called during live debugging. */
+ ScriptFrameIter i(cx);
+ if (!start) {
+ if (i.done()) {
+ out.printf("no stack for cx = %p\n", (void*)cx);
+ return;
+ }
+ } else {
+ while (!i.done() && !i.isJSJit() && i.interpFrame() != start) {
+ ++i;
+ }
+
+ if (i.done()) {
+ out.printf("fp = %p not found in cx = %p\n", (void*)start, (void*)cx);
+ return;
+ }
+ }
+
+ for (; !i.done(); ++i) {
+ if (i.isJSJit()) {
+ out.put("JIT frame\n");
+ } else {
+ out.printf("InterpreterFrame at %p\n", (void*)i.interpFrame());
+ }
+
+ if (i.isFunctionFrame()) {
+ out.put("callee fun: ");
+ RootedValue v(cx);
+ JSObject* fun = i.callee(cx);
+ v.setObject(*fun);
+ dumpValue(v, out);
+ } else {
+ out.put("global or eval frame, no callee");
+ }
+ out.putChar('\n');
+
+ out.printf("file %s line %u\n", i.script()->filename(),
+ i.script()->lineno());
+
+ if (jsbytecode* pc = i.pc()) {
+ out.printf(" pc = %p\n", pc);
+ out.printf(" current op: %s\n", CodeName(JSOp(*pc)));
+ MaybeDumpScope(i.script()->lookupScope(pc), out);
+ }
+ if (i.isFunctionFrame()) {
+ MaybeDumpValue("this", i.thisArgument(cx), out);
+ }
+ if (!i.isJSJit()) {
+ out.put(" rval: ");
+ dumpValue(i.interpFrame()->returnValue(), out);
+ out.putChar('\n');
+ }
+
+ out.put(" flags:");
+ if (i.isConstructing()) {
+ out.put(" constructing");
+ }
+ if (!i.isJSJit() && i.interpFrame()->isDebuggerEvalFrame()) {
+ out.put(" debugger eval");
+ }
+ if (i.isEvalFrame()) {
+ out.put(" eval");
+ }
+ out.putChar('\n');
+
+ out.printf(" envChain: (JSObject*) %p\n", (void*)i.environmentChain(cx));
+
+ out.putChar('\n');
+ }
+}
+
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+
+JS_PUBLIC_API void js::DumpBacktrace(JSContext* cx, FILE* fp) {
+ Fprinter out(fp);
+ js::DumpBacktrace(cx, out);
+}
+
+JS_PUBLIC_API void js::DumpBacktrace(JSContext* cx, js::GenericPrinter& out) {
+ size_t depth = 0;
+ for (AllFramesIter i(cx); !i.done(); ++i, ++depth) {
+ const char* filename;
+ unsigned line;
+ if (i.hasScript()) {
+ filename = JS_GetScriptFilename(i.script());
+ line = PCToLineNumber(i.script(), i.pc());
+ } else {
+ filename = i.filename();
+ line = i.computeLine();
+ }
+ char frameType = i.isInterp() ? 'i'
+ : i.isBaseline() ? 'b'
+ : i.isIon() ? 'I'
+ : i.isWasm() ? 'W'
+ : '?';
+
+ out.printf("#%zu %14p %c %s:%u", depth, i.rawFramePtr(), frameType,
+ filename, line);
+
+ if (i.hasScript()) {
+ out.printf(" (%p @ %zu)\n", i.script(), i.script()->pcToOffset(i.pc()));
+ } else {
+ out.printf(" (%p)\n", i.pc());
+ }
+ }
+}
+
+JS_PUBLIC_API void js::DumpBacktrace(JSContext* cx) {
+ DumpBacktrace(cx, stdout);
+}
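+
+// Illustrative output line, following the printf format above: frame index,
+// raw frame pointer, a frame-type flag ('i' interpreter, 'b' baseline,
+// 'I' ion, 'W' wasm, '?' unknown), then the source location:
+//
+//   #0  0x7f3c9ecfa060 i foo.js:12 (0x55d3a8e0 @ 42)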
+
+/* * */
+
+bool JSObject::isBackgroundFinalized() const {
+ if (isTenured()) {
+ return js::gc::IsBackgroundFinalized(asTenured().getAllocKind());
+ }
+
+ js::Nursery& nursery = runtimeFromMainThread()->gc.nursery();
+ return js::gc::IsBackgroundFinalized(allocKindForTenure(nursery));
+}
+
+js::gc::AllocKind JSObject::allocKindForTenure(
+ const js::Nursery& nursery) const {
+ using namespace js::gc;
+
+ MOZ_ASSERT(IsInsideNursery(this));
+
+ if (canHaveFixedElements()) {
+ const NativeObject& nobj = as<NativeObject>();
+ MOZ_ASSERT(nobj.numFixedSlots() == 0);
+
+ /* Use minimal size object if we are just going to copy the pointer. */
+ if (!nursery.isInside(nobj.getUnshiftedElementsHeader())) {
+ return gc::AllocKind::OBJECT0_BACKGROUND;
+ }
+
+ size_t nelements = nobj.getDenseCapacity();
+ return ForegroundToBackgroundAllocKind(GetGCArrayKind(nelements));
+ }
+
+ if (is<JSFunction>()) {
+ return as<JSFunction>().getAllocKind();
+ }
+
+ /*
+ * Typed arrays in the nursery may have a lazily allocated buffer; make
+ * sure there is room for the array's fixed data when moving the array.
+ */
+ if (is<TypedArrayObject>() && !as<TypedArrayObject>().hasBuffer()) {
+ gc::AllocKind allocKind;
+ if (as<TypedArrayObject>().hasInlineElements()) {
+ size_t nbytes = as<TypedArrayObject>().byteLength();
+ allocKind = TypedArrayObject::AllocKindForLazyBuffer(nbytes);
+ } else {
+ allocKind = GetGCObjectKind(getClass());
+ }
+ return ForegroundToBackgroundAllocKind(allocKind);
+ }
+
+ // Proxies that are CrossCompartmentWrappers may be nursery allocated.
+ if (is<ProxyObject>()) {
+ return as<ProxyObject>().allocKindForTenure();
+ }
+
+ // WasmStructObjects have a variable-length tail which contains the first
+ // few data fields, so make sure we copy it all over to the new object.
+ if (is<WasmStructObject>()) {
+ // Figure out the size of this object, from the object's TypeDef.
+ const wasm::TypeDef* typeDef = &as<WasmStructObject>().typeDef();
+ return WasmStructObject::allocKindForTypeDef(typeDef);
+ }
+
+ if (is<WasmArrayObject>()) {
+ return WasmArrayObject::allocKind();
+ }
+
+ // All nursery allocatable non-native objects are handled above.
+ return as<NativeObject>().allocKindForTenure();
+}
+
+void JSObject::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::ClassInfo* info,
+ JS::RuntimeSizes* runtimeSizes) {
+ if (is<NativeObject>() && as<NativeObject>().hasDynamicSlots()) {
+ info->objectsMallocHeapSlots +=
+ mallocSizeOf(as<NativeObject>().getSlotsHeader());
+ }
+
+ if (is<NativeObject>() && as<NativeObject>().hasDynamicElements()) {
+ void* allocatedElements = as<NativeObject>().getUnshiftedElementsHeader();
+ info->objectsMallocHeapElementsNormal += mallocSizeOf(allocatedElements);
+ }
+
+ // Other things may be measured in the future if DMD indicates it is
+ // worthwhile.
+ if (is<JSFunction>() || is<PlainObject>() || is<ArrayObject>() ||
+ is<CallObject>() || is<RegExpObject>() || is<ProxyObject>()) {
+ // Do nothing. But this function is hot, and we win by getting the
+ // common cases out of the way early. Some stats on the most common
+ // classes, as measured during a vanilla browser session:
+ // - (53.7%, 53.7%): Function
+ // - (18.0%, 71.7%): Object
+ // - (16.9%, 88.6%): Array
+ // - ( 3.9%, 92.5%): Call
+ // - ( 2.8%, 95.3%): RegExp
+ // - ( 1.0%, 96.4%): Proxy
+
+ // Note that any JSClass that is special cased below likely needs to
+ // specify the JSCLASS_DELAY_METADATA_BUILDER flag, or else we will
+ // probably crash if the object metadata callback attempts to get the
+ // size of the new object (which Debugger code does) before private
+ // slots are initialized.
+ } else if (is<ArgumentsObject>()) {
+ info->objectsMallocHeapMisc +=
+ as<ArgumentsObject>().sizeOfMisc(mallocSizeOf);
+ } else if (is<MapObject>()) {
+ info->objectsMallocHeapMisc += as<MapObject>().sizeOfData(mallocSizeOf);
+ } else if (is<SetObject>()) {
+ info->objectsMallocHeapMisc += as<SetObject>().sizeOfData(mallocSizeOf);
+ } else if (is<PropertyIteratorObject>()) {
+ info->objectsMallocHeapMisc +=
+ as<PropertyIteratorObject>().sizeOfMisc(mallocSizeOf);
+ } else if (is<ArrayBufferObject>()) {
+ ArrayBufferObject::addSizeOfExcludingThis(this, mallocSizeOf, info,
+ runtimeSizes);
+ } else if (is<SharedArrayBufferObject>()) {
+ SharedArrayBufferObject::addSizeOfExcludingThis(this, mallocSizeOf, info,
+ runtimeSizes);
+ } else if (is<GlobalObject>()) {
+ as<GlobalObject>().addSizeOfData(mallocSizeOf, info);
+ } else if (is<WeakCollectionObject>()) {
+ info->objectsMallocHeapMisc +=
+ as<WeakCollectionObject>().sizeOfExcludingThis(mallocSizeOf);
+ }
+#ifdef JS_HAS_CTYPES
+ else {
+ // This must be the last case.
+ info->objectsMallocHeapMisc += ctypes::SizeOfDataIfCDataObject(
+ mallocSizeOf, const_cast<JSObject*>(this));
+ }
+#endif
+}
+
+size_t JSObject::sizeOfIncludingThisInNursery() const {
+ // This function doesn't concern itself yet with typed objects (bug 1133593).
+
+ MOZ_ASSERT(!isTenured());
+
+ const Nursery& nursery = runtimeFromMainThread()->gc.nursery();
+ size_t size = gc::Arena::thingSize(allocKindForTenure(nursery));
+
+ if (is<NativeObject>()) {
+ const NativeObject& native = as<NativeObject>();
+
+ size += native.numDynamicSlots() * sizeof(Value);
+
+ if (native.hasDynamicElements()) {
+ js::ObjectElements& elements = *native.getElementsHeader();
+ size += (elements.capacity + elements.numShiftedElements()) *
+ sizeof(HeapSlot);
+ }
+
+ if (is<ArgumentsObject>()) {
+ size += as<ArgumentsObject>().sizeOfData();
+ }
+ }
+
+ return size;
+}
+
+JS::ubi::Node::Size JS::ubi::Concrete<JSObject>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ JSObject& obj = get();
+
+ if (!obj.isTenured()) {
+ return obj.sizeOfIncludingThisInNursery();
+ }
+
+ JS::ClassInfo info;
+ obj.addSizeOfExcludingThis(mallocSizeOf, &info, nullptr);
+ return obj.tenuredSizeOfThis() + info.sizeOfAllThings();
+}
+
+const char16_t JS::ubi::Concrete<JSObject>::concreteTypeName[] = u"JSObject";
+
+void JSObject::traceChildren(JSTracer* trc) {
+ TraceCellHeaderEdge(trc, this, "shape");
+
+ Shape* objShape = shape();
+ if (objShape->isNative()) {
+ NativeObject* nobj = &as<NativeObject>();
+
+ {
+ GetObjectSlotNameFunctor func(nobj);
+ JS::AutoTracingDetails ctx(trc, func);
+ JS::AutoTracingIndex index(trc);
+ // Tracing can mutate the target but cannot change the slot count,
+ // but the compiler has no way of knowing this.
+ const uint32_t nslots = nobj->slotSpan();
+ for (uint32_t i = 0; i < nslots; ++i) {
+ TraceEdge(trc, &nobj->getSlotRef(i), "object slot");
+ ++index;
+ }
+ MOZ_ASSERT(nslots == nobj->slotSpan());
+ }
+
+ TraceRange(trc, nobj->getDenseInitializedLength(),
+ static_cast<HeapSlot*>(nobj->getDenseElements()),
+ "objectElements");
+ }
+
+ // Call the trace hook at the end so that during a moving GC the trace hook
+ // will see updated fields and slots.
+ const JSClass* clasp = objShape->getObjectClass();
+ if (clasp->hasTrace()) {
+ clasp->doTrace(trc, this);
+ }
+}
+
+// ES 2016 7.3.20.
+[[nodiscard]] JSObject* js::SpeciesConstructor(
+ JSContext* cx, HandleObject obj, HandleObject defaultCtor,
+ bool (*isDefaultSpecies)(JSContext*, JSFunction*)) {
+ // Step 1 (implicit).
+
+ // Fast-path for steps 2 - 8. Applies if all of the following conditions
+ // are met:
+ // - obj.constructor can be retrieved without side-effects.
+ // - obj.constructor[[@@species]] can be retrieved without side-effects.
+ // - obj.constructor[[@@species]] is the builtin's original @@species
+ // getter.
+ RootedValue ctor(cx);
+ bool ctorGetSucceeded = GetPropertyPure(
+ cx, obj, NameToId(cx->names().constructor), ctor.address());
+ if (ctorGetSucceeded && ctor.isObject() && &ctor.toObject() == defaultCtor) {
+ jsid speciesId = PropertyKey::Symbol(cx->wellKnownSymbols().species);
+ JSFunction* getter;
+ if (GetGetterPure(cx, defaultCtor, speciesId, &getter) && getter &&
+ isDefaultSpecies(cx, getter)) {
+ return defaultCtor;
+ }
+ }
+
+ // Step 2.
+ if (!ctorGetSucceeded &&
+ !GetProperty(cx, obj, obj, cx->names().constructor, &ctor)) {
+ return nullptr;
+ }
+
+ // Step 3.
+ if (ctor.isUndefined()) {
+ return defaultCtor;
+ }
+
+ // Step 4.
+ if (!ctor.isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_OBJECT_REQUIRED,
+ "object's 'constructor' property");
+ return nullptr;
+ }
+
+ // Step 5.
+ RootedObject ctorObj(cx, &ctor.toObject());
+ RootedValue s(cx);
+ RootedId speciesId(cx, PropertyKey::Symbol(cx->wellKnownSymbols().species));
+ if (!GetProperty(cx, ctorObj, ctor, speciesId, &s)) {
+ return nullptr;
+ }
+
+ // Step 6.
+ if (s.isNullOrUndefined()) {
+ return defaultCtor;
+ }
+
+ // Step 7.
+ if (IsConstructor(s)) {
+ return &s.toObject();
+ }
+
+ // Step 8.
+ JS_ReportErrorNumberASCII(
+ cx, GetErrorMessage, nullptr, JSMSG_NOT_CONSTRUCTOR,
+ "[Symbol.species] property of object's constructor");
+ return nullptr;
+}
+
+[[nodiscard]] JSObject* js::SpeciesConstructor(
+ JSContext* cx, HandleObject obj, JSProtoKey ctorKey,
+ bool (*isDefaultSpecies)(JSContext*, JSFunction*)) {
+ RootedObject defaultCtor(cx,
+ GlobalObject::getOrCreateConstructor(cx, ctorKey));
+ if (!defaultCtor) {
+ return nullptr;
+ }
+ return SpeciesConstructor(cx, obj, defaultCtor, isDefaultSpecies);
+}
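+
+// Caller sketch (IsMySpeciesGetter is a hypothetical stand-in for the
+// builtin's own @@species predicate): resolve the constructor for a derived
+// result, falling back to the realm's default Array constructor.
+//
+//   JSObject* ctor =
+//       SpeciesConstructor(cx, obj, JSProto_Array, IsMySpeciesGetter);
+//   if (!ctor) {
+//     return false;
+//   }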
+
+bool js::Unbox(JSContext* cx, HandleObject obj, MutableHandleValue vp) {
+ if (MOZ_UNLIKELY(obj->is<ProxyObject>())) {
+ return Proxy::boxedValue_unbox(cx, obj, vp);
+ }
+
+ if (obj->is<BooleanObject>()) {
+ vp.setBoolean(obj->as<BooleanObject>().unbox());
+ } else if (obj->is<NumberObject>()) {
+ vp.setNumber(obj->as<NumberObject>().unbox());
+ } else if (obj->is<StringObject>()) {
+ vp.setString(obj->as<StringObject>().unbox());
+ } else if (obj->is<DateObject>()) {
+ vp.set(obj->as<DateObject>().UTCTime());
+ } else if (obj->is<SymbolObject>()) {
+ vp.setSymbol(obj->as<SymbolObject>().unbox());
+ } else if (obj->is<BigIntObject>()) {
+ vp.setBigInt(obj->as<BigIntObject>().unbox());
+#ifdef ENABLE_RECORD_TUPLE
+ } else if (obj->is<RecordObject>()) {
+ vp.setExtendedPrimitive(*obj->as<RecordObject>().unbox());
+ } else if (obj->is<TupleObject>()) {
+ vp.setExtendedPrimitive(obj->as<TupleObject>().unbox());
+#endif
+ } else {
+ vp.setUndefined();
+ }
+
+ return true;
+}
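+
+// Unboxing sketch: for a wrapper like `new Number(5)` this yields the
+// primitive; for classes not handled above, vp is left undefined, letting
+// callers tell "not a boxed primitive" apart from a successful unbox.
+//
+//   RootedValue v(cx);
+//   if (!Unbox(cx, obj, &v)) {
+//     return false;
+//   }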
+
+#ifdef DEBUG
+void js::AssertJSClassInvariants(const JSClass* clasp) {
+ MOZ_ASSERT(JS::StringIsASCII(clasp->name));
+
+ // Native objects shouldn't use the property operation hooks in ObjectOps.
+ // Doing so could violate JIT invariants.
+ //
+ // Environment objects unfortunately use these hooks, but environment objects
+ // are not exposed directly to script so they're generally less of an issue.
+ if (clasp->isNativeObject() && clasp != &WithEnvironmentObject::class_ &&
+ clasp != &ModuleEnvironmentObject::class_ &&
+ clasp != &RuntimeLexicalErrorObject::class_) {
+ MOZ_ASSERT(!clasp->getOpsLookupProperty());
+ MOZ_ASSERT_IF(clasp != &MappedArgumentsObject::class_,
+ !clasp->getOpsDefineProperty());
+ MOZ_ASSERT(!clasp->getOpsHasProperty());
+ MOZ_ASSERT(!clasp->getOpsGetProperty());
+ MOZ_ASSERT(!clasp->getOpsSetProperty());
+ MOZ_ASSERT(!clasp->getOpsGetOwnPropertyDescriptor());
+ MOZ_ASSERT(!clasp->getOpsDeleteProperty());
+ }
+}
+
+/* static */
+void JSObject::debugCheckNewObject(Shape* shape, js::gc::AllocKind allocKind,
+ js::gc::Heap heap) {
+ const JSClass* clasp = shape->getObjectClass();
+
+ if (!ClassCanHaveFixedData(clasp)) {
+ NativeShape* nshape = &shape->asNative();
+ if (clasp == &ArrayObject::class_) {
+ // Arrays can store the ObjectElements header inline.
+ MOZ_ASSERT(nshape->numFixedSlots() == 0);
+ } else {
+ MOZ_ASSERT(gc::GetGCKindSlots(allocKind) == nshape->numFixedSlots());
+ }
+ }
+
+ // Assert background finalization is used when possible.
+ MOZ_ASSERT(!CanChangeToBackgroundAllocKind(allocKind, clasp));
+
+ // Classes with a finalizer must specify whether instances will be finalized
+ // on the main thread or in the background, except proxies whose behaviour
+ // depends on the target object.
+ static const uint32_t FinalizeMask =
+ JSCLASS_FOREGROUND_FINALIZE | JSCLASS_BACKGROUND_FINALIZE;
+ uint32_t flags = clasp->flags;
+ uint32_t finalizeFlags = flags & FinalizeMask;
+ if (clasp->hasFinalize() && !clasp->isProxyObject()) {
+ MOZ_ASSERT(finalizeFlags == JSCLASS_FOREGROUND_FINALIZE ||
+ finalizeFlags == JSCLASS_BACKGROUND_FINALIZE);
+ MOZ_ASSERT((finalizeFlags == JSCLASS_BACKGROUND_FINALIZE) ==
+ IsBackgroundFinalized(allocKind));
+ } else {
+ MOZ_ASSERT(finalizeFlags == 0);
+ }
+
+ MOZ_ASSERT_IF(clasp->hasFinalize(),
+ heap == gc::Heap::Tenured ||
+ CanNurseryAllocateFinalizedClass(clasp) ||
+ clasp->isProxyObject());
+
+ MOZ_ASSERT(!shape->isDictionary());
+ MOZ_ASSERT(!shape->realm()->hasObjectPendingMetadata());
+
+ // Non-native classes manage their own data and slots, so numFixedSlots is
+ // always 0. Note that proxy classes can have reserved slots but they're not
+ // included in numFixedSlots.
+ if (!clasp->isNativeObject()) {
+ MOZ_ASSERT_IF(!clasp->isProxyObject(), JSCLASS_RESERVED_SLOTS(clasp) == 0);
+ }
+}
+#endif
diff --git a/js/src/vm/JSObject.h b/js/src/vm/JSObject.h
new file mode 100644
index 0000000000..91128bc42f
--- /dev/null
+++ b/js/src/vm/JSObject.h
@@ -0,0 +1,1099 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSObject_h
+#define vm_JSObject_h
+
+#include "mozilla/MemoryReporting.h"
+
+#include "jsfriendapi.h"
+
+#include "js/friend/ErrorMessages.h" // JSErrNum
+#include "js/GCVector.h"
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+#include "js/Wrapper.h"
+#include "vm/Shape.h"
+
+namespace JS {
+struct ClassInfo;
+} // namespace JS
+
+namespace js {
+
+using PropertyDescriptorVector = JS::GCVector<JS::PropertyDescriptor>;
+class GCMarker;
+class Nursery;
+struct AutoEnterOOMUnsafeRegion;
+
+namespace gc {
+class RelocationOverlay;
+} // namespace gc
+
+/****************************************************************************/
+
+class GlobalObject;
+class NativeObject;
+
+enum class IntegrityLevel { Sealed, Frozen };
+
+/*
+ * The NewObjectKind allows an allocation site to specify the lifetime
+ * requirements that must be fixed at allocation time.
+ */
+enum NewObjectKind {
+ /* This is the default. Most objects are generic. */
+ GenericObject,
+
+ /*
+ * Objects which will not benefit from being allocated in the nursery
+ * (e.g. because they are known to have a long lifetime) may be allocated
+ * with this kind to place them immediately into the tenured generation.
+ */
+ TenuredObject
+};
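+
+// Allocation-site sketch (assumes vm/PlainObject.h): a caller that knows the
+// object will be long-lived can request tenured allocation directly.
+//
+//   PlainObject* obj = NewPlainObject(cx, TenuredObject);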
+
+// Forward declarations, required for later friend declarations.
+bool PreventExtensions(JSContext* cx, JS::HandleObject obj,
+ JS::ObjectOpResult& result);
+bool SetImmutablePrototype(JSContext* cx, JS::HandleObject obj,
+ bool* succeeded);
+
+} /* namespace js */
+
+/*
+ * [SMDOC] JSObject layout
+ *
+ * A JavaScript object.
+ *
+ * This is the base class for all objects exposed to JS script (as well as some
+ * objects that are only accessed indirectly). Subclasses add additional fields
+ * and execution semantics. The runtime class of an arbitrary JSObject is
+ * identified by JSObject::getClass().
+ *
+ * All objects have a non-null Shape, stored in the cell header, which describes
+ * the current layout and set of property keys of the object.
+ *
+ * Each Shape has a pointer to a BaseShape. The BaseShape contains the object's
+ * prototype object, its class, and its realm.
+ *
+ * NOTE: Some operations can change the contents of an object (including class)
+ * in-place so avoid assuming an object with same pointer has same class
+ * as before.
+ * - JSObject::swap()
+ */
+class JSObject
+ : public js::gc::CellWithTenuredGCPointer<js::gc::Cell, js::Shape> {
+ public:
+ // The Shape is stored in the cell header.
+ js::Shape* shape() const { return headerPtr(); }
+
+ // Like shape(), but uses getAtomic to read the header word.
+ js::Shape* shapeMaybeForwarded() const { return headerPtrAtomic(); }
+
+#ifndef JS_64BIT
+ // Ensure fixed slots have 8-byte alignment on 32-bit platforms.
+ uint32_t padding_;
+#endif
+
+ private:
+ friend class js::GCMarker;
+ friend class js::GlobalObject;
+ friend class js::Nursery;
+ friend class js::gc::RelocationOverlay;
+ friend bool js::PreventExtensions(JSContext* cx, JS::HandleObject obj,
+ JS::ObjectOpResult& result);
+ friend bool js::SetImmutablePrototype(JSContext* cx, JS::HandleObject obj,
+ bool* succeeded);
+
+ public:
+ const JSClass* getClass() const { return shape()->getObjectClass(); }
+ bool hasClass(const JSClass* c) const { return getClass() == c; }
+
+ js::LookupPropertyOp getOpsLookupProperty() const {
+ return getClass()->getOpsLookupProperty();
+ }
+ js::DefinePropertyOp getOpsDefineProperty() const {
+ return getClass()->getOpsDefineProperty();
+ }
+ js::HasPropertyOp getOpsHasProperty() const {
+ return getClass()->getOpsHasProperty();
+ }
+ js::GetPropertyOp getOpsGetProperty() const {
+ return getClass()->getOpsGetProperty();
+ }
+ js::SetPropertyOp getOpsSetProperty() const {
+ return getClass()->getOpsSetProperty();
+ }
+ js::GetOwnPropertyOp getOpsGetOwnPropertyDescriptor() const {
+ return getClass()->getOpsGetOwnPropertyDescriptor();
+ }
+ js::DeletePropertyOp getOpsDeleteProperty() const {
+ return getClass()->getOpsDeleteProperty();
+ }
+ js::GetElementsOp getOpsGetElements() const {
+ return getClass()->getOpsGetElements();
+ }
+ JSFunToStringOp getOpsFunToString() const {
+ return getClass()->getOpsFunToString();
+ }
+
+ JS::Compartment* compartment() const { return shape()->compartment(); }
+ JS::Compartment* maybeCompartment() const { return compartment(); }
+
+ void initShape(js::Shape* shape) {
+ // Note: use Cell::Zone() instead of zone() because zone() relies on the
+ // shape we still have to initialize.
+ MOZ_ASSERT(Cell::zone() == shape->zone());
+ initHeaderPtr(shape);
+ }
+ void setShape(js::Shape* shape) {
+ MOZ_ASSERT(maybeCCWRealm() == shape->realm());
+ setHeaderPtr(shape);
+ }
+
+ static bool setFlag(JSContext* cx, JS::HandleObject obj, js::ObjectFlag flag);
+
+ bool hasFlag(js::ObjectFlag flag) const {
+ return shape()->hasObjectFlag(flag);
+ }
+
+ bool hasAnyFlag(js::ObjectFlags flags) const {
+ return shape()->objectFlags().hasAnyFlag(flags);
+ }
+
+ // Change this object's shape for a prototype mutation.
+ //
+ // Note: the caller must ensure the object has a mutable proto, is extensible,
+ // etc.
+ static bool setProtoUnchecked(JSContext* cx, JS::HandleObject obj,
+ js::Handle<js::TaggedProto> proto);
+
+ // An object is marked IsUsedAsPrototype if it is (or was) another object's
+ // prototype. Optimization heuristics will make use of this flag.
+ //
+ // This flag is only relevant for static prototypes. Proxy traps can return
+ // objects without this flag set.
+ //
+ // NOTE: it's important to call setIsUsedAsPrototype *after* initializing the
+ // object's properties, because that avoids unnecessary shadowing checks and
+ // reshaping.
+ //
+ // See: ReshapeForProtoMutation, ReshapeForShadowedProp
+ bool isUsedAsPrototype() const {
+ return hasFlag(js::ObjectFlag::IsUsedAsPrototype);
+ }
+ static bool setIsUsedAsPrototype(JSContext* cx, JS::HandleObject obj) {
+ return setFlag(cx, obj, js::ObjectFlag::IsUsedAsPrototype);
+ }
+
+ bool useWatchtowerTestingLog() const {
+ return hasFlag(js::ObjectFlag::UseWatchtowerTestingLog);
+ }
+ static bool setUseWatchtowerTestingLog(JSContext* cx, JS::HandleObject obj) {
+ return setFlag(cx, obj, js::ObjectFlag::UseWatchtowerTestingLog);
+ }
+
+ bool isGenerationCountedGlobal() const {
+ return hasFlag(js::ObjectFlag::GenerationCountedGlobal);
+ }
+ static bool setGenerationCountedGlobal(JSContext* cx, JS::HandleObject obj) {
+ return setFlag(cx, obj, js::ObjectFlag::GenerationCountedGlobal);
+ }
+
+ // A "qualified" varobj is the object on which "qualified" variable
+ // declarations (i.e., those defined with "var") are kept.
+ //
+ // Conceptually, when a var binding is defined, it is defined on the
+ // innermost qualified varobj on the scope chain.
+ //
+ // Function scopes (CallObjects) are qualified varobjs, and there can be
+ // no other qualified varobj that is more inner for var bindings in that
+ // function. As such, all references to local var bindings in a function
+ // may be statically bound to the function scope. This is subject to
+ // further optimization. Unaliased bindings inside functions reside
+ // entirely on the frame, not in CallObjects.
+ //
+ // Global scopes are also qualified varobjs. It is possible to statically
+ // know, for a given script, that there are no more inner qualified
+ // varobjs, so free variable references can be statically bound to the
+ // global.
+ //
+ // Finally, there are non-syntactic qualified varobjs used by embedders
+ // (e.g., Gecko and XPConnect), as they often wish to run scripts under a
+ // scope that captures var bindings.
+ inline bool isQualifiedVarObj() const;
+ static bool setQualifiedVarObj(JSContext* cx, JS::HandleObject obj) {
+ return setFlag(cx, obj, js::ObjectFlag::QualifiedVarObj);
+ }
+
+ // An "unqualified" varobj is the object on which "unqualified"
+ // assignments (i.e., bareword assignments for which the LHS does not
+ // exist on the scope chain) are kept.
+ inline bool isUnqualifiedVarObj() const;
+
+ // Once the "invalidated teleporting" flag is set for an object, it is never
+ // cleared and it may cause the JITs to insert additional guards when
+ // accessing properties on this object. While the flag remains clear, the
+ // shape teleporting optimization can be used to avoid those extra checks.
+ //
+ // The flag is set on the object if either:
+ //
+ // * Its own proto was mutated or it was on the proto chain of an object that
+ // had its proto mutated.
+ //
+ // * It was on the proto chain of an object that started shadowing a property
+ // on this object.
+ //
+ // See:
+ // - ReshapeForProtoMutation
+ // - ReshapeForShadowedProp
+ // - ProtoChainSupportsTeleporting
+ inline bool hasInvalidatedTeleporting() const;
+ static bool setInvalidatedTeleporting(JSContext* cx, JS::HandleObject obj) {
+ MOZ_ASSERT(obj->isUsedAsPrototype());
+ MOZ_ASSERT(obj->hasStaticPrototype(),
+ "teleporting as a concept is only applicable to static "
+ "(not dynamically-computed) prototypes");
+ return setFlag(cx, obj, js::ObjectFlag::InvalidatedTeleporting);
+ }
+
+ /*
+ * Whether there may be "interesting symbol" properties on this object. An
+ * interesting symbol is a symbol for which symbol->isInterestingSymbol()
+ * returns true.
+ */
+ MOZ_ALWAYS_INLINE bool maybeHasInterestingSymbolProperty() const;
+
+ /* GC support. */
+
+ void traceChildren(JSTracer* trc);
+
+ void fixupAfterMovingGC() {}
+
+ static const JS::TraceKind TraceKind = JS::TraceKind::Object;
+
+ MOZ_ALWAYS_INLINE JS::Zone* zone() const {
+ MOZ_ASSERT_IF(!isTenured(), nurseryZone() == shape()->zone());
+ return shape()->zone();
+ }
+ MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZone() const {
+ return JS::shadow::Zone::from(zone());
+ }
+ MOZ_ALWAYS_INLINE JS::Zone* zoneFromAnyThread() const {
+ MOZ_ASSERT_IF(!isTenured(),
+ nurseryZoneFromAnyThread() == shape()->zoneFromAnyThread());
+ return shape()->zoneFromAnyThread();
+ }
+ MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZoneFromAnyThread() const {
+ return JS::shadow::Zone::from(zoneFromAnyThread());
+ }
+ static MOZ_ALWAYS_INLINE void postWriteBarrier(void* cellp, JSObject* prev,
+ JSObject* next) {
+ js::gc::PostWriteBarrierImpl<JSObject>(cellp, prev, next);
+ }
+
+ /* Return the allocKind we would use if we were to tenure this object. */
+ js::gc::AllocKind allocKindForTenure(const js::Nursery& nursery) const;
+
+ bool canHaveFixedElements() const;
+
+ size_t tenuredSizeOfThis() const {
+ MOZ_ASSERT(isTenured());
+ return js::gc::Arena::thingSize(asTenured().getAllocKind());
+ }
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::ClassInfo* info,
+ JS::RuntimeSizes* runtimeSizes);
+
+ // We can only use addSizeOfExcludingThis on tenured objects: it assumes it
+ // can apply mallocSizeOf to bits and pieces of the object, whereas objects
+ // in the nursery may have those bits and pieces allocated in the nursery
+ // along with them, and are not each their own malloc blocks.
+ size_t sizeOfIncludingThisInNursery() const;
+
+#ifdef DEBUG
+ static void debugCheckNewObject(js::Shape* shape, js::gc::AllocKind allocKind,
+ js::gc::Heap heap);
+#else
+ static void debugCheckNewObject(js::Shape* shape, js::gc::AllocKind allocKind,
+ js::gc::Heap heap) {}
+#endif
+
+ /*
+ * We permit proxies to dynamically compute their prototype if desired.
+ * (Not all proxies will so desire: in particular, most DOM proxies can
+ * track their prototype with a single, nullable JSObject*.) If a proxy
+ * so desires, we store (JSObject*)0x1 in the proto field of the object's
+ * shape.
+ *
+ * We offer three ways to get an object's prototype:
+ *
+ * 1. obj->staticPrototype() returns the prototype, but it asserts if obj
+ * is a proxy, and the proxy has opted to dynamically compute its
+ * prototype using a getPrototype() handler.
+ * 2. obj->taggedProto() returns a TaggedProto, which can be tested to
+ * check if the proto is an object, nullptr, or lazily computed.
+ * 3. js::GetPrototype(cx, obj, &proto) computes the proto of an object.
+ * If obj is a proxy with dynamically-computed prototype, this code may
+ * perform arbitrary behavior (allocation, GC, run JS) while computing
+ * the proto.
+ */
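+
+ // Sketch of option 3, the fully general path (safe for proxies with
+ // dynamically-computed protos; may run arbitrary code):
+ //
+ //   RootedObject proto(cx);
+ //   if (!js::GetPrototype(cx, obj, &proto)) {
+ //     return false;
+ //   }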
+
+ js::TaggedProto taggedProto() const { return shape()->proto(); }
+
+ bool uninlinedIsProxyObject() const;
+
+ JSObject* staticPrototype() const {
+ MOZ_ASSERT(hasStaticPrototype());
+ return taggedProto().toObjectOrNull();
+ }
+
+ // Normal objects and a subset of proxies have an uninteresting, static
+ // (albeit perhaps mutable) [[Prototype]]. For such objects the
+ // [[Prototype]] is just a value returned when needed for accesses, or
+ // modified in response to requests. These objects store the
+ // [[Prototype]] directly within their shape (see BaseShape).
+ bool hasStaticPrototype() const { return !hasDynamicPrototype(); }
+
+ // The remaining proxies have a [[Prototype]] requiring dynamic computation
+ // for every access, going through the proxy handler {get,set}Prototype and
+ // setImmutablePrototype methods. (Wrappers particularly use this to keep
+ // the wrapper/wrappee [[Prototype]]s consistent.)
+ bool hasDynamicPrototype() const {
+ bool dynamic = taggedProto().isDynamic();
+ MOZ_ASSERT_IF(dynamic, uninlinedIsProxyObject());
+ return dynamic;
+ }
+
+ // True iff this object's [[Prototype]] is immutable. Must be called only
+ // on objects with a static [[Prototype]]!
+ inline bool staticPrototypeIsImmutable() const;
+
+ /*
+ * Environment chains.
+ *
+ * The environment chain of an object is the link in the search path when
+ * a script does a name lookup on an environment object. For JS internal
+ * environment objects --- Call, LexicalEnvironment, and WithEnvironment
+ * --- the chain is stored in the first fixed slot of the object. For
+ * other environment objects, the chain goes directly to the global.
+ *
+ * In code which is not marked hasNonSyntacticScope, environment chains
+ * can contain only syntactic environment objects (see
+ * IsSyntacticEnvironment) with a global object at the root as the
+ * environment of the outermost non-function script. In
+ * hasNonSyntacticScope code, the environment of the outermost
+ * non-function script might not be a global object, and can have a mix of
+ * other objects above it before the global object is reached.
+ */
+
+ /*
+ * Get the enclosing environment of an object. When called on a
+ * non-EnvironmentObject, this will just be the global (the name
+ * "enclosing environment" still applies in this situation because
+ * non-EnvironmentObjects can be on the environment chain).
+ */
+ inline JSObject* enclosingEnvironment() const;
+
+ // Cross-compartment wrappers are not associated with a single realm/global,
+ // so these methods assert the object is not a CCW.
+ inline js::GlobalObject& nonCCWGlobal() const;
+
+ JS::Realm* nonCCWRealm() const {
+ MOZ_ASSERT(!js::UninlinedIsCrossCompartmentWrapper(this));
+ return shape()->realm();
+ }
+ bool hasSameRealmAs(JSContext* cx) const;
+
+ // Returns the object's realm even if the object is a CCW (be careful, in
+ // this case the realm is not very meaningful because wrappers are shared by
+ // all realms in the compartment).
+ JS::Realm* maybeCCWRealm() const { return shape()->realm(); }
+
+ /*
+ * ES5 meta-object properties and operations.
+ */
+
+ public:
+ // Indicates whether a non-proxy is extensible. Don't call on proxies!
+ // This method really shouldn't exist -- but there are a few internal
+ // places that want it (JITs and the like), and it'd be a pain to mark them
+ // all as friends.
+ inline bool nonProxyIsExtensible() const;
+ bool uninlinedNonProxyIsExtensible() const;
+
+ public:
+ /*
+ * Back to generic stuff.
+ */
+ MOZ_ALWAYS_INLINE bool isCallable() const;
+ MOZ_ALWAYS_INLINE bool isConstructor() const;
+ MOZ_ALWAYS_INLINE JSNative callHook() const;
+ MOZ_ALWAYS_INLINE JSNative constructHook() const;
+
+ bool isBackgroundFinalized() const;
+
+ MOZ_ALWAYS_INLINE void finalize(JS::GCContext* gcx);
+
+ public:
+ static bool nonNativeSetProperty(JSContext* cx, js::HandleObject obj,
+ js::HandleId id, js::HandleValue v,
+ js::HandleValue receiver,
+ JS::ObjectOpResult& result);
+ static bool nonNativeSetElement(JSContext* cx, js::HandleObject obj,
+ uint32_t index, js::HandleValue v,
+ js::HandleValue receiver,
+ JS::ObjectOpResult& result);
+
+ static void swap(JSContext* cx, JS::HandleObject a, JS::HandleObject b,
+ js::AutoEnterOOMUnsafeRegion& oomUnsafe);
+
+ /*
+ * In addition to the generic object interface provided by JSObject,
+ * specific types of objects may provide additional operations. To access
+ * these additional operations, callers should use the pattern:
+ *
+ * if (obj.is<XObject>()) {
+ * XObject& x = obj.as<XObject>();
+ * x.foo();
+ * }
+ *
+ * These XObject classes form a hierarchy. For example, for a block lexical
+ * environment object, the following predicates are true:
+ * is<BlockLexicalEnvironmentObject>, is<LexicalEnvironmentObject> and
+ * is<EnvironmentObject>. Each of these has a respective class that derives
+ * and adds operations.
+ *
+ * A class XObject is defined in a vm/XObject{.h, .cpp, -inl.h} file
+ * triplet (along with any class YObject that derives XObject).
+ *
+ * Note that X represents a low-level representation and does not query the
+ * [[Class]] property of object defined by the spec: use |JS::GetBuiltinClass|
+ * for this.
+ */
+
+ template <class T>
+ inline bool is() const {
+ return getClass() == &T::class_;
+ }
+
+ template <class T>
+ T& as() {
+ MOZ_ASSERT(this->is<T>());
+ return *static_cast<T*>(this);
+ }
+
+ template <class T>
+ const T& as() const {
+ MOZ_ASSERT(this->is<T>());
+ return *static_cast<const T*>(this);
+ }
+
+ /*
+ * True if either this or CheckedUnwrap(this) is an object of class T.
+ * (Only two objects are checked, regardless of how many wrappers there
+ * are.)
+ *
+ * /!\ Note: This can be true at one point, but false later for the same
+ * object, thanks to js::NukeCrossCompartmentWrapper and friends.
+ */
+ template <class T>
+ bool canUnwrapAs();
+
+ /*
+ * Unwrap and downcast to class T.
+ *
+ * Precondition: `this->canUnwrapAs<T>()`. Note that it's not enough to
+ * have checked this at some point in the past; if there's any doubt as to
+ * whether js::Nuke* could have been called in the meantime, check again.
+ */
+ template <class T>
+ T& unwrapAs();
+
+ /*
+ * Tries to unwrap and downcast to class T. Returns nullptr if (and only if) a
+ * wrapper with a security policy is involved. Crashes in all builds if the
+ * (possibly unwrapped) object is not of class T (for example, because it's a
+ * dead wrapper).
+ */
+ template <class T>
+ inline T* maybeUnwrapAs();
+
+ /*
+ * Tries to unwrap and downcast to an object with class |clasp|. Returns
+ * nullptr if (and only if) a wrapper with a security policy is involved.
+ * Crashes in all builds if the (possibly unwrapped) object doesn't have class
+ * |clasp| (for example, because it's a dead wrapper).
+ */
+ inline JSObject* maybeUnwrapAs(const JSClass* clasp);
+
+ /*
+ * Tries to unwrap and downcast to class T. Returns nullptr if a wrapper with
+ * a security policy is involved or if the object does not have class T.
+ */
+ template <class T>
+ T* maybeUnwrapIf();
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ void dump(js::GenericPrinter& fp) const;
+ void dump() const;
+#endif
+
+ // Maximum size in bytes of a JSObject.
+#ifdef JS_64BIT
+ static constexpr size_t MAX_BYTE_SIZE =
+ 3 * sizeof(void*) + 16 * sizeof(JS::Value);
+#else
+ static constexpr size_t MAX_BYTE_SIZE =
+ 4 * sizeof(void*) + 16 * sizeof(JS::Value);
+#endif
+
+ protected:
+ // JIT Accessors.
+ //
+ // To help avoid writing Spectre-unsafe code, we only allow MacroAssembler
+ // to call the method below.
+ friend class js::jit::MacroAssembler;
+
+ static constexpr size_t offsetOfShape() { return offsetOfHeaderPtr(); }
+
+ private:
+ JSObject(const JSObject& other) = delete;
+ void operator=(const JSObject& other) = delete;
+
+ protected:
+ // For the allocator only, to be used with placement new.
+ friend class js::gc::GCRuntime;
+ JSObject() = default;
+};
+
+template <>
+inline bool JSObject::is<JSObject>() const {
+ return true;
+}
+
+template <typename Wrapper>
+template <typename U>
+MOZ_ALWAYS_INLINE JS::Handle<U*> js::RootedOperations<JSObject*, Wrapper>::as()
+ const {
+ const Wrapper& self = *static_cast<const Wrapper*>(this);
+ MOZ_ASSERT(self->template is<U>());
+ return Handle<U*>::fromMarkedLocation(
+ reinterpret_cast<U* const*>(self.address()));
+}
+
+template <typename Wrapper>
+template <class U>
+MOZ_ALWAYS_INLINE JS::Handle<U*> js::HandleOperations<JSObject*, Wrapper>::as()
+ const {
+ const JS::Handle<JSObject*>& self =
+ *static_cast<const JS::Handle<JSObject*>*>(this);
+ MOZ_ASSERT(self->template is<U>());
+ return Handle<U*>::fromMarkedLocation(
+ reinterpret_cast<U* const*>(self.address()));
+}
+
+template <class T>
+bool JSObject::canUnwrapAs() {
+ static_assert(!std::is_convertible_v<T*, js::Wrapper*>,
+ "T can't be a Wrapper type; this function discards wrappers");
+
+ if (is<T>()) {
+ return true;
+ }
+ JSObject* obj = js::CheckedUnwrapStatic(this);
+ return obj && obj->is<T>();
+}
+
+template <class T>
+T& JSObject::unwrapAs() {
+ static_assert(!std::is_convertible_v<T*, js::Wrapper*>,
+ "T can't be a Wrapper type; this function discards wrappers");
+
+ if (is<T>()) {
+ return as<T>();
+ }
+
+ // Since the caller just called canUnwrapAs<T>(), which does a
+ // CheckedUnwrap, this does not need to repeat the security check.
+ JSObject* unwrapped = js::UncheckedUnwrap(this);
+ MOZ_ASSERT(js::CheckedUnwrapStatic(this) == unwrapped,
+ "check that the security check we skipped really is redundant");
+ return unwrapped->as<T>();
+}
+
+template <class T>
+inline T* JSObject::maybeUnwrapAs() {
+ static_assert(!std::is_convertible_v<T*, js::Wrapper*>,
+ "T can't be a Wrapper type; this function discards wrappers");
+
+ if (is<T>()) {
+ return &as<T>();
+ }
+
+ JSObject* unwrapped = js::CheckedUnwrapStatic(this);
+ if (!unwrapped) {
+ return nullptr;
+ }
+
+ if (MOZ_LIKELY(unwrapped->is<T>())) {
+ return &unwrapped->as<T>();
+ }
+
+ MOZ_CRASH("Invalid object. Dead wrapper?");
+}
+
+inline JSObject* JSObject::maybeUnwrapAs(const JSClass* clasp) {
+ if (hasClass(clasp)) {
+ return this;
+ }
+
+ JSObject* unwrapped = js::CheckedUnwrapStatic(this);
+ if (!unwrapped) {
+ return nullptr;
+ }
+
+ if (MOZ_LIKELY(unwrapped->hasClass(clasp))) {
+ return unwrapped;
+ }
+
+ MOZ_CRASH("Invalid object. Dead wrapper?");
+}
+
+template <class T>
+T* JSObject::maybeUnwrapIf() {
+ static_assert(!std::is_convertible_v<T*, js::Wrapper*>,
+ "T can't be a Wrapper type; this function discards wrappers");
+
+ if (is<T>()) {
+ return &as<T>();
+ }
+
+ JSObject* unwrapped = js::CheckedUnwrapStatic(this);
+ return (unwrapped && unwrapped->is<T>()) ? &unwrapped->as<T>() : nullptr;
+}
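+
+// Contrast sketch for the unwrap helpers, using PromiseObject as the target
+// class; obj may be a cross-compartment wrapper:
+//
+//   if (obj->canUnwrapAs<PromiseObject>()) {
+//     PromiseObject& promise = obj->unwrapAs<PromiseObject>();
+//     // safe: checked immediately above
+//   }
+//
+//   // maybeUnwrapIf yields nullptr when a security wrapper intervenes or
+//   // the unwrapped object has a different class.
+//   if (PromiseObject* promise = obj->maybeUnwrapIf<PromiseObject>()) {
+//     // use *promise
+//   }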
+
+/*
+ * The only sensible way to compare JSObject with == is by identity. We use
+ * const& instead of * as a syntactic way to assert non-null. This leads to an
+ * abundance of address-of operators to identity. Hence this overload.
+ */
+static MOZ_ALWAYS_INLINE bool operator==(const JSObject& lhs,
+ const JSObject& rhs) {
+ return &lhs == &rhs;
+}
+
+static MOZ_ALWAYS_INLINE bool operator!=(const JSObject& lhs,
+ const JSObject& rhs) {
+ return &lhs != &rhs;
+}
+
+// Size of the various GC thing allocation sizes used for objects.
+struct JSObject_Slots0 : JSObject {
+ void* data[2];
+};
+struct JSObject_Slots2 : JSObject {
+ void* data[2];
+ js::Value fslots[2];
+};
+struct JSObject_Slots4 : JSObject {
+ void* data[2];
+ js::Value fslots[4];
+};
+struct JSObject_Slots6 : JSObject {
+ // Only used for extended functions which are required to have exactly six
+ // fixed slots due to JIT assumptions.
+ void* data[2];
+ js::Value fslots[6];
+};
+struct JSObject_Slots8 : JSObject {
+ void* data[2];
+ js::Value fslots[8];
+};
+struct JSObject_Slots12 : JSObject {
+ void* data[2];
+ js::Value fslots[12];
+};
+struct JSObject_Slots16 : JSObject {
+ void* data[2];
+ js::Value fslots[16];
+};
+
+namespace js {
+
+// Returns true if object may possibly use JSObject::swap. The JITs may better
+// optimize objects that can never swap (and thus change their type).
+//
+// If ObjectMayBeSwapped is false, it is safe to guard on pointer identity to
+// test immutable features of the object. For example, the target of a
+// JSFunction will not change. Note: the object can still be moved by GC.
+extern bool ObjectMayBeSwapped(const JSObject* obj);
+
+extern bool DefineFunctions(JSContext* cx, HandleObject obj,
+ const JSFunctionSpec* fs);
+
+/* ES6 draft rev 36 (2015 March 17) 7.1.1 ToPrimitive(vp[, preferredType]) */
+extern bool ToPrimitiveSlow(JSContext* cx, JSType hint, MutableHandleValue vp);
+
+inline bool ToPrimitive(JSContext* cx, MutableHandleValue vp) {
+ if (vp.isPrimitive()) {
+ return true;
+ }
+ return ToPrimitiveSlow(cx, JSTYPE_UNDEFINED, vp);
+}
+
+inline bool ToPrimitive(JSContext* cx, JSType preferredType,
+ MutableHandleValue vp) {
+ if (vp.isPrimitive()) {
+ return true;
+ }
+ return ToPrimitiveSlow(cx, preferredType, vp);
+}
+
+/*
+ * toString support. (This isn't called GetClassName because there's a macro in
+ * <windows.h> with that name.)
+ */
+MOZ_ALWAYS_INLINE const char* GetObjectClassName(JSContext* cx,
+ HandleObject obj);
+
+/*
+ * Prepare a |this| object to be returned to script. This includes replacing
+ * Windows with their corresponding WindowProxy.
+ *
+ * Helpers are also provided to first extract the |this| from specific
+ * types of environment.
+ */
+JSObject* GetThisObject(JSObject* obj);
+
+JSObject* GetThisObjectOfLexical(JSObject* env);
+
+JSObject* GetThisObjectOfWith(JSObject* env);
+
+} /* namespace js */
+
+namespace js {
+
+// ES6 9.1.15 GetPrototypeFromConstructor.
+extern bool GetPrototypeFromConstructor(JSContext* cx,
+ js::HandleObject newTarget,
+ JSProtoKey intrinsicDefaultProto,
+ js::MutableHandleObject proto);
+
+// https://tc39.github.io/ecma262/#sec-getprototypefromconstructor
+//
+// Determine which [[Prototype]] to use when creating a new object using a
+// builtin constructor.
+//
+// This sets `proto` to `nullptr` to mean "the builtin prototype object for
+// this type in the current realm", the common case.
+//
+// We could set it to `cx->global()->getOrCreatePrototype(protoKey)`, but
+// nullptr gets a fast path in e.g. js::NewObjectWithClassProtoCommon.
+//
+// intrinsicDefaultProto can be JSProto_Null if there's no appropriate
+// JSProtoKey enum; but we then select the wrong prototype object in a
+// multi-realm corner case (see bug 1515167).
+MOZ_ALWAYS_INLINE bool GetPrototypeFromBuiltinConstructor(
+ JSContext* cx, const CallArgs& args, JSProtoKey intrinsicDefaultProto,
+ js::MutableHandleObject proto) {
+ // We can skip the "prototype" lookup in the two common cases:
+ // 1. Builtin constructor called without `new`, as in `obj = Object();`.
+ // 2. Builtin constructor called with `new`, as in `obj = new Object();`.
+ //
+ // Cases that can't take the fast path include `new MySubclassOfObject()`,
+ // `new otherGlobal.Object()`, and `Reflect.construct(Object, [], Date)`.
+ if (!args.isConstructing() ||
+ &args.newTarget().toObject() == &args.callee()) {
+ MOZ_ASSERT(args.callee().hasSameRealmAs(cx));
+ proto.set(nullptr);
+ return true;
+ }
+
+ // We're calling this constructor from a derived class, retrieve the
+ // actual prototype from newTarget.
+ RootedObject newTarget(cx, &args.newTarget().toObject());
+ return GetPrototypeFromConstructor(cx, newTarget, intrinsicDefaultProto,
+ proto);
+}
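+
+/*
+ * Usage sketch (hypothetical builtin constructor body): |proto| stays
+ * nullptr on the fast path and is only non-null for derived-class or
+ * cross-realm construction:
+ *
+ *   RootedObject proto(cx);
+ *   if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_Object,
+ *                                           &proto)) {
+ *     return false;
+ *   }
+ *   // Pass |proto| (possibly nullptr) on to the allocation path.
+ */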
+
+/* ES6 draft rev 32 (2015 Feb 2) 6.2.4.5 ToPropertyDescriptor(Obj) */
+bool ToPropertyDescriptor(JSContext* cx, HandleValue descval,
+ bool checkAccessors,
+ MutableHandle<JS::PropertyDescriptor> desc);
+
+/*
+ * Throw a TypeError if desc.getter() or setter() is not
+ * callable. This performs exactly the checks omitted by ToPropertyDescriptor
+ * when checkAccessors is false.
+ */
+Result<> CheckPropertyDescriptorAccessors(JSContext* cx,
+ Handle<JS::PropertyDescriptor> desc);
+
+void CompletePropertyDescriptor(MutableHandle<JS::PropertyDescriptor> desc);
+
+/*
+ * Read property descriptors from props, as for Object.defineProperties. See
+ * ES5 15.2.3.7 steps 3-5.
+ */
+extern bool ReadPropertyDescriptors(
+ JSContext* cx, HandleObject props, bool checkAccessors,
+ MutableHandleIdVector ids, MutableHandle<PropertyDescriptorVector> descs);
+
+/* Read the name using a dynamic lookup on the scopeChain. */
+extern bool LookupName(JSContext* cx, Handle<PropertyName*> name,
+ HandleObject scopeChain, MutableHandleObject objp,
+ MutableHandleObject pobjp, PropertyResult* propp);
+
+extern bool LookupNameNoGC(JSContext* cx, PropertyName* name,
+ JSObject* scopeChain, JSObject** objp,
+ NativeObject** pobjp, PropertyResult* propp);
+
+/*
+ * Like LookupName except returns the global object if 'name' is not found in
+ * any preceding scope.
+ *
+ * Additionally, pobjp and propp are not needed by callers so they are not
+ * returned.
+ */
+extern bool LookupNameWithGlobalDefault(JSContext* cx,
+ Handle<PropertyName*> name,
+ HandleObject scopeChain,
+ MutableHandleObject objp);
+
+/*
+ * Like LookupName except returns the unqualified var object if 'name' is not
+ * found in any preceding scope. Normally the unqualified var object is the
+ * global. If the value for the name in the looked-up scope is an
+ * uninitialized lexical, an UninitializedLexicalObject is returned.
+ *
+ * Additionally, pobjp is not needed by callers so it is not returned.
+ */
+extern bool LookupNameUnqualified(JSContext* cx, Handle<PropertyName*> name,
+ HandleObject scopeChain,
+ MutableHandleObject objp);
+
+} // namespace js
+
+namespace js {
+
+bool LookupPropertyPure(JSContext* cx, JSObject* obj, jsid id,
+ NativeObject** objp, PropertyResult* propp);
+
+bool LookupOwnPropertyPure(JSContext* cx, JSObject* obj, jsid id,
+ PropertyResult* propp);
+
+bool GetPropertyPure(JSContext* cx, JSObject* obj, jsid id, Value* vp);
+
+bool GetOwnPropertyPure(JSContext* cx, JSObject* obj, jsid id, Value* vp,
+ bool* found);
+
+bool GetGetterPure(JSContext* cx, JSObject* obj, jsid id, JSFunction** fp);
+
+bool GetOwnGetterPure(JSContext* cx, JSObject* obj, jsid id, JSFunction** fp);
+
+bool GetOwnNativeGetterPure(JSContext* cx, JSObject* obj, jsid id,
+ JSNative* native);
+
+bool HasOwnDataPropertyPure(JSContext* cx, JSObject* obj, jsid id,
+ bool* result);
+
+/*
+ * Like JS::FromPropertyDescriptor, but ignore desc.object() and always set vp
+ * to an object on success.
+ *
+ * Use JS::FromPropertyDescriptor for getOwnPropertyDescriptor, since
+ * desc.object() is used to indicate whether a result was found or not. Use
+ * this instead for defineProperty: it would be senseless to define a "missing"
+ * property.
+ */
+extern bool FromPropertyDescriptorToObject(JSContext* cx,
+ Handle<JS::PropertyDescriptor> desc,
+ MutableHandleValue vp);
+
+// obj is a JSObject*, but we root it immediately up front. We do it
+// that way because we need a Rooted temporary in this method anyway.
+extern bool IsPrototypeOf(JSContext* cx, HandleObject protoObj, JSObject* obj,
+ bool* result);
+
+/* Wrap boolean, number or string as Boolean, Number or String object. */
+extern JSObject* PrimitiveToObject(JSContext* cx, const Value& v);
+extern JSProtoKey PrimitiveToProtoKey(JSContext* cx, const Value& v);
+
+} /* namespace js */
+
+namespace js {
+
+JSObject* ToObjectSlowForPropertyAccess(JSContext* cx, JS::HandleValue val,
+ int valIndex, HandleId key);
+JSObject* ToObjectSlowForPropertyAccess(JSContext* cx, JS::HandleValue val,
+ int valIndex,
+ Handle<PropertyName*> key);
+JSObject* ToObjectSlowForPropertyAccess(JSContext* cx, JS::HandleValue val,
+ int valIndex, HandleValue keyValue);
+
+MOZ_ALWAYS_INLINE JSObject* ToObjectFromStackForPropertyAccess(JSContext* cx,
+ HandleValue vp,
+ int vpIndex,
+ HandleId key) {
+ if (vp.isObject()) {
+ return &vp.toObject();
+ }
+ return js::ToObjectSlowForPropertyAccess(cx, vp, vpIndex, key);
+}
+MOZ_ALWAYS_INLINE JSObject* ToObjectFromStackForPropertyAccess(
+ JSContext* cx, HandleValue vp, int vpIndex, Handle<PropertyName*> key) {
+ if (vp.isObject()) {
+ return &vp.toObject();
+ }
+ return js::ToObjectSlowForPropertyAccess(cx, vp, vpIndex, key);
+}
+MOZ_ALWAYS_INLINE JSObject* ToObjectFromStackForPropertyAccess(
+ JSContext* cx, HandleValue vp, int vpIndex, HandleValue key) {
+ if (vp.isObject()) {
+ return &vp.toObject();
+ }
+ return js::ToObjectSlowForPropertyAccess(cx, vp, vpIndex, key);
+}
+
+/*
+ * Report a TypeError: "so-and-so is not an object".
+ * Using RequireObject is usually less code.
+ */
+extern void ReportNotObject(JSContext* cx, const Value& v);
+
+inline JSObject* RequireObject(JSContext* cx, HandleValue v) {
+ if (v.isObject()) {
+ return &v.toObject();
+ }
+ ReportNotObject(cx, v);
+ return nullptr;
+}
+
+/*
+ * Report a TypeError: "SOMETHING must be an object, got VALUE".
+ * Using RequireObject is usually less code.
+ *
+ * By default this function will attempt to report the expression which
+ * computed the value that was given as an argument. This can be disabled by
+ * using JSDVG_IGNORE_STACK.
+ */
+extern void ReportNotObject(JSContext* cx, JSErrNum err, int spindex,
+ HandleValue v);
+
+inline JSObject* RequireObject(JSContext* cx, JSErrNum err, int spindex,
+ HandleValue v) {
+ if (v.isObject()) {
+ return &v.toObject();
+ }
+ ReportNotObject(cx, err, spindex, v);
+ return nullptr;
+}
+
+extern void ReportNotObject(JSContext* cx, JSErrNum err, HandleValue v);
+
+inline JSObject* RequireObject(JSContext* cx, JSErrNum err, HandleValue v) {
+ if (v.isObject()) {
+ return &v.toObject();
+ }
+ ReportNotObject(cx, err, v);
+ return nullptr;
+}
+
+/*
+ * Report a TypeError: "N-th argument of FUN must be an object, got VALUE".
+ * Using RequireObjectArg is usually less code.
+ */
+extern void ReportNotObjectArg(JSContext* cx, const char* nth, const char* fun,
+ HandleValue v);
+
+inline JSObject* RequireObjectArg(JSContext* cx, const char* nth,
+ const char* fun, HandleValue v) {
+ if (v.isObject()) {
+ return &v.toObject();
+ }
+ ReportNotObjectArg(cx, nth, fun, v);
+ return nullptr;
+}
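+
+/*
+ * Usage sketch (hypothetical native; the method name is illustrative):
+ * these helpers collapse the check-and-report pattern into a single call:
+ *
+ *   JSObject* target = RequireObjectArg(cx, "first", "MyNamespace.method",
+ *                                       args.get(0));
+ *   if (!target) {
+ *     return false;  // TypeError already reported
+ *   }
+ */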
+
+extern bool GetFirstArgumentAsObject(JSContext* cx, const CallArgs& args,
+ const char* method,
+ MutableHandleObject objp);
+
+/* Helper for throwing, always returns false. */
+extern bool Throw(JSContext* cx, HandleId id, unsigned errorNumber,
+ const char* details = nullptr);
+
+/*
+ * ES6 rev 29 (6 Dec 2014) 7.3.13. Mark obj as non-extensible, and adjust each
+ * of obj's own properties' attributes appropriately: each property becomes
+ * non-configurable, and if level == Frozen, data properties become
+ * non-writable as well.
+ */
+extern bool SetIntegrityLevel(JSContext* cx, HandleObject obj,
+ IntegrityLevel level);
+
+inline bool FreezeObject(JSContext* cx, HandleObject obj) {
+ return SetIntegrityLevel(cx, obj, IntegrityLevel::Frozen);
+}
+
+/*
+ * ES6 rev 29 (6 Dec 2014) 7.3.14. Code shared by Object.isSealed and
+ * Object.isFrozen.
+ */
+extern bool TestIntegrityLevel(JSContext* cx, HandleObject obj,
+ IntegrityLevel level, bool* resultp);
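+
+/*
+ * Usage sketch (hypothetical): the two entry points pair up as the
+ * implementation halves of Object.freeze/Object.isFrozen-style operations:
+ *
+ *   if (!FreezeObject(cx, obj)) {
+ *     return false;
+ *   }
+ *   bool frozen;
+ *   if (!TestIntegrityLevel(cx, obj, IntegrityLevel::Frozen, &frozen)) {
+ *     return false;
+ *   }
+ *   MOZ_ASSERT(frozen);
+ */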
+
+[[nodiscard]] extern JSObject* SpeciesConstructor(
+ JSContext* cx, HandleObject obj, HandleObject defaultCtor,
+ bool (*isDefaultSpecies)(JSContext*, JSFunction*));
+
+[[nodiscard]] extern JSObject* SpeciesConstructor(
+ JSContext* cx, HandleObject obj, JSProtoKey ctorKey,
+ bool (*isDefaultSpecies)(JSContext*, JSFunction*));
+
+extern bool GetObjectFromIncumbentGlobal(JSContext* cx,
+ MutableHandleObject obj);
+
+#ifdef DEBUG
+inline bool IsObjectValueInCompartment(const Value& v, JS::Compartment* comp) {
+ if (!v.isObject()) {
+ return true;
+ }
+ return v.toObject().compartment() == comp;
+}
+#endif
+
+/*
+ * A generic trace hook that calls the object's 'trace' method.
+ *
+ * If you are introducing a new JSObject subclass, MyObject, that needs a custom
+ * JSClassOps::trace function, it's often helpful to write `trace` as a
+ * non-static member function, since `this` will be the correct type. In this
+ * case, you can use `CallTraceMethod<MyObject>` as your JSClassOps::trace
+ * value.
+ */
+template <typename ObjectSubclass>
+void CallTraceMethod(JSTracer* trc, JSObject* obj) {
+ obj->as<ObjectSubclass>().trace(trc);
+}
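+
+/*
+ * Wiring sketch (hypothetical |MyObject| with a member function
+ * |void trace(JSTracer* trc)|):
+ *
+ *   static const JSClassOps MyObjectClassOps = {
+ *       nullptr, nullptr, nullptr, nullptr,  // add/del/enumerate/newEnum
+ *       nullptr, nullptr, nullptr,           // resolve/mayResolve/finalize
+ *       nullptr, nullptr,                    // call/construct
+ *       CallTraceMethod<MyObject>,           // trace
+ *   };
+ */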
+
+#ifdef JS_HAS_CTYPES
+
+namespace ctypes {
+
+extern size_t SizeOfDataIfCDataObject(mozilla::MallocSizeOf mallocSizeOf,
+ JSObject* obj);
+
+} // namespace ctypes
+
+#endif
+
+#ifdef DEBUG
+void AssertJSClassInvariants(const JSClass* clasp);
+#endif
+
+} /* namespace js */
+
+#endif /* vm_JSObject_h */
diff --git a/js/src/vm/JSScript-inl.h b/js/src/vm/JSScript-inl.h
new file mode 100644
index 0000000000..2010f42060
--- /dev/null
+++ b/js/src/vm/JSScript-inl.h
@@ -0,0 +1,245 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JSScript_inl_h
+#define vm_JSScript_inl_h
+
+#include "vm/JSScript.h"
+
+#include <utility>
+
+#include "jit/IonScript.h"
+#include "jit/JitScript.h"
+#include "vm/RegExpObject.h"
+#include "wasm/AsmJS.h"
+
+namespace js {
+
+ScriptCounts::ScriptCounts()
+ : pcCounts_(), throwCounts_(), ionCounts_(nullptr) {}
+
+ScriptCounts::ScriptCounts(PCCountsVector&& jumpTargets)
+ : pcCounts_(std::move(jumpTargets)), throwCounts_(), ionCounts_(nullptr) {}
+
+ScriptCounts::ScriptCounts(ScriptCounts&& src)
+ : pcCounts_(std::move(src.pcCounts_)),
+ throwCounts_(std::move(src.throwCounts_)),
+ ionCounts_(std::move(src.ionCounts_)) {
+ src.ionCounts_ = nullptr;
+}
+
+ScriptCounts& ScriptCounts::operator=(ScriptCounts&& src) {
+ pcCounts_ = std::move(src.pcCounts_);
+ throwCounts_ = std::move(src.throwCounts_);
+ ionCounts_ = std::move(src.ionCounts_);
+ src.ionCounts_ = nullptr;
+ return *this;
+}
+
+ScriptCounts::~ScriptCounts() { js_delete(ionCounts_); }
+
+ScriptAndCounts::ScriptAndCounts(JSScript* script)
+ : script(script), scriptCounts() {
+ script->releaseScriptCounts(&scriptCounts);
+}
+
+ScriptAndCounts::ScriptAndCounts(ScriptAndCounts&& sac)
+ : script(std::move(sac.script)),
+ scriptCounts(std::move(sac.scriptCounts)) {}
+
+void SetFrameArgumentsObject(JSContext* cx, AbstractFramePtr frame,
+ HandleScript script, JSObject* argsobj);
+
+inline void ScriptWarmUpData::initEnclosingScript(BaseScript* enclosingScript) {
+ MOZ_ASSERT(data_ == ResetState());
+ setTaggedPtr<EnclosingScriptTag>(enclosingScript);
+ static_assert(std::is_base_of_v<gc::TenuredCell, BaseScript>,
+ "BaseScript must be TenuredCell to avoid post-barriers");
+}
+inline void ScriptWarmUpData::clearEnclosingScript() {
+ gc::PreWriteBarrier(toEnclosingScript());
+ data_ = ResetState();
+}
+
+inline void ScriptWarmUpData::initEnclosingScope(Scope* enclosingScope) {
+ MOZ_ASSERT(data_ == ResetState());
+ setTaggedPtr<EnclosingScopeTag>(enclosingScope);
+ static_assert(std::is_base_of_v<gc::TenuredCell, Scope>,
+ "Scope must be TenuredCell to avoid post-barriers");
+}
+inline void ScriptWarmUpData::clearEnclosingScope() {
+ gc::PreWriteBarrier(toEnclosingScope());
+ data_ = ResetState();
+}
+
+inline JSPrincipals* BaseScript::principals() const {
+ return realm()->principals();
+}
+
+inline JSScript* BaseScript::asJSScript() {
+ MOZ_ASSERT(hasBytecode());
+ return static_cast<JSScript*>(this);
+}
+
+} // namespace js
+
+inline JSFunction* JSScript::getFunction(js::GCThingIndex index) const {
+ JSObject* obj = getObject(index);
+ MOZ_RELEASE_ASSERT(obj->is<JSFunction>(), "Script object is not JSFunction");
+ JSFunction* fun = &obj->as<JSFunction>();
+ MOZ_ASSERT_IF(fun->isNativeFun(), IsAsmJSModuleNative(fun->native()));
+ return fun;
+}
+
+inline JSFunction* JSScript::getFunction(jsbytecode* pc) const {
+ return getFunction(GET_GCTHING_INDEX(pc));
+}
+
+inline js::RegExpObject* JSScript::getRegExp(js::GCThingIndex index) const {
+ JSObject* obj = getObject(index);
+ MOZ_RELEASE_ASSERT(obj->is<js::RegExpObject>(),
+ "Script object is not RegExpObject");
+ return &obj->as<js::RegExpObject>();
+}
+
+inline js::RegExpObject* JSScript::getRegExp(jsbytecode* pc) const {
+ JSObject* obj = getObject(pc);
+ MOZ_RELEASE_ASSERT(obj->is<js::RegExpObject>(),
+ "Script object is not RegExpObject");
+ return &obj->as<js::RegExpObject>();
+}
+
+inline js::GlobalObject& JSScript::global() const {
+ /*
+ * A JSScript always marks its realm's global so we can assert it's non-null
+ * here. We don't need a read barrier here for the same reason
+ * JSObject::nonCCWGlobal doesn't need one.
+ */
+ return *realm()->unsafeUnbarrieredMaybeGlobal();
+}
+
+inline bool JSScript::hasGlobal(const js::GlobalObject* global) const {
+ return global == realm()->unsafeUnbarrieredMaybeGlobal();
+}
+
+inline js::LexicalScope* JSScript::maybeNamedLambdaScope() const {
+ // Functions created dynamically via 'new Function' are considered named
+ // lambdas, but they do not have the named lambda scope of
+ // textually-created named lambdas.
+ js::Scope* scope = outermostScope();
+ if (scope->kind() == js::ScopeKind::NamedLambda ||
+ scope->kind() == js::ScopeKind::StrictNamedLambda) {
+ MOZ_ASSERT_IF(!strict(), scope->kind() == js::ScopeKind::NamedLambda);
+ MOZ_ASSERT_IF(strict(), scope->kind() == js::ScopeKind::StrictNamedLambda);
+ return &scope->as<js::LexicalScope>();
+ }
+ return nullptr;
+}
+
+inline js::Shape* JSScript::initialEnvironmentShape() const {
+ js::Scope* scope = bodyScope();
+ if (scope->is<js::FunctionScope>()) {
+ if (js::Shape* envShape = scope->environmentShape()) {
+ return envShape;
+ }
+ if (js::Scope* namedLambdaScope = maybeNamedLambdaScope()) {
+ return namedLambdaScope->environmentShape();
+ }
+ } else if (scope->is<js::EvalScope>()) {
+ return scope->environmentShape();
+ }
+ return nullptr;
+}
+
+inline bool JSScript::isDebuggee() const {
+ return realm()->debuggerObservesAllExecution() || hasDebugScript();
+}
+
+inline bool js::BaseScript::hasBaselineScript() const {
+ return hasJitScript() && jitScript()->hasBaselineScript();
+}
+
+inline bool js::BaseScript::hasIonScript() const {
+ return hasJitScript() && jitScript()->hasIonScript();
+}
+
+inline bool JSScript::isIonCompilingOffThread() const {
+ return hasJitScript() && jitScript()->isIonCompilingOffThread();
+}
+
+inline bool JSScript::canBaselineCompile() const {
+ bool disabled = baselineDisabled();
+#ifdef DEBUG
+ if (hasJitScript()) {
+ bool jitScriptDisabled =
+ jitScript()->baselineScript_ == js::jit::BaselineDisabledScriptPtr;
+ MOZ_ASSERT(disabled == jitScriptDisabled);
+ }
+#endif
+ return !disabled;
+}
+
+inline bool JSScript::canIonCompile() const {
+ bool disabled = ionDisabled();
+#ifdef DEBUG
+ if (hasJitScript()) {
+ bool jitScriptDisabled =
+ jitScript()->ionScript_ == js::jit::IonDisabledScriptPtr;
+ MOZ_ASSERT(disabled == jitScriptDisabled);
+ }
+#endif
+ return !disabled;
+}
+
+inline void JSScript::disableBaselineCompile() {
+ MOZ_ASSERT(!hasBaselineScript());
+ setFlag(MutableFlags::BaselineDisabled);
+ if (hasJitScript()) {
+ jitScript()->setBaselineScriptImpl(this,
+ js::jit::BaselineDisabledScriptPtr);
+ }
+}
+
+inline void JSScript::disableIon() {
+ setFlag(MutableFlags::IonDisabled);
+ if (hasJitScript()) {
+ jitScript()->setIonScriptImpl(this, js::jit::IonDisabledScriptPtr);
+ }
+}
+
+inline js::jit::BaselineScript* JSScript::baselineScript() const {
+ return jitScript()->baselineScript();
+}
+
+inline js::jit::IonScript* JSScript::ionScript() const {
+ return jitScript()->ionScript();
+}
+
+inline uint32_t JSScript::getWarmUpCount() const {
+ if (warmUpData_.isWarmUpCount()) {
+ return warmUpData_.toWarmUpCount();
+ }
+ return warmUpData_.toJitScript()->warmUpCount();
+}
+
+inline void JSScript::incWarmUpCounter() {
+ if (warmUpData_.isWarmUpCount()) {
+ warmUpData_.incWarmUpCount();
+ } else {
+ warmUpData_.toJitScript()->incWarmUpCount();
+ }
+}
+
+inline void JSScript::resetWarmUpCounterForGC() {
+ incWarmUpResetCounter();
+ if (warmUpData_.isWarmUpCount()) {
+ warmUpData_.resetWarmUpCount(0);
+ } else {
+ warmUpData_.toJitScript()->resetWarmUpCount(0);
+ }
+}
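+
+/*
+ * Representation sketch assumed by the accessors above: |warmUpData_| is a
+ * tagged union that stores a plain warm-up count until a JitScript exists,
+ * after which the count lives in the JitScript. Callers therefore always go
+ * through the accessors, e.g.:
+ *
+ *   uint32_t count = script->getWarmUpCount();  // valid in either state
+ */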
+
+#endif /* vm_JSScript_inl_h */
diff --git a/js/src/vm/JSScript.cpp b/js/src/vm/JSScript.cpp
new file mode 100644
index 0000000000..cac3c25a48
--- /dev/null
+++ b/js/src/vm/JSScript.cpp
@@ -0,0 +1,3779 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS script operations.
+ */
+
+#include "vm/JSScript-inl.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Span.h" // mozilla::{Span,Span}
+#include "mozilla/Sprintf.h"
+#include "mozilla/Utf8.h"
+#include "mozilla/Vector.h"
+
+#include <algorithm>
+#include <new>
+#include <string.h>
+#include <type_traits>
+#include <utility>
+
+#include "jstypes.h"
+
+#include "frontend/BytecodeSection.h"
+#include "frontend/CompilationStencil.h" // frontend::CompilationStencil
+#include "frontend/FrontendContext.h" // AutoReportFrontendContext
+#include "frontend/ParseContext.h"
+#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator
+#include "frontend/Stencil.h" // DumpFunctionFlagsItems, DumpImmutableScriptFlags
+#include "frontend/StencilXdr.h" // XDRStencilEncoder
+#include "gc/GCContext.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CacheIRHealth.h"
+#include "jit/Ion.h"
+#include "jit/IonScript.h"
+#include "jit/JitCode.h"
+#include "jit/JitOptions.h"
+#include "jit/JitRuntime.h"
+#include "js/CharacterEncoding.h" // JS_EncodeStringToUTF8
+#include "js/CompileOptions.h"
+#include "js/experimental/SourceHook.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/HeapAPI.h" // JS::GCCellPtr
+#include "js/MemoryMetrics.h"
+#include "js/Printer.h" // js::GenericPrinter, js::Fprinter, js::Sprinter, js::QuoteString
+#include "js/Transcoding.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h" // JS::UniqueChars
+#include "js/Value.h" // JS::Value
+#include "util/Poison.h"
+#include "util/StringBuffer.h"
+#include "util/Text.h"
+#include "vm/BigIntType.h" // JS::BigInt
+#include "vm/BytecodeIterator.h"
+#include "vm/BytecodeLocation.h"
+#include "vm/BytecodeUtil.h" // Disassemble
+#include "vm/Compression.h"
+#include "vm/HelperThreadState.h" // js::RunPendingSourceCompressions
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/JSONPrinter.h" // JSONPrinter
+#include "vm/Opcodes.h"
+#include "vm/Scope.h" // Scope
+#include "vm/SharedImmutableStringsCache.h"
+#include "vm/StencilEnums.h" // TryNote, TryNoteKind, ScopeNote
+#include "vm/StringType.h" // JSString, JSAtom
+#include "vm/Time.h" // AutoIncrementalTimer
+#include "vm/ToSource.h" // JS::ValueToSource
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+
+#include "gc/Marking-inl.h"
+#include "vm/BytecodeIterator-inl.h"
+#include "vm/BytecodeLocation-inl.h"
+#include "vm/Compartment-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/SharedImmutableStringsCache-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+
+using mozilla::CheckedInt;
+using mozilla::Maybe;
+using mozilla::PodCopy;
+using mozilla::PointerRangeSize;
+using mozilla::Utf8AsUnsignedChars;
+using mozilla::Utf8Unit;
+
+using JS::CompileOptions;
+using JS::ReadOnlyCompileOptions;
+using JS::SourceText;
+
+bool js::BaseScript::isUsingInterpreterTrampoline(JSRuntime* rt) const {
+ return jitCodeRaw() == rt->jitRuntime()->interpreterStub().value;
+}
+
+js::ScriptSource* js::BaseScript::maybeForwardedScriptSource() const {
+ return MaybeForwarded(sourceObject())->source();
+}
+
+void js::BaseScript::setEnclosingScript(BaseScript* enclosingScript) {
+ MOZ_ASSERT(enclosingScript);
+ warmUpData_.initEnclosingScript(enclosingScript);
+}
+
+void js::BaseScript::setEnclosingScope(Scope* enclosingScope) {
+ if (warmUpData_.isEnclosingScript()) {
+ warmUpData_.clearEnclosingScript();
+ }
+
+ MOZ_ASSERT(enclosingScope);
+ warmUpData_.initEnclosingScope(enclosingScope);
+}
+
+void js::BaseScript::finalize(JS::GCContext* gcx) {
+ // Scripts with bytecode may have optional data stored in per-runtime or
+ // per-zone maps. Note that a failed compilation must not have entries since
+ // the script itself will not be marked as having bytecode.
+ if (hasBytecode()) {
+ JSScript* script = this->asJSScript();
+
+ if (coverage::IsLCovEnabled()) {
+ coverage::CollectScriptCoverage(script, true);
+ }
+
+ script->destroyScriptCounts();
+ }
+
+ {
+ JSRuntime* rt = gcx->runtime();
+ if (rt->hasJitRuntime() && rt->jitRuntime()->hasInterpreterEntryMap()) {
+ rt->jitRuntime()->getInterpreterEntryMap()->remove(this);
+ }
+
+ rt->geckoProfiler().onScriptFinalized(this);
+ }
+
+#ifdef MOZ_VTUNE
+ if (zone()->scriptVTuneIdMap) {
+ // Note: we should only get here if the VTune JIT profiler is running.
+ zone()->scriptVTuneIdMap->remove(this);
+ }
+#endif
+
+ if (warmUpData_.isJitScript()) {
+ JSScript* script = this->asJSScript();
+#ifdef JS_CACHEIR_SPEW
+ maybeUpdateWarmUpCount(script);
+#endif
+ script->releaseJitScriptOnFinalize(gcx);
+ }
+
+#ifdef JS_CACHEIR_SPEW
+ if (hasBytecode()) {
+ maybeSpewScriptFinalWarmUpCount(this->asJSScript());
+ }
+#endif
+
+ if (data_) {
+ // We don't need to trigger any barriers here, just free the memory.
+ size_t size = data_->allocationSize();
+ AlwaysPoison(data_, JS_POISONED_JSSCRIPT_DATA_PATTERN, size,
+ MemCheckKind::MakeNoAccess);
+ gcx->free_(this, data_, size, MemoryUse::ScriptPrivateData);
+ }
+
+ freeSharedData();
+}
+
+js::Scope* js::BaseScript::releaseEnclosingScope() {
+ Scope* enclosing = warmUpData_.toEnclosingScope();
+ warmUpData_.clearEnclosingScope();
+ return enclosing;
+}
+
+void js::BaseScript::swapData(UniquePtr<PrivateScriptData>& other) {
+ if (data_) {
+ RemoveCellMemory(this, data_->allocationSize(),
+ MemoryUse::ScriptPrivateData);
+ }
+
+ PrivateScriptData* old = data_;
+ data_.set(zone(), other.release());
+ other.reset(old);
+
+ if (data_) {
+ AddCellMemory(this, data_->allocationSize(), MemoryUse::ScriptPrivateData);
+ }
+}
+
+js::Scope* js::BaseScript::enclosingScope() const {
+ MOZ_ASSERT(!warmUpData_.isEnclosingScript(),
+ "Enclosing scope is not computed yet");
+
+ if (warmUpData_.isEnclosingScope()) {
+ return warmUpData_.toEnclosingScope();
+ }
+
+ MOZ_ASSERT(data_, "Script doesn't seem to be compiled");
+
+ return gcthings()[js::GCThingIndex::outermostScopeIndex()]
+ .as<Scope>()
+ .enclosing();
+}
+
+size_t JSScript::numAlwaysLiveFixedSlots() const {
+ if (bodyScope()->is<js::FunctionScope>()) {
+ return bodyScope()->as<js::FunctionScope>().nextFrameSlot();
+ }
+ if (bodyScope()->is<js::ModuleScope>()) {
+ return bodyScope()->as<js::ModuleScope>().nextFrameSlot();
+ }
+ if (bodyScope()->is<js::EvalScope>() &&
+ bodyScope()->kind() == ScopeKind::StrictEval) {
+ return bodyScope()->as<js::EvalScope>().nextFrameSlot();
+ }
+ return 0;
+}
+
+unsigned JSScript::numArgs() const {
+ if (bodyScope()->is<js::FunctionScope>()) {
+ return bodyScope()->as<js::FunctionScope>().numPositionalFormalParameters();
+ }
+ return 0;
+}
+
+bool JSScript::functionHasParameterExprs() const {
+ // Only functions have parameters.
+ js::Scope* scope = bodyScope();
+ if (!scope->is<js::FunctionScope>()) {
+ return false;
+ }
+ return scope->as<js::FunctionScope>().hasParameterExprs();
+}
+
+bool JSScript::isModule() const { return bodyScope()->is<js::ModuleScope>(); }
+
+js::ModuleObject* JSScript::module() const {
+ MOZ_ASSERT(isModule());
+ return bodyScope()->as<js::ModuleScope>().module();
+}
+
+bool JSScript::isGlobalCode() const {
+ return bodyScope()->is<js::GlobalScope>();
+}
+
+js::VarScope* JSScript::functionExtraBodyVarScope() const {
+ MOZ_ASSERT(functionHasExtraBodyVarScope());
+ for (JS::GCCellPtr gcThing : gcthings()) {
+ if (!gcThing.is<js::Scope>()) {
+ continue;
+ }
+ js::Scope* scope = &gcThing.as<js::Scope>();
+ if (scope->kind() == js::ScopeKind::FunctionBodyVar) {
+ return &scope->as<js::VarScope>();
+ }
+ }
+ MOZ_CRASH("Function extra body var scope not found");
+}
+
+bool JSScript::needsBodyEnvironment() const {
+ for (JS::GCCellPtr gcThing : gcthings()) {
+ if (!gcThing.is<js::Scope>()) {
+ continue;
+ }
+ js::Scope* scope = &gcThing.as<js::Scope>();
+ if (ScopeKindIsInBody(scope->kind()) && scope->hasEnvironment()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool JSScript::isDirectEvalInFunction() const {
+ if (!isForEval()) {
+ return false;
+ }
+ return bodyScope()->hasOnChain(js::ScopeKind::Function);
+}
+
+// Initialize the optional arrays in the trailing allocation. This is a set of
+// offsets that delimit each optional array followed by the arrays themselves.
+// See comment before 'ImmutableScriptData' for more details.
+void ImmutableScriptData::initOptionalArrays(Offset* pcursor,
+ uint32_t numResumeOffsets,
+ uint32_t numScopeNotes,
+ uint32_t numTryNotes) {
+ Offset cursor = (*pcursor);
+
+ // The byte arrays must have already been padded.
+ MOZ_ASSERT(isAlignedOffset<CodeNoteAlign>(cursor),
+ "Bytecode and source notes should be padded to keep alignment");
+
+ // Each non-empty optional array will need an offset to its end.
+ unsigned numOptionalArrays = unsigned(numResumeOffsets > 0) +
+ unsigned(numScopeNotes > 0) +
+ unsigned(numTryNotes > 0);
+
+ // Default-initialize the optional-offsets.
+ initElements<Offset>(cursor, numOptionalArrays);
+ cursor += numOptionalArrays * sizeof(Offset);
+
+ // Offset between the optional-offsets table and the optional arrays. This
+ // is later used to access the optional-offsets table as well as the first
+ // optional array.
+ optArrayOffset_ = cursor;
+
+ // Each optional array that follows must store an end-offset in the offset
+ // table. Assign table entries by using this 'offsetIndex'. The index 0 is
+ // reserved for the implicit value 'optArrayOffset'.
+ int offsetIndex = 0;
+
+ // Default-initialize optional 'resumeOffsets'.
+ MOZ_ASSERT(resumeOffsetsOffset() == cursor);
+ if (numResumeOffsets > 0) {
+ initElements<uint32_t>(cursor, numResumeOffsets);
+ cursor += numResumeOffsets * sizeof(uint32_t);
+ setOptionalOffset(++offsetIndex, cursor);
+ }
+ flagsRef().resumeOffsetsEndIndex = offsetIndex;
+
+ // Default-initialize optional 'scopeNotes'.
+ MOZ_ASSERT(scopeNotesOffset() == cursor);
+ if (numScopeNotes > 0) {
+ initElements<ScopeNote>(cursor, numScopeNotes);
+ cursor += numScopeNotes * sizeof(ScopeNote);
+ setOptionalOffset(++offsetIndex, cursor);
+ }
+ flagsRef().scopeNotesEndIndex = offsetIndex;
+
+ // Default-initialize optional 'tryNotes'
+ MOZ_ASSERT(tryNotesOffset() == cursor);
+ if (numTryNotes > 0) {
+ initElements<TryNote>(cursor, numTryNotes);
+ cursor += numTryNotes * sizeof(TryNote);
+ setOptionalOffset(++offsetIndex, cursor);
+ }
+ flagsRef().tryNotesEndIndex = offsetIndex;
+
+ MOZ_ASSERT(endOffset() == cursor);
+ (*pcursor) = cursor;
+}
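+
+// Layout sketch of the trailing allocation built above (arrays are present
+// only when non-empty):
+//
+//   [Flags][bytecode][source notes][padding to uint32_t alignment]
+//   [optional-offsets table]  <- one end-Offset per non-empty array
+//   [resumeOffsets?][scopeNotes?][tryNotes?]
+//
+// optArrayOffset_ marks the boundary between the offsets table and the
+// first optional array.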
+
+ImmutableScriptData::ImmutableScriptData(uint32_t codeLength,
+ uint32_t noteLength,
+ uint32_t numResumeOffsets,
+ uint32_t numScopeNotes,
+ uint32_t numTryNotes)
+ : codeLength_(codeLength) {
+ // Variable-length data begins immediately after ImmutableScriptData itself.
+ Offset cursor = sizeof(ImmutableScriptData);
+
+ // The following arrays are byte-aligned with additional padding to ensure
+ // that together they maintain uint32_t-alignment.
+ {
+ MOZ_ASSERT(isAlignedOffset<CodeNoteAlign>(cursor));
+
+ // Zero-initialize 'flags'
+ MOZ_ASSERT(isAlignedOffset<Flags>(cursor));
+ new (offsetToPointer<void>(cursor)) Flags{};
+ cursor += sizeof(Flags);
+
+ initElements<jsbytecode>(cursor, codeLength);
+ cursor += codeLength * sizeof(jsbytecode);
+
+ initElements<SrcNote>(cursor, noteLength);
+ cursor += noteLength * sizeof(SrcNote);
+
+ MOZ_ASSERT(isAlignedOffset<CodeNoteAlign>(cursor));
+ }
+
+ // Initialization for remaining arrays.
+ initOptionalArrays(&cursor, numResumeOffsets, numScopeNotes, numTryNotes);
+
+ // Check that we correctly recompute the expected values.
+ MOZ_ASSERT(this->codeLength() == codeLength);
+ MOZ_ASSERT(this->noteLength() == noteLength);
+
+ // Sanity check
+ MOZ_ASSERT(endOffset() == cursor);
+}
+
+void js::FillImmutableFlagsFromCompileOptionsForTopLevel(
+ const ReadOnlyCompileOptions& options, ImmutableScriptFlags& flags) {
+ using ImmutableFlags = ImmutableScriptFlagsEnum;
+
+ js::FillImmutableFlagsFromCompileOptionsForFunction(options, flags);
+
+ flags.setFlag(ImmutableFlags::TreatAsRunOnce, options.isRunOnce);
+ flags.setFlag(ImmutableFlags::NoScriptRval, options.noScriptRval);
+}
+
+void js::FillImmutableFlagsFromCompileOptionsForFunction(
+ const ReadOnlyCompileOptions& options, ImmutableScriptFlags& flags) {
+ using ImmutableFlags = ImmutableScriptFlagsEnum;
+
+ flags.setFlag(ImmutableFlags::SelfHosted, options.selfHostingMode);
+ flags.setFlag(ImmutableFlags::ForceStrict, options.forceStrictMode());
+ flags.setFlag(ImmutableFlags::HasNonSyntacticScope,
+ options.nonSyntacticScope);
+}
+
+// Check whether the flags match the compile options, for the flags set by
+// FillImmutableFlagsFromCompileOptionsForTopLevel above.
+//
+// If isMultiDecode is true, this checks only the minimal set of
+// CompileOptions that is shared across multiple scripts in
+// JS::DecodeMultiStencilsOffThread.
+// Other options should be checked when getting the decoded script from the
+// cache.
+bool js::CheckCompileOptionsMatch(const ReadOnlyCompileOptions& options,
+ ImmutableScriptFlags flags,
+ bool isMultiDecode) {
+ using ImmutableFlags = ImmutableScriptFlagsEnum;
+
+ bool selfHosted = !!(flags & uint32_t(ImmutableFlags::SelfHosted));
+ bool forceStrict = !!(flags & uint32_t(ImmutableFlags::ForceStrict));
+ bool hasNonSyntacticScope =
+ !!(flags & uint32_t(ImmutableFlags::HasNonSyntacticScope));
+ bool noScriptRval = !!(flags & uint32_t(ImmutableFlags::NoScriptRval));
+ bool treatAsRunOnce = !!(flags & uint32_t(ImmutableFlags::TreatAsRunOnce));
+
+ return options.selfHostingMode == selfHosted &&
+ options.noScriptRval == noScriptRval &&
+ options.isRunOnce == treatAsRunOnce &&
+ (isMultiDecode || (options.forceStrictMode() == forceStrict &&
+ options.nonSyntacticScope == hasNonSyntacticScope));
+}
+
+JS_PUBLIC_API bool JS::CheckCompileOptionsMatch(
+ const ReadOnlyCompileOptions& options, JSScript* script) {
+ return js::CheckCompileOptionsMatch(options, script->immutableFlags(), false);
+}
+
+bool JSScript::initScriptCounts(JSContext* cx) {
+ MOZ_ASSERT(!hasScriptCounts());
+
+ // Record all pc which are the first instruction of a basic block.
+ mozilla::Vector<jsbytecode*, 16, SystemAllocPolicy> jumpTargets;
+
+ js::BytecodeLocation main = mainLocation();
+ AllBytecodesIterable iterable(this);
+ for (auto& loc : iterable) {
+ if (loc.isJumpTarget() || loc == main) {
+ if (!jumpTargets.append(loc.toRawBytecode())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+ }
+
+ // Initialize all PCCounts counters to 0.
+ ScriptCounts::PCCountsVector base;
+ if (!base.reserve(jumpTargets.length())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ for (size_t i = 0; i < jumpTargets.length(); i++) {
+ base.infallibleEmplaceBack(pcToOffset(jumpTargets[i]));
+ }
+
+ // Create zone's scriptCountsMap if necessary.
+ if (!zone()->scriptCountsMap) {
+ auto map = cx->make_unique<ScriptCountsMap>();
+ if (!map) {
+ return false;
+ }
+
+ zone()->scriptCountsMap = std::move(map);
+ }
+
+ // Allocate the ScriptCounts.
+ UniqueScriptCounts sc = cx->make_unique<ScriptCounts>(std::move(base));
+ if (!sc) {
+ return false;
+ }
+
+ MOZ_ASSERT(this->hasBytecode());
+
+ // Register the current ScriptCounts in the zone's map.
+ if (!zone()->scriptCountsMap->putNew(this, std::move(sc))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Safe to set this; we can't fail after this point.
+ setHasScriptCounts();
+
+ // Enable interrupts in any interpreter frames running on this script. This
+ // is used to let the interpreter increment the PCCounts, if present.
+ for (ActivationIterator iter(cx); !iter.done(); ++iter) {
+ if (iter->isInterpreter()) {
+ iter->asInterpreter()->enableInterruptsIfRunning(this);
+ }
+ }
+
+ return true;
+}
+
+static inline ScriptCountsMap::Ptr GetScriptCountsMapEntry(JSScript* script) {
+ MOZ_ASSERT(script->hasScriptCounts());
+ ScriptCountsMap::Ptr p = script->zone()->scriptCountsMap->lookup(script);
+ MOZ_ASSERT(p);
+ return p;
+}
+
+ScriptCounts& JSScript::getScriptCounts() {
+ ScriptCountsMap::Ptr p = GetScriptCountsMapEntry(this);
+ return *p->value();
+}
+
+js::PCCounts* ScriptCounts::maybeGetPCCounts(size_t offset) {
+ PCCounts searched = PCCounts(offset);
+ PCCounts* elem =
+ std::lower_bound(pcCounts_.begin(), pcCounts_.end(), searched);
+ if (elem == pcCounts_.end() || elem->pcOffset() != offset) {
+ return nullptr;
+ }
+ return elem;
+}
+
+const js::PCCounts* ScriptCounts::maybeGetPCCounts(size_t offset) const {
+ PCCounts searched = PCCounts(offset);
+ const PCCounts* elem =
+ std::lower_bound(pcCounts_.begin(), pcCounts_.end(), searched);
+ if (elem == pcCounts_.end() || elem->pcOffset() != offset) {
+ return nullptr;
+ }
+ return elem;
+}
+
+js::PCCounts* ScriptCounts::getImmediatePrecedingPCCounts(size_t offset) {
+ PCCounts searched = PCCounts(offset);
+ PCCounts* elem =
+ std::lower_bound(pcCounts_.begin(), pcCounts_.end(), searched);
+ if (elem == pcCounts_.end()) {
+ return &pcCounts_.back();
+ }
+ if (elem->pcOffset() == offset) {
+ return elem;
+ }
+ if (elem != pcCounts_.begin()) {
+ return elem - 1;
+ }
+ return nullptr;
+}
+
+const js::PCCounts* ScriptCounts::maybeGetThrowCounts(size_t offset) const {
+ PCCounts searched = PCCounts(offset);
+ const PCCounts* elem =
+ std::lower_bound(throwCounts_.begin(), throwCounts_.end(), searched);
+ if (elem == throwCounts_.end() || elem->pcOffset() != offset) {
+ return nullptr;
+ }
+ return elem;
+}
+
+const js::PCCounts* ScriptCounts::getImmediatePrecedingThrowCounts(
+ size_t offset) const {
+ PCCounts searched = PCCounts(offset);
+ const PCCounts* elem =
+ std::lower_bound(throwCounts_.begin(), throwCounts_.end(), searched);
+ if (elem == throwCounts_.end()) {
+ if (throwCounts_.begin() == throwCounts_.end()) {
+ return nullptr;
+ }
+ return &throwCounts_.back();
+ }
+ if (elem->pcOffset() == offset) {
+ return elem;
+ }
+ if (elem != throwCounts_.begin()) {
+ return elem - 1;
+ }
+ return nullptr;
+}
+
+js::PCCounts* ScriptCounts::getThrowCounts(size_t offset) {
+ PCCounts searched = PCCounts(offset);
+ PCCounts* elem =
+ std::lower_bound(throwCounts_.begin(), throwCounts_.end(), searched);
+ if (elem == throwCounts_.end() || elem->pcOffset() != offset) {
+ elem = throwCounts_.insert(elem, searched);
+ }
+ return elem;
+}
+
+size_t ScriptCounts::sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ size_t size = mallocSizeOf(this);
+ size += pcCounts_.sizeOfExcludingThis(mallocSizeOf);
+ size += throwCounts_.sizeOfExcludingThis(mallocSizeOf);
+ if (ionCounts_) {
+ size += ionCounts_->sizeOfIncludingThis(mallocSizeOf);
+ }
+ return size;
+}
+
+js::PCCounts* JSScript::maybeGetPCCounts(jsbytecode* pc) {
+ MOZ_ASSERT(containsPC(pc));
+ return getScriptCounts().maybeGetPCCounts(pcToOffset(pc));
+}
+
+const js::PCCounts* JSScript::maybeGetThrowCounts(jsbytecode* pc) {
+ MOZ_ASSERT(containsPC(pc));
+ return getScriptCounts().maybeGetThrowCounts(pcToOffset(pc));
+}
+
+js::PCCounts* JSScript::getThrowCounts(jsbytecode* pc) {
+ MOZ_ASSERT(containsPC(pc));
+ return getScriptCounts().getThrowCounts(pcToOffset(pc));
+}
+
+uint64_t JSScript::getHitCount(jsbytecode* pc) {
+ MOZ_ASSERT(containsPC(pc));
+ if (pc < main()) {
+ pc = main();
+ }
+
+ ScriptCounts& sc = getScriptCounts();
+ size_t targetOffset = pcToOffset(pc);
+ const js::PCCounts* baseCount =
+ sc.getImmediatePrecedingPCCounts(targetOffset);
+ if (!baseCount) {
+ return 0;
+ }
+ if (baseCount->pcOffset() == targetOffset) {
+ return baseCount->numExec();
+ }
+ MOZ_ASSERT(baseCount->pcOffset() < targetOffset);
+ uint64_t count = baseCount->numExec();
+ do {
+ const js::PCCounts* throwCount =
+ sc.getImmediatePrecedingThrowCounts(targetOffset);
+ if (!throwCount) {
+ return count;
+ }
+ if (throwCount->pcOffset() <= baseCount->pcOffset()) {
+ return count;
+ }
+ count -= throwCount->numExec();
+ targetOffset = throwCount->pcOffset() - 1;
+ } while (true);
+}
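+
+// Worked example for the loop above (hypothetical numbers): if the basic
+// block starting at offset 10 executed 100 times and 3 throws were recorded
+// at offset 20, then a pc at offset 30 within the same block reports
+// 100 - 3 = 97 hits, since each throw left the block before reaching it.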
+
+void JSScript::addIonCounts(jit::IonScriptCounts* ionCounts) {
+ ScriptCounts& sc = getScriptCounts();
+ if (sc.ionCounts_) {
+ ionCounts->setPrevious(sc.ionCounts_);
+ }
+ sc.ionCounts_ = ionCounts;
+}
+
+jit::IonScriptCounts* JSScript::getIonCounts() {
+ return getScriptCounts().ionCounts_;
+}
+
+void JSScript::releaseScriptCounts(ScriptCounts* counts) {
+ ScriptCountsMap::Ptr p = GetScriptCountsMapEntry(this);
+ *counts = std::move(*p->value().get());
+ zone()->scriptCountsMap->remove(p);
+ clearHasScriptCounts();
+}
+
+void JSScript::destroyScriptCounts() {
+ if (hasScriptCounts()) {
+ ScriptCounts scriptCounts;
+ releaseScriptCounts(&scriptCounts);
+ }
+}
+
+void JSScript::resetScriptCounts() {
+ if (!hasScriptCounts()) {
+ return;
+ }
+
+ ScriptCounts& sc = getScriptCounts();
+
+ for (PCCounts& elem : sc.pcCounts_) {
+ elem.numExec() = 0;
+ }
+
+ for (PCCounts& elem : sc.throwCounts_) {
+ elem.numExec() = 0;
+ }
+}
+
+void ScriptSourceObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ MOZ_ASSERT(gcx->onMainThread());
+ ScriptSourceObject* sso = &obj->as<ScriptSourceObject>();
+ sso->source()->Release();
+
+ // Clear the private value, calling the release hook if necessary.
+ sso->setPrivate(gcx->runtime(), UndefinedValue());
+}
+
+static const JSClassOps ScriptSourceObjectClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ ScriptSourceObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass ScriptSourceObject::class_ = {
+ "ScriptSource",
+ JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS) | JSCLASS_FOREGROUND_FINALIZE,
+ &ScriptSourceObjectClassOps};
+
+ScriptSourceObject* ScriptSourceObject::create(JSContext* cx,
+ ScriptSource* source) {
+ ScriptSourceObject* obj =
+ NewObjectWithGivenProto<ScriptSourceObject>(cx, nullptr);
+ if (!obj) {
+ return nullptr;
+ }
+
+ // The matching decref is in ScriptSourceObject::finalize.
+ obj->initReservedSlot(SOURCE_SLOT, PrivateValue(do_AddRef(source).take()));
+
+ // The slots below should be populated by a call to initFromOptions. Poison
+ // them.
+ obj->initReservedSlot(ELEMENT_PROPERTY_SLOT, MagicValue(JS_GENERIC_MAGIC));
+ obj->initReservedSlot(INTRODUCTION_SCRIPT_SLOT, MagicValue(JS_GENERIC_MAGIC));
+
+ return obj;
+}
+
+[[nodiscard]] static bool MaybeValidateFilename(
+ JSContext* cx, Handle<ScriptSourceObject*> sso,
+ const JS::InstantiateOptions& options) {
+ // When parsing off-thread we want to do filename validation on the main
+ // thread. This makes off-thread parsing more pure and is simpler because we
+ // can't easily throw exceptions off-thread.
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ if (!gFilenameValidationCallback) {
+ return true;
+ }
+
+ const char* filename = sso->source()->filename();
+ if (!filename || options.skipFilenameValidation) {
+ return true;
+ }
+
+ if (gFilenameValidationCallback(cx, filename)) {
+ return true;
+ }
+
+ const char* utf8Filename;
+ if (mozilla::IsUtf8(mozilla::MakeStringSpan(filename))) {
+ utf8Filename = filename;
+ } else {
+ utf8Filename = "(invalid UTF-8 filename)";
+ }
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_UNSAFE_FILENAME,
+ utf8Filename);
+ return false;
+}
+
+/* static */
+bool ScriptSourceObject::initFromOptions(
+ JSContext* cx, Handle<ScriptSourceObject*> source,
+ const JS::InstantiateOptions& options) {
+ cx->releaseCheck(source);
+ MOZ_ASSERT(
+ source->getReservedSlot(ELEMENT_PROPERTY_SLOT).isMagic(JS_GENERIC_MAGIC));
+ MOZ_ASSERT(source->getReservedSlot(INTRODUCTION_SCRIPT_SLOT)
+ .isMagic(JS_GENERIC_MAGIC));
+
+ if (!MaybeValidateFilename(cx, source, options)) {
+ return false;
+ }
+
+ if (options.deferDebugMetadata) {
+ return true;
+ }
+
+ // Initialize the element attribute slot and introduction script slot;
+ // this marks the SSO as initialized for asserts.
+
+ RootedString elementAttributeName(cx);
+ if (!initElementProperties(cx, source, elementAttributeName)) {
+ return false;
+ }
+
+ RootedValue introductionScript(cx);
+ source->setReservedSlot(INTRODUCTION_SCRIPT_SLOT, introductionScript);
+
+ return true;
+}
+
+/* static */
+bool ScriptSourceObject::initElementProperties(
+ JSContext* cx, Handle<ScriptSourceObject*> source,
+ HandleString elementAttrName) {
+ RootedValue nameValue(cx);
+ if (elementAttrName) {
+ nameValue = StringValue(elementAttrName);
+ }
+ if (!cx->compartment()->wrap(cx, &nameValue)) {
+ return false;
+ }
+
+ source->setReservedSlot(ELEMENT_PROPERTY_SLOT, nameValue);
+
+ return true;
+}
+
+void ScriptSourceObject::setPrivate(JSRuntime* rt, const Value& value) {
+ // Update the private value, calling addRef/release hooks if necessary
+ // to allow the embedding to maintain a reference count for the
+ // private data.
+ JS::AutoSuppressGCAnalysis nogc;
+ Value prevValue = getReservedSlot(PRIVATE_SLOT);
+ rt->releaseScriptPrivate(prevValue);
+ setReservedSlot(PRIVATE_SLOT, value);
+ rt->addRefScriptPrivate(value);
+}
+
+void ScriptSourceObject::clearPrivate(JSRuntime* rt) {
+ // Clear the private value, calling release hook if necessary.
+ // |this| may be gray, be careful not to create edges to it.
+ JS::AutoSuppressGCAnalysis nogc;
+ Value prevValue = getReservedSlot(PRIVATE_SLOT);
+ rt->releaseScriptPrivate(prevValue);
+ getSlotRef(PRIVATE_SLOT).setUndefinedUnchecked();
+}
+
+class ScriptSource::LoadSourceMatcher {
+ JSContext* const cx_;
+ ScriptSource* const ss_;
+ bool* const loaded_;
+
+ public:
+ explicit LoadSourceMatcher(JSContext* cx, ScriptSource* ss, bool* loaded)
+ : cx_(cx), ss_(ss), loaded_(loaded) {}
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ bool operator()(const Compressed<Unit, CanRetrieve>&) const {
+ *loaded_ = true;
+ return true;
+ }
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ bool operator()(const Uncompressed<Unit, CanRetrieve>&) const {
+ *loaded_ = true;
+ return true;
+ }
+
+ template <typename Unit>
+ bool operator()(const Retrievable<Unit>&) {
+ if (!cx_->runtime()->sourceHook.ref()) {
+ *loaded_ = false;
+ return true;
+ }
+
+ size_t length;
+
+ // The first argument is just for overloading -- its value doesn't matter.
+ if (!tryLoadAndSetSource(Unit('0'), &length)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ bool operator()(const Missing&) const {
+ *loaded_ = false;
+ return true;
+ }
+
+ private:
+ bool tryLoadAndSetSource(const Utf8Unit&, size_t* length) const {
+ char* utf8Source;
+ if (!cx_->runtime()->sourceHook->load(cx_, ss_->filename(), nullptr,
+ &utf8Source, length)) {
+ return false;
+ }
+
+ if (!utf8Source) {
+ *loaded_ = false;
+ return true;
+ }
+
+ if (!ss_->setRetrievedSource(
+ cx_, EntryUnits<Utf8Unit>(reinterpret_cast<Utf8Unit*>(utf8Source)),
+ *length)) {
+ return false;
+ }
+
+ *loaded_ = true;
+ return true;
+ }
+
+ bool tryLoadAndSetSource(const char16_t&, size_t* length) const {
+ char16_t* utf16Source;
+ if (!cx_->runtime()->sourceHook->load(cx_, ss_->filename(), &utf16Source,
+ nullptr, length)) {
+ return false;
+ }
+
+ if (!utf16Source) {
+ *loaded_ = false;
+ return true;
+ }
+
+ if (!ss_->setRetrievedSource(cx_, EntryUnits<char16_t>(utf16Source),
+ *length)) {
+ return false;
+ }
+
+ *loaded_ = true;
+ return true;
+ }
+};
+
+/* static */
+bool ScriptSource::loadSource(JSContext* cx, ScriptSource* ss, bool* loaded) {
+ return ss->data.match(LoadSourceMatcher(cx, ss, loaded));
+}
+
+/* static */
+JSLinearString* JSScript::sourceData(JSContext* cx, HandleScript script) {
+ MOZ_ASSERT(script->scriptSource()->hasSourceText());
+ return script->scriptSource()->substring(cx, script->sourceStart(),
+ script->sourceEnd());
+}
+
+bool BaseScript::appendSourceDataForToString(JSContext* cx, StringBuffer& buf) {
+ MOZ_ASSERT(scriptSource()->hasSourceText());
+ return scriptSource()->appendSubstring(cx, buf, toStringStart(),
+ toStringEnd());
+}
+
+void UncompressedSourceCache::holdEntry(AutoHoldEntry& holder,
+ const ScriptSourceChunk& ssc) {
+ MOZ_ASSERT(!holder_);
+ holder.holdEntry(this, ssc);
+ holder_ = &holder;
+}
+
+void UncompressedSourceCache::releaseEntry(AutoHoldEntry& holder) {
+ MOZ_ASSERT(holder_ == &holder);
+ holder_ = nullptr;
+}
+
+template <typename Unit>
+const Unit* UncompressedSourceCache::lookup(const ScriptSourceChunk& ssc,
+ AutoHoldEntry& holder) {
+ MOZ_ASSERT(!holder_);
+ MOZ_ASSERT(ssc.ss->isCompressed<Unit>());
+
+ if (!map_) {
+ return nullptr;
+ }
+
+ if (Map::Ptr p = map_->lookup(ssc)) {
+ holdEntry(holder, ssc);
+ return static_cast<const Unit*>(p->value().get());
+ }
+
+ return nullptr;
+}
+
+bool UncompressedSourceCache::put(const ScriptSourceChunk& ssc, SourceData data,
+ AutoHoldEntry& holder) {
+ MOZ_ASSERT(!holder_);
+
+ if (!map_) {
+ map_ = MakeUnique<Map>();
+ if (!map_) {
+ return false;
+ }
+ }
+
+ if (!map_->put(ssc, std::move(data))) {
+ return false;
+ }
+
+ holdEntry(holder, ssc);
+ return true;
+}
+
+void UncompressedSourceCache::purge() {
+ if (!map_) {
+ return;
+ }
+
+ for (Map::Range r = map_->all(); !r.empty(); r.popFront()) {
+ if (holder_ && r.front().key() == holder_->sourceChunk()) {
+ holder_->deferDelete(std::move(r.front().value()));
+ holder_ = nullptr;
+ }
+ }
+
+ map_ = nullptr;
+}
+
+size_t UncompressedSourceCache::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) {
+ size_t n = 0;
+ if (map_ && !map_->empty()) {
+ n += map_->shallowSizeOfIncludingThis(mallocSizeOf);
+ for (Map::Range r = map_->all(); !r.empty(); r.popFront()) {
+ n += mallocSizeOf(r.front().value().get());
+ }
+ }
+ return n;
+}
+
+template <typename Unit>
+const Unit* ScriptSource::chunkUnits(
+ JSContext* cx, UncompressedSourceCache::AutoHoldEntry& holder,
+ size_t chunk) {
+ const CompressedData<Unit>& c = *compressedData<Unit>();
+
+ ScriptSourceChunk ssc(this, chunk);
+ if (const Unit* decompressed =
+ cx->caches().uncompressedSourceCache.lookup<Unit>(ssc, holder)) {
+ return decompressed;
+ }
+
+ size_t totalLengthInBytes = length() * sizeof(Unit);
+ size_t chunkBytes = Compressor::chunkSize(totalLengthInBytes, chunk);
+
+ MOZ_ASSERT((chunkBytes % sizeof(Unit)) == 0);
+ const size_t chunkLength = chunkBytes / sizeof(Unit);
+ EntryUnits<Unit> decompressed(js_pod_malloc<Unit>(chunkLength));
+ if (!decompressed) {
+ JS_ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ // Compression treats input and output memory as plain ol' bytes. These
+ // reinterpret_cast<>s accord exactly with that.
+ if (!DecompressStringChunk(
+ reinterpret_cast<const unsigned char*>(c.raw.chars()), chunk,
+ reinterpret_cast<unsigned char*>(decompressed.get()), chunkBytes)) {
+ JS_ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ const Unit* ret = decompressed.get();
+ if (!cx->caches().uncompressedSourceCache.put(
+ ssc, ToSourceData(std::move(decompressed)), holder)) {
+ JS_ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ return ret;
+}
+
+template <typename Unit>
+void ScriptSource::convertToCompressedSource(SharedImmutableString compressed,
+ size_t uncompressedLength) {
+ MOZ_ASSERT(isUncompressed<Unit>());
+ MOZ_ASSERT(uncompressedData<Unit>()->length() == uncompressedLength);
+
+ if (data.is<Uncompressed<Unit, SourceRetrievable::Yes>>()) {
+ data = SourceType(Compressed<Unit, SourceRetrievable::Yes>(
+ std::move(compressed), uncompressedLength));
+ } else {
+ data = SourceType(Compressed<Unit, SourceRetrievable::No>(
+ std::move(compressed), uncompressedLength));
+ }
+}
+
+template <typename Unit>
+void ScriptSource::performDelayedConvertToCompressedSource(
+ ExclusiveData<ReaderInstances>::Guard& g) {
+ // There might not be a conversion to compressed source happening at all.
+ if (g->pendingCompressed.empty()) {
+ return;
+ }
+
+ CompressedData<Unit>& pending =
+ g->pendingCompressed.ref<CompressedData<Unit>>();
+
+ convertToCompressedSource<Unit>(std::move(pending.raw),
+ pending.uncompressedLength);
+
+ g->pendingCompressed.destroy();
+}
+
+template <typename Unit>
+ScriptSource::PinnedUnits<Unit>::~PinnedUnits() {
+ if (units_) {
+ // Note: We use a Mutex with Exclusive access, such that no PinnedUnits
+ // instance is live while we are compressing the source.
+ auto guard = source_->readers_.lock();
+ MOZ_ASSERT(guard->count > 0);
+ if (--guard->count == 0) {
+ source_->performDelayedConvertToCompressedSource<Unit>(guard);
+ }
+ }
+}
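+
+// Protocol sketch assumed by the destructor above: every live PinnedUnits
+// bumps |readers_.count| on construction and drops it here; the delayed
+// conversion to compressed source is applied only by the reader that brings
+// the count back to zero, so no pinned pointer survives the swap.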
+
+template <typename Unit>
+const Unit* ScriptSource::units(JSContext* cx,
+ UncompressedSourceCache::AutoHoldEntry& holder,
+ size_t begin, size_t len) {
+ MOZ_ASSERT(begin <= length());
+ MOZ_ASSERT(begin + len <= length());
+
+ if (isUncompressed<Unit>()) {
+ const Unit* units = uncompressedData<Unit>()->units();
+ if (!units) {
+ return nullptr;
+ }
+ return units + begin;
+ }
+
+ if (data.is<Missing>()) {
+ MOZ_CRASH("ScriptSource::units() on ScriptSource with missing source");
+ }
+
+ if (data.is<Retrievable<Unit>>()) {
+ MOZ_CRASH("ScriptSource::units() on ScriptSource with retrievable source");
+ }
+
+ MOZ_ASSERT(isCompressed<Unit>());
+
+ // Determine first/last chunks, the offset (in bytes) into the first chunk
+ // of the requested units, and the number of bytes in the last chunk.
+ //
+ // Note that first and last chunk sizes are miscomputed and *must not be
+ // used* when the first chunk is the last chunk.
+ size_t firstChunk, firstChunkOffset, firstChunkSize;
+ size_t lastChunk, lastChunkSize;
+ Compressor::rangeToChunkAndOffset(
+ begin * sizeof(Unit), (begin + len) * sizeof(Unit), &firstChunk,
+ &firstChunkOffset, &firstChunkSize, &lastChunk, &lastChunkSize);
+ MOZ_ASSERT(firstChunk <= lastChunk);
+ MOZ_ASSERT(firstChunkOffset % sizeof(Unit) == 0);
+ MOZ_ASSERT(firstChunkSize % sizeof(Unit) == 0);
+
+ size_t firstUnit = firstChunkOffset / sizeof(Unit);
+
+ // Directly return units within a single chunk. UncompressedSourceCache
+ // and |holder| will hold the units alive past function return.
+ if (firstChunk == lastChunk) {
+ const Unit* units = chunkUnits<Unit>(cx, holder, firstChunk);
+ if (!units) {
+ return nullptr;
+ }
+
+ return units + firstUnit;
+ }
+
+ // Otherwise the units span multiple chunks. Copy successive chunks'
+ // decompressed units into freshly-allocated memory to return.
+ EntryUnits<Unit> decompressed(js_pod_malloc<Unit>(len));
+ if (!decompressed) {
+ JS_ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ Unit* cursor;
+
+ {
+ // |AutoHoldEntry| is single-shot, and a holder successfully filled in
+ // by |chunkUnits| must be destroyed before another can be used. Thus
+ // we can't use |holder| with |chunkUnits| when |chunkUnits| is used
+ // with multiple chunks, and we must use and destroy distinct, fresh
+ // holders for each chunk.
+ UncompressedSourceCache::AutoHoldEntry firstHolder;
+ const Unit* units = chunkUnits<Unit>(cx, firstHolder, firstChunk);
+ if (!units) {
+ return nullptr;
+ }
+
+ cursor = std::copy_n(units + firstUnit, firstChunkSize / sizeof(Unit),
+ decompressed.get());
+ }
+
+ for (size_t i = firstChunk + 1; i < lastChunk; i++) {
+ UncompressedSourceCache::AutoHoldEntry chunkHolder;
+ const Unit* units = chunkUnits<Unit>(cx, chunkHolder, i);
+ if (!units) {
+ return nullptr;
+ }
+
+ cursor = std::copy_n(units, Compressor::CHUNK_SIZE / sizeof(Unit), cursor);
+ }
+
+ {
+ UncompressedSourceCache::AutoHoldEntry lastHolder;
+ const Unit* units = chunkUnits<Unit>(cx, lastHolder, lastChunk);
+ if (!units) {
+ return nullptr;
+ }
+
+ cursor = std::copy_n(units, lastChunkSize / sizeof(Unit), cursor);
+ }
+
+ MOZ_ASSERT(PointerRangeSize(decompressed.get(), cursor) == len);
+
+ // Transfer ownership to |holder|.
+ const Unit* ret = decompressed.get();
+ holder.holdUnits(std::move(decompressed));
+ return ret;
+}
+
+template <typename Unit>
+ScriptSource::PinnedUnits<Unit>::PinnedUnits(
+ JSContext* cx, ScriptSource* source,
+ UncompressedSourceCache::AutoHoldEntry& holder, size_t begin, size_t len)
+ : PinnedUnitsBase(source) {
+ MOZ_ASSERT(source->hasSourceType<Unit>(), "must pin units of source's type");
+
+ units_ = source->units<Unit>(cx, holder, begin, len);
+ if (units_) {
+ auto guard = source->readers_.lock();
+ guard->count++;
+ }
+}
+
+template class ScriptSource::PinnedUnits<Utf8Unit>;
+template class ScriptSource::PinnedUnits<char16_t>;
+
+JSLinearString* ScriptSource::substring(JSContext* cx, size_t start,
+ size_t stop) {
+ MOZ_ASSERT(start <= stop);
+
+ size_t len = stop - start;
+ if (!len) {
+ return cx->emptyString();
+ }
+ UncompressedSourceCache::AutoHoldEntry holder;
+
+ // UTF-8 source text.
+ if (hasSourceType<Utf8Unit>()) {
+ PinnedUnits<Utf8Unit> units(cx, this, holder, start, len);
+ if (!units.asChars()) {
+ return nullptr;
+ }
+
+ const char* str = units.asChars();
+ return NewStringCopyUTF8N(cx, JS::UTF8Chars(str, len));
+ }
+
+ // UTF-16 source text.
+ PinnedUnits<char16_t> units(cx, this, holder, start, len);
+ if (!units.asChars()) {
+ return nullptr;
+ }
+
+ return NewStringCopyN<CanGC>(cx, units.asChars(), len);
+}
+
+JSLinearString* ScriptSource::substringDontDeflate(JSContext* cx, size_t start,
+ size_t stop) {
+ MOZ_ASSERT(start <= stop);
+
+ size_t len = stop - start;
+ if (!len) {
+ return cx->emptyString();
+ }
+ UncompressedSourceCache::AutoHoldEntry holder;
+
+ // UTF-8 source text.
+ if (hasSourceType<Utf8Unit>()) {
+ PinnedUnits<Utf8Unit> units(cx, this, holder, start, len);
+ if (!units.asChars()) {
+ return nullptr;
+ }
+
+ const char* str = units.asChars();
+
+ // There doesn't appear to be a non-deflating UTF-8 string creation
+ // function -- but then again, it's not entirely clear how current
+ // callers benefit from non-deflation.
+ return NewStringCopyUTF8N(cx, JS::UTF8Chars(str, len));
+ }
+
+ // UTF-16 source text.
+ PinnedUnits<char16_t> units(cx, this, holder, start, len);
+ if (!units.asChars()) {
+ return nullptr;
+ }
+
+ return NewStringCopyNDontDeflate<CanGC>(cx, units.asChars(), len);
+}
+
+bool ScriptSource::appendSubstring(JSContext* cx, StringBuffer& buf,
+ size_t start, size_t stop) {
+ MOZ_ASSERT(start <= stop);
+
+ size_t len = stop - start;
+ UncompressedSourceCache::AutoHoldEntry holder;
+
+ if (hasSourceType<Utf8Unit>()) {
+ PinnedUnits<Utf8Unit> pinned(cx, this, holder, start, len);
+ if (!pinned.get()) {
+ return false;
+ }
+ if (len > SourceDeflateLimit && !buf.ensureTwoByteChars()) {
+ return false;
+ }
+
+ const Utf8Unit* units = pinned.get();
+ return buf.append(units, len);
+ } else {
+ PinnedUnits<char16_t> pinned(cx, this, holder, start, len);
+ if (!pinned.get()) {
+ return false;
+ }
+ if (len > SourceDeflateLimit && !buf.ensureTwoByteChars()) {
+ return false;
+ }
+
+ const char16_t* units = pinned.get();
+ return buf.append(units, len);
+ }
+}
+
+JSLinearString* ScriptSource::functionBodyString(JSContext* cx) {
+ MOZ_ASSERT(isFunctionBody());
+
+ size_t start = parameterListEnd_ + FunctionConstructorMedialSigils.length();
+ size_t stop = length() - FunctionConstructorFinalBrace.length();
+ return substring(cx, start, stop);
+}
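+
+// Illustrative example: the Function constructor assembles source of the
+// shape
+//
+//   function anonymous(<params><medial sigils><body><final brace>
+//
+// and |parameterListEnd_| sits just past <params>. Skipping the medial
+// sigils and trimming the final brace therefore yields exactly <body>. The
+// concrete sigil strings are defined elsewhere; only their lengths are used
+// here.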
+
+template <typename ContextT, typename Unit>
+[[nodiscard]] bool ScriptSource::setUncompressedSourceHelper(
+ ContextT* cx, EntryUnits<Unit>&& source, size_t length,
+ SourceRetrievable retrievable) {
+ auto& cache = SharedImmutableStringsCache::getSingleton();
+
+ auto uniqueChars = SourceTypeTraits<Unit>::toCacheable(std::move(source));
+ auto deduped = cache.getOrCreate(std::move(uniqueChars), length);
+ if (!deduped) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (retrievable == SourceRetrievable::Yes) {
+ data = SourceType(
+ Uncompressed<Unit, SourceRetrievable::Yes>(std::move(deduped)));
+ } else {
+ data = SourceType(
+ Uncompressed<Unit, SourceRetrievable::No>(std::move(deduped)));
+ }
+ return true;
+}
+
+template <typename Unit>
+[[nodiscard]] bool ScriptSource::setRetrievedSource(JSContext* cx,
+ EntryUnits<Unit>&& source,
+ size_t length) {
+ MOZ_ASSERT(data.is<Retrievable<Unit>>(),
+ "retrieved source can only overwrite the corresponding "
+ "retrievable source");
+ return setUncompressedSourceHelper(cx, std::move(source), length,
+ SourceRetrievable::Yes);
+}
+
+bool js::IsOffThreadSourceCompressionEnabled() {
+  // If we don't have concurrent execution, compression will contend with
+  // main-thread execution, in which case we disable it. Similarly, we don't
+  // want to block the thread pool if it is too small.
+ return GetHelperThreadCPUCount() > 1 && GetHelperThreadCount() > 1 &&
+ CanUseExtraThreads();
+}
+
+bool ScriptSource::tryCompressOffThread(JSContext* cx) {
+ // Beware: |js::SynchronouslyCompressSource| assumes that this function is
+ // only called once, just after a script has been compiled, and it's never
+ // called at some random time after that. If multiple calls of this can ever
+ // occur, that function may require changes.
+
+ // The SourceCompressionTask needs to record the major GC number for
+  // scheduling. This cannot be accessed off-thread and must be handled in
+ // ParseTask::finish instead.
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+
+ // If source compression was already attempted, do not queue a new task.
+ if (hadCompressionTask_) {
+ return true;
+ }
+
+ if (!hasUncompressedSource()) {
+ // This excludes compressed, missing, and retrievable source.
+ return true;
+ }
+
+ // There are several cases where source compression is not a good idea:
+ // - If the script is tiny, then compression will save little or no space.
+ // - If there is only one core, then compression will contend with JS
+ // execution (which hurts benchmarketing).
+ //
+ // Otherwise, enqueue a compression task to be processed when a major
+ // GC is requested.
+
+ if (length() < ScriptSource::MinimumCompressibleLength ||
+ !IsOffThreadSourceCompressionEnabled()) {
+ return true;
+ }
+
+ // Heap allocate the task. It will be freed upon compression
+ // completing in AttachFinishedCompressedSources.
+ auto task = MakeUnique<SourceCompressionTask>(cx->runtime(), this);
+ if (!task) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return EnqueueOffThreadCompression(cx, std::move(task));
+}
+
+template <typename Unit>
+void ScriptSource::triggerConvertToCompressedSource(
+ SharedImmutableString compressed, size_t uncompressedLength) {
+ MOZ_ASSERT(isUncompressed<Unit>(),
+ "should only be triggering compressed source installation to "
+ "overwrite identically-encoded uncompressed source");
+ MOZ_ASSERT(uncompressedData<Unit>()->length() == uncompressedLength);
+
+  // If units aren't pinned -- and they probably won't be, since a GC would
+  // have to occur in the small window of time where a |PinnedUnits| was
+  // live -- then we can immediately convert.
+ {
+ auto guard = readers_.lock();
+ if (MOZ_LIKELY(!guard->count)) {
+ convertToCompressedSource<Unit>(std::move(compressed),
+ uncompressedLength);
+ return;
+ }
+
+ // Otherwise, set aside the compressed-data info. The conversion is
+ // performed when the last |PinnedUnits| dies.
+ MOZ_ASSERT(guard->pendingCompressed.empty(),
+ "shouldn't be multiple conversions happening");
+ guard->pendingCompressed.construct<CompressedData<Unit>>(
+ std::move(compressed), uncompressedLength);
+ }
+}
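+
+// Illustrative timeline for the deferred-conversion protocol above:
+//
+//   compression task finishes
+//     -> triggerConvertToCompressedSource(...)
+//          readers_.count == 0: convert immediately
+//          readers_.count >  0: stash data in |pendingCompressed|
+//   last |PinnedUnits| dies
+//     -> observes |pendingCompressed| and performs the conversion
+//
+// The destructor half of this protocol lives with PinnedUnitsBase, outside
+// this excerpt.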
+
+template <typename Unit>
+[[nodiscard]] bool ScriptSource::initializeWithUnretrievableCompressedSource(
+ FrontendContext* fc, UniqueChars&& compressed, size_t rawLength,
+ size_t sourceLength) {
+ MOZ_ASSERT(data.is<Missing>(), "shouldn't be double-initializing");
+ MOZ_ASSERT(compressed != nullptr);
+
+ auto& cache = SharedImmutableStringsCache::getSingleton();
+ auto deduped = cache.getOrCreate(std::move(compressed), rawLength);
+ if (!deduped) {
+ ReportOutOfMemory(fc);
+ return false;
+ }
+
+#ifdef DEBUG
+ {
+ auto guard = readers_.lock();
+ MOZ_ASSERT(
+ guard->count == 0,
+ "shouldn't be initializing a ScriptSource while its characters "
+ "are pinned -- that only makes sense with a ScriptSource actively "
+ "being inspected");
+ }
+#endif
+
+ data = SourceType(Compressed<Unit, SourceRetrievable::No>(std::move(deduped),
+ sourceLength));
+
+ return true;
+}
+
+template bool ScriptSource::initializeWithUnretrievableCompressedSource<
+ Utf8Unit>(FrontendContext* fc, UniqueChars&& compressed, size_t rawLength,
+ size_t sourceLength);
+template bool ScriptSource::initializeWithUnretrievableCompressedSource<
+ char16_t>(FrontendContext* fc, UniqueChars&& compressed, size_t rawLength,
+ size_t sourceLength);
+
+template <typename Unit>
+bool ScriptSource::assignSource(FrontendContext* fc,
+ const ReadOnlyCompileOptions& options,
+ SourceText<Unit>& srcBuf) {
+ MOZ_ASSERT(data.is<Missing>(),
+ "source assignment should only occur on fresh ScriptSources");
+
+ mutedErrors_ = options.mutedErrors();
+ delazificationMode_ = options.eagerDelazificationStrategy();
+
+ if (options.discardSource) {
+ return true;
+ }
+
+ if (options.sourceIsLazy) {
+ data = SourceType(Retrievable<Unit>());
+ return true;
+ }
+
+ auto& cache = SharedImmutableStringsCache::getSingleton();
+ auto deduped = cache.getOrCreate(srcBuf.get(), srcBuf.length(), [&srcBuf]() {
+ using CharT = typename SourceTypeTraits<Unit>::CharT;
+ return srcBuf.ownsUnits()
+ ? UniquePtr<CharT[], JS::FreePolicy>(srcBuf.takeChars())
+ : DuplicateString(srcBuf.get(), srcBuf.length());
+ });
+ if (!deduped) {
+ ReportOutOfMemory(fc);
+ return false;
+ }
+
+ data =
+ SourceType(Uncompressed<Unit, SourceRetrievable::No>(std::move(deduped)));
+ return true;
+}
+
+template bool ScriptSource::assignSource(FrontendContext* fc,
+ const ReadOnlyCompileOptions& options,
+ SourceText<char16_t>& srcBuf);
+template bool ScriptSource::assignSource(FrontendContext* fc,
+ const ReadOnlyCompileOptions& options,
+ SourceText<Utf8Unit>& srcBuf);
+
+[[nodiscard]] static bool reallocUniquePtr(UniqueChars& unique, size_t size) {
+ auto newPtr = static_cast<char*>(js_realloc(unique.get(), size));
+ if (!newPtr) {
+ return false;
+ }
+
+ // Since the realloc succeeded, unique is now holding a freed pointer.
+ (void)unique.release();
+ unique.reset(newPtr);
+ return true;
+}
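+
+// Minimal usage sketch for the helper above (illustrative only):
+//
+//   UniqueChars buf(js_pod_malloc<char>(16));
+//   if (buf && reallocUniquePtr(buf, 32)) {
+//     // |buf| now owns a 32-byte allocation. On failure, |buf| still owns
+//     // its original 16 bytes, so no error path leaks or double-frees.
+//   }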
+
+template <typename Unit>
+void SourceCompressionTask::workEncodingSpecific() {
+ MOZ_ASSERT(source_->isUncompressed<Unit>());
+
+  // Try to keep the maximum memory usage down by initially allocating only
+  // half the size of the string.
+ size_t inputBytes = source_->length() * sizeof(Unit);
+ size_t firstSize = inputBytes / 2;
+ UniqueChars compressed(js_pod_malloc<char>(firstSize));
+ if (!compressed) {
+ return;
+ }
+
+ const Unit* chars = source_->uncompressedData<Unit>()->units();
+ Compressor comp(reinterpret_cast<const unsigned char*>(chars), inputBytes);
+ if (!comp.init()) {
+ return;
+ }
+
+ comp.setOutput(reinterpret_cast<unsigned char*>(compressed.get()), firstSize);
+ bool cont = true;
+ bool reallocated = false;
+ while (cont) {
+ if (shouldCancel()) {
+ return;
+ }
+
+ switch (comp.compressMore()) {
+ case Compressor::CONTINUE:
+ break;
+ case Compressor::MOREOUTPUT: {
+ if (reallocated) {
+ // The compressed string is longer than the original string.
+ return;
+ }
+
+ // The compressed output is greater than half the size of the
+ // original string. Reallocate to the full size.
+ if (!reallocUniquePtr(compressed, inputBytes)) {
+ return;
+ }
+
+ comp.setOutput(reinterpret_cast<unsigned char*>(compressed.get()),
+ inputBytes);
+ reallocated = true;
+ break;
+ }
+ case Compressor::DONE:
+ cont = false;
+ break;
+ case Compressor::OOM:
+ return;
+ }
+ }
+
+ size_t totalBytes = comp.totalBytesNeeded();
+
+ // Shrink the buffer to the size of the compressed data.
+ if (!reallocUniquePtr(compressed, totalBytes)) {
+ return;
+ }
+
+ comp.finish(compressed.get(), totalBytes);
+
+ if (shouldCancel()) {
+ return;
+ }
+
+ auto& strings = SharedImmutableStringsCache::getSingleton();
+ resultString_ = strings.getOrCreate(std::move(compressed), totalBytes);
+}
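+
+// Worked example of the buffer strategy above (sizes illustrative): for a
+// 10,000-byte input, the first output buffer is 5,000 bytes. One MOREOUTPUT
+// result regrows it to the full 10,000 bytes; a second MOREOUTPUT means the
+// compressed form would exceed the original, so the task bails out and the
+// source simply stays uncompressed.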
+
+struct SourceCompressionTask::PerformTaskWork {
+ SourceCompressionTask* const task_;
+
+ explicit PerformTaskWork(SourceCompressionTask* task) : task_(task) {}
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ void operator()(const ScriptSource::Uncompressed<Unit, CanRetrieve>&) {
+ task_->workEncodingSpecific<Unit>();
+ }
+
+ template <typename T>
+ void operator()(const T&) {
+ MOZ_CRASH(
+ "why are we compressing missing, missing-but-retrievable, "
+ "or already-compressed source?");
+ }
+};
+
+void ScriptSource::performTaskWork(SourceCompressionTask* task) {
+ MOZ_ASSERT(hasUncompressedSource());
+ data.match(SourceCompressionTask::PerformTaskWork(task));
+}
+
+void SourceCompressionTask::runTask() {
+ if (shouldCancel()) {
+ return;
+ }
+
+ MOZ_ASSERT(source_->hasUncompressedSource());
+
+ source_->performTaskWork(this);
+}
+
+void SourceCompressionTask::runHelperThreadTask(
+ AutoLockHelperThreadState& locked) {
+ {
+ AutoUnlockHelperThreadState unlock(locked);
+ this->runTask();
+ }
+
+ {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!HelperThreadState().compressionFinishedList(locked).append(this)) {
+ oomUnsafe.crash("SourceCompressionTask::runHelperThreadTask");
+ }
+ }
+}
+
+void ScriptSource::triggerConvertToCompressedSourceFromTask(
+ SharedImmutableString compressed) {
+ data.match(TriggerConvertToCompressedSourceFromTask(this, compressed));
+}
+
+void SourceCompressionTask::complete() {
+ if (!shouldCancel() && resultString_) {
+ source_->triggerConvertToCompressedSourceFromTask(std::move(resultString_));
+ }
+}
+
+bool js::SynchronouslyCompressSource(JSContext* cx,
+ JS::Handle<BaseScript*> script) {
+ MOZ_ASSERT(!cx->isHelperThreadContext(),
+ "should only sync-compress on the main thread");
+
+ // Finish all pending source compressions, including the single compression
+ // task that may have been created (by |ScriptSource::tryCompressOffThread|)
+ // just after the script was compiled. Because we have flushed this queue,
+ // no code below needs to synchronize with an off-thread parse task that
+ // assumes the immutability of a |ScriptSource|'s data.
+ //
+ // This *may* end up compressing |script|'s source. If it does -- we test
+ // this below -- that takes care of things. But if it doesn't, we will
+ // synchronously compress ourselves (and as noted above, this won't race
+ // anything).
+ RunPendingSourceCompressions(cx->runtime());
+
+ ScriptSource* ss = script->scriptSource();
+#ifdef DEBUG
+ {
+ auto guard = ss->readers_.lock();
+ MOZ_ASSERT(guard->count == 0,
+ "can't synchronously compress while source units are in use");
+ }
+#endif
+
+ // In principle a previously-triggered compression on a helper thread could
+ // have already completed. If that happens, there's nothing more to do.
+ if (ss->hasCompressedSource()) {
+ return true;
+ }
+
+ MOZ_ASSERT(ss->hasUncompressedSource(),
+ "shouldn't be compressing uncompressible source");
+
+ // Use an explicit scope to delineate the lifetime of |task|, for simplicity.
+ {
+#ifdef DEBUG
+ uint32_t sourceRefs = ss->refs;
+#endif
+ MOZ_ASSERT(sourceRefs > 0, "at least |script| here should have a ref");
+
+ // |SourceCompressionTask::shouldCancel| can periodically result in source
+ // compression being canceled if we're not careful. Guarantee that two refs
+ // to |ss| are always live in this function (at least one preexisting and
+ // one held by the task) so that compression is never canceled.
+ auto task = MakeUnique<SourceCompressionTask>(cx->runtime(), ss);
+ if (!task) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ MOZ_ASSERT(ss->refs > sourceRefs, "must have at least two refs now");
+
+ // Attempt to compress. This may not succeed if OOM happens, but (because
+ // it ordinarily happens on a helper thread) no error will ever be set here.
+ MOZ_ASSERT(!cx->isExceptionPending());
+ ss->performTaskWork(task.get());
+ MOZ_ASSERT(!cx->isExceptionPending());
+
+ // Convert |ss| from uncompressed to compressed data.
+ task->complete();
+
+ MOZ_ASSERT(!cx->isExceptionPending());
+ }
+
+ // The only way source won't be compressed here is if OOM happened.
+ return ss->hasCompressedSource();
+}
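+
+// Reference-count timeline for the function above (illustrative; it assumes
+// |shouldCancel| treats a sole remaining reference as a cancellation
+// signal):
+//
+//   entry:            refs == N (N >= 1; |script| holds one)
+//   task constructed: refs == N + 1 (the task holds one via its holder)
+//   task destroyed:   refs == N
+//
+// Keeping two refs live for the task's whole lifetime is what guarantees
+// the compression is never spuriously canceled mid-flight.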
+
+void ScriptSource::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::ScriptSourceInfo* info) const {
+ info->misc += mallocSizeOf(this);
+ info->numScripts++;
+}
+
+bool ScriptSource::startIncrementalEncoding(
+ JSContext* cx,
+ UniquePtr<frontend::ExtensibleCompilationStencil>&& initial) {
+ // We don't support asm.js in XDR.
+ // Encoding failures are reported by the xdrFinalizeEncoder function.
+ if (initial->asmJS) {
+ return true;
+ }
+
+ // Remove the reference to the source, to avoid the circular reference.
+ initial->source = nullptr;
+
+ AutoIncrementalTimer timer(cx->realm()->timers.xdrEncodingTime);
+ auto failureCase = mozilla::MakeScopeExit([&] { xdrEncoder_.reset(); });
+
+ if (!xdrEncoder_.setInitial(
+ cx, std::forward<UniquePtr<frontend::ExtensibleCompilationStencil>>(
+ initial))) {
+    // On encoding failure, let |failureCase| destroy the encoder; the
+    // failure itself is reported later, by xdrFinalizeEncoder.
+ return false;
+ }
+
+ failureCase.release();
+ return true;
+}
+
+bool ScriptSource::addDelazificationToIncrementalEncoding(
+ JSContext* cx, const frontend::CompilationStencil& stencil) {
+ MOZ_ASSERT(hasEncoder());
+ AutoIncrementalTimer timer(cx->realm()->timers.xdrEncodingTime);
+ auto failureCase = mozilla::MakeScopeExit([&] { xdrEncoder_.reset(); });
+
+ if (!xdrEncoder_.addDelazification(cx, stencil)) {
+    // On encoding failure, let |failureCase| destroy the encoder; the
+    // failure itself is reported later, by xdrFinalizeEncoder.
+ return false;
+ }
+
+ failureCase.release();
+ return true;
+}
+
+bool ScriptSource::xdrFinalizeEncoder(JSContext* cx,
+ JS::TranscodeBuffer& buffer) {
+ if (!hasEncoder()) {
+ JS_ReportErrorASCII(cx, "XDR encoding failure");
+ return false;
+ }
+
+ auto cleanup = mozilla::MakeScopeExit([&] { xdrEncoder_.reset(); });
+
+ AutoReportFrontendContext fc(cx);
+ XDRStencilEncoder encoder(&fc, buffer);
+
+ frontend::BorrowingCompilationStencil borrowingStencil(
+ xdrEncoder_.merger_->getResult());
+ XDRResult res = encoder.codeStencil(this, borrowingStencil);
+ if (res.isErr()) {
+ if (JS::IsTranscodeFailureResult(res.unwrapErr())) {
+ fc.clearAutoReport();
+ JS_ReportErrorASCII(cx, "XDR encoding failure");
+ }
+ return false;
+ }
+ return true;
+}
+
+void ScriptSource::xdrAbortEncoder() { xdrEncoder_.reset(); }
+
+template <typename Unit>
+[[nodiscard]] bool ScriptSource::initializeUnretrievableUncompressedSource(
+ FrontendContext* fc, EntryUnits<Unit>&& source, size_t length) {
+ MOZ_ASSERT(data.is<Missing>(), "must be initializing a fresh ScriptSource");
+ return setUncompressedSourceHelper(fc, std::move(source), length,
+ SourceRetrievable::No);
+}
+
+template bool ScriptSource::initializeUnretrievableUncompressedSource(
+ FrontendContext* fc, EntryUnits<Utf8Unit>&& source, size_t length);
+template bool ScriptSource::initializeUnretrievableUncompressedSource(
+ FrontendContext* fc, EntryUnits<char16_t>&& source, size_t length);
+
+// Format and return a cx->pod_malloc'ed URL for a generated script like:
+// {filename} line {lineno} > {introducer}
+// For example:
+// foo.js line 7 > eval
+// indicating code compiled by the call to 'eval' on line 7 of foo.js.
+UniqueChars js::FormatIntroducedFilename(const char* filename, unsigned lineno,
+ const char* introducer) {
+ // Compute the length of the string in advance, so we can allocate a
+ // buffer of the right size on the first shot.
+ //
+ // (JS_smprintf would be perfect, as that allocates the result
+ // dynamically as it formats the string, but it won't allocate from cx,
+ // and wants us to use a special free function.)
+ char linenoBuf[15];
+ size_t filenameLen = strlen(filename);
+ size_t linenoLen = SprintfLiteral(linenoBuf, "%u", lineno);
+ size_t introducerLen = strlen(introducer);
+ size_t len = filenameLen + 6 /* == strlen(" line ") */ + linenoLen +
+ 3 /* == strlen(" > ") */ + introducerLen + 1 /* \0 */;
+ UniqueChars formatted(js_pod_malloc<char>(len));
+ if (!formatted) {
+ return nullptr;
+ }
+
+ mozilla::DebugOnly<size_t> checkLen = snprintf(
+ formatted.get(), len, "%s line %s > %s", filename, linenoBuf, introducer);
+ MOZ_ASSERT(checkLen == len - 1);
+
+ return formatted;
+}
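+
+// Worked length computation for the example above: "foo.js" (6) + " line "
+// (6) + "7" (1) + " > " (3) + "eval" (4) + '\0' (1) = 21 bytes. snprintf
+// then writes the 20 characters plus the NUL, so |checkLen == len - 1|
+// holds.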
+
+bool ScriptSource::initFromOptions(FrontendContext* fc,
+ const ReadOnlyCompileOptions& options) {
+ MOZ_ASSERT(!filename_);
+ MOZ_ASSERT(!introducerFilename_);
+
+ mutedErrors_ = options.mutedErrors();
+ delazificationMode_ = options.eagerDelazificationStrategy();
+
+ startLine_ = options.lineno;
+ startColumn_ = options.column;
+ introductionType_ = options.introductionType;
+ setIntroductionOffset(options.introductionOffset);
+ // The parameterListEnd_ is initialized later by setParameterListEnd, before
+ // we expose any scripts that use this ScriptSource to the debugger.
+
+ if (options.hasIntroductionInfo) {
+ MOZ_ASSERT(options.introductionType != nullptr);
+ const char* filename =
+ options.filename() ? options.filename() : "<unknown>";
+ UniqueChars formatted = FormatIntroducedFilename(
+ filename, options.introductionLineno, options.introductionType);
+ if (!formatted) {
+ ReportOutOfMemory(fc);
+ return false;
+ }
+ if (!setFilename(fc, std::move(formatted))) {
+ return false;
+ }
+ } else if (options.filename()) {
+ if (!setFilename(fc, options.filename())) {
+ return false;
+ }
+ }
+
+ if (options.introducerFilename()) {
+ if (!setIntroducerFilename(fc, options.introducerFilename())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Use the SharedImmutableString map to deduplicate the input string. The
+// input string must be null-terminated.
+template <typename SharedT, typename CharT>
+static SharedT GetOrCreateStringZ(FrontendContext* fc,
+ UniquePtr<CharT[], JS::FreePolicy>&& str) {
+ size_t lengthWithNull = std::char_traits<CharT>::length(str.get()) + 1;
+ auto res = SharedImmutableStringsCache::getSingleton().getOrCreate(
+ std::move(str), lengthWithNull);
+ if (!res) {
+ ReportOutOfMemory(fc);
+ }
+ return res;
+}
+
+SharedImmutableString ScriptSource::getOrCreateStringZ(FrontendContext* fc,
+ UniqueChars&& str) {
+ return GetOrCreateStringZ<SharedImmutableString>(fc, std::move(str));
+}
+
+SharedImmutableTwoByteString ScriptSource::getOrCreateStringZ(
+ FrontendContext* fc, UniqueTwoByteChars&& str) {
+ return GetOrCreateStringZ<SharedImmutableTwoByteString>(fc, std::move(str));
+}
+
+bool ScriptSource::setFilename(FrontendContext* fc, const char* filename) {
+ UniqueChars owned = DuplicateString(fc, filename);
+ if (!owned) {
+ return false;
+ }
+ return setFilename(fc, std::move(owned));
+}
+
+bool ScriptSource::setFilename(FrontendContext* fc, UniqueChars&& filename) {
+ MOZ_ASSERT(!filename_);
+ filename_ = getOrCreateStringZ(fc, std::move(filename));
+ if (filename_) {
+ filenameHash_ =
+ mozilla::HashStringKnownLength(filename_.chars(), filename_.length());
+ return true;
+ }
+ return false;
+}
+
+bool ScriptSource::setIntroducerFilename(FrontendContext* fc,
+ const char* filename) {
+ UniqueChars owned = DuplicateString(fc, filename);
+ if (!owned) {
+ return false;
+ }
+ return setIntroducerFilename(fc, std::move(owned));
+}
+
+bool ScriptSource::setIntroducerFilename(FrontendContext* fc,
+ UniqueChars&& filename) {
+ MOZ_ASSERT(!introducerFilename_);
+ introducerFilename_ = getOrCreateStringZ(fc, std::move(filename));
+ return bool(introducerFilename_);
+}
+
+bool ScriptSource::setDisplayURL(FrontendContext* fc, const char16_t* url) {
+ UniqueTwoByteChars owned = DuplicateString(fc, url);
+ if (!owned) {
+ return false;
+ }
+ return setDisplayURL(fc, std::move(owned));
+}
+
+bool ScriptSource::setDisplayURL(FrontendContext* fc,
+ UniqueTwoByteChars&& url) {
+ MOZ_ASSERT(!hasDisplayURL());
+ MOZ_ASSERT(url);
+ if (url[0] == '\0') {
+ return true;
+ }
+
+ displayURL_ = getOrCreateStringZ(fc, std::move(url));
+ return bool(displayURL_);
+}
+
+bool ScriptSource::setSourceMapURL(FrontendContext* fc, const char16_t* url) {
+ UniqueTwoByteChars owned = DuplicateString(fc, url);
+ if (!owned) {
+ return false;
+ }
+ return setSourceMapURL(fc, std::move(owned));
+}
+
+bool ScriptSource::setSourceMapURL(FrontendContext* fc,
+ UniqueTwoByteChars&& url) {
+ MOZ_ASSERT(url);
+ if (url[0] == '\0') {
+ return true;
+ }
+
+ sourceMapURL_ = getOrCreateStringZ(fc, std::move(url));
+ return bool(sourceMapURL_);
+}
+
+/* static */ mozilla::Atomic<uint32_t, mozilla::SequentiallyConsistent>
+ ScriptSource::idCount_;
+
+/*
+ * [SMDOC] JSScript data layout (immutable)
+ *
+ * Script data that is shareable across processes. There are no pointers (GC
+ * or otherwise) and the data is relocatable.
+ *
+ *    Array elements      Pointed to by         Length
+ *    --------------      -------------         ------
+ *    jsbytecode          code()                codeLength()
+ *    SrcNote             notes()               noteLength()
+ *    uint32_t            resumeOffsets()
+ *    ScopeNote           scopeNotes()
+ *    TryNote             tryNotes()
+ */
+
+/* static */ CheckedInt<uint32_t> ImmutableScriptData::sizeFor(
+ uint32_t codeLength, uint32_t noteLength, uint32_t numResumeOffsets,
+ uint32_t numScopeNotes, uint32_t numTryNotes) {
+ // Take a count of which optional arrays will be used and need offset info.
+ unsigned numOptionalArrays = unsigned(numResumeOffsets > 0) +
+ unsigned(numScopeNotes > 0) +
+ unsigned(numTryNotes > 0);
+
+ // Compute size including trailing arrays.
+ CheckedInt<uint32_t> size = sizeof(ImmutableScriptData);
+ size += sizeof(Flags);
+ size += CheckedInt<uint32_t>(codeLength) * sizeof(jsbytecode);
+ size += CheckedInt<uint32_t>(noteLength) * sizeof(SrcNote);
+ size += CheckedInt<uint32_t>(numOptionalArrays) * sizeof(Offset);
+ size += CheckedInt<uint32_t>(numResumeOffsets) * sizeof(uint32_t);
+ size += CheckedInt<uint32_t>(numScopeNotes) * sizeof(ScopeNote);
+ size += CheckedInt<uint32_t>(numTryNotes) * sizeof(TryNote);
+
+ return size;
+}
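+
+// Worked example for sizeFor() (illustrative; sizeof values are
+// platform-dependent): with codeLength = 100, noteLength = 20, eight resume
+// offsets, and no scope notes or try notes, exactly one optional array is
+// present, so:
+//
+//   size = sizeof(ImmutableScriptData) + sizeof(Flags)
+//        + 100 * sizeof(jsbytecode)   // bytecode
+//        + 20 * sizeof(SrcNote)       // source notes
+//        + 1 * sizeof(Offset)         // one optional-array offset entry
+//        + 8 * sizeof(uint32_t)       // resume offsets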
+
+js::UniquePtr<ImmutableScriptData> js::ImmutableScriptData::new_(
+ FrontendContext* fc, uint32_t codeLength, uint32_t noteLength,
+ uint32_t numResumeOffsets, uint32_t numScopeNotes, uint32_t numTryNotes) {
+ auto size = sizeFor(codeLength, noteLength, numResumeOffsets, numScopeNotes,
+ numTryNotes);
+ if (!size.isValid()) {
+ ReportAllocationOverflow(fc);
+ return nullptr;
+ }
+
+ // Allocate contiguous raw buffer.
+ void* raw = fc->getAllocator()->pod_malloc<uint8_t>(size.value());
+ MOZ_ASSERT(uintptr_t(raw) % alignof(ImmutableScriptData) == 0);
+ if (!raw) {
+ return nullptr;
+ }
+
+  // Construct the ImmutableScriptData. Trailing arrays are uninitialized but
+ // GCPtrs are put into a safe state.
+ UniquePtr<ImmutableScriptData> result(new (raw) ImmutableScriptData(
+ codeLength, noteLength, numResumeOffsets, numScopeNotes, numTryNotes));
+ if (!result) {
+ return nullptr;
+ }
+
+ // Sanity check
+ MOZ_ASSERT(result->endOffset() == size.value());
+
+ return result;
+}
+
+js::UniquePtr<ImmutableScriptData> js::ImmutableScriptData::new_(
+ FrontendContext* fc, uint32_t totalSize) {
+ void* raw = fc->getAllocator()->pod_malloc<uint8_t>(totalSize);
+ MOZ_ASSERT(uintptr_t(raw) % alignof(ImmutableScriptData) == 0);
+ UniquePtr<ImmutableScriptData> result(
+ reinterpret_cast<ImmutableScriptData*>(raw));
+ return result;
+}
+
+bool js::ImmutableScriptData::validateLayout(uint32_t expectedSize) {
+ constexpr size_t HeaderSize = sizeof(js::ImmutableScriptData);
+ constexpr size_t OptionalOffsetsMaxSize = 3 * sizeof(Offset);
+
+ // Check that the optional-offsets array lies within the allocation before we
+ // try to read from it while computing sizes. Remember that the array *ends*
+ // at the `optArrayOffset_`.
+ static_assert(OptionalOffsetsMaxSize <= HeaderSize);
+ if (HeaderSize > optArrayOffset_) {
+ return false;
+ }
+ if (optArrayOffset_ > expectedSize) {
+ return false;
+ }
+
+ // Round-trip the size computation using `CheckedInt` to detect overflow. This
+  // should indirectly validate most alignment, size, and ordering requirements.
+ auto size = sizeFor(codeLength(), noteLength(), resumeOffsets().size(),
+ scopeNotes().size(), tryNotes().size());
+ return size.isValid() && (size.value() == expectedSize);
+}
+
+/* static */
+SharedImmutableScriptData* SharedImmutableScriptData::create(
+ FrontendContext* fc) {
+ return fc->getAllocator()->new_<SharedImmutableScriptData>();
+}
+
+/* static */
+SharedImmutableScriptData* SharedImmutableScriptData::createWith(
+ FrontendContext* fc, js::UniquePtr<ImmutableScriptData>&& isd) {
+ MOZ_ASSERT(isd.get());
+ SharedImmutableScriptData* sisd = create(fc);
+ if (!sisd) {
+ return nullptr;
+ }
+
+ sisd->setOwn(std::move(isd));
+ return sisd;
+}
+
+void JSScript::relazify(JSRuntime* rt) {
+ js::Scope* scope = enclosingScope();
+ UniquePtr<PrivateScriptData> scriptData;
+
+ // Any JIT compiles should have been released, so we already point to the
+ // interpreter trampoline which supports lazy scripts.
+ MOZ_ASSERT_IF(jit::HasJitBackend(), isUsingInterpreterTrampoline(rt));
+
+ // Without bytecode, the script counts are invalid so destroy them if they
+ // still exist.
+ destroyScriptCounts();
+
+ // Release the bytecode and gcthings list.
+ // NOTE: We clear the PrivateScriptData to nullptr. This is fine because we
+ // only allowed relazification (via AllowRelazify) if the original lazy
+ // script we compiled from had a nullptr PrivateScriptData.
+ swapData(scriptData);
+ freeSharedData();
+
+ // We should not still be in any side-tables for the debugger or
+ // code-coverage. The finalizer will not be able to clean them up once
+ // bytecode is released. We check in JSFunction::maybeRelazify() for these
+ // conditions before requesting relazification.
+ MOZ_ASSERT(!coverage::IsLCovEnabled());
+ MOZ_ASSERT(!hasScriptCounts());
+ MOZ_ASSERT(!hasDebugScript());
+
+ // Rollback warmUpData_ to have enclosingScope.
+ MOZ_ASSERT(warmUpData_.isWarmUpCount(),
+ "JitScript should already be released");
+ warmUpData_.resetWarmUpCount(0);
+ warmUpData_.initEnclosingScope(scope);
+
+ MOZ_ASSERT(isReadyForDelazification());
+}
+
+// Takes ownership of the passed SharedImmutableScriptData and either adds it
+// into the runtime's SharedImmutableScriptDataTable, or, if a matching entry
+// already exists, frees it and re-points the passed RefPtr at the existing
+// entry.
+/* static */
+bool SharedImmutableScriptData::shareScriptData(
+ FrontendContext* fc, RefPtr<SharedImmutableScriptData>& sisd) {
+ MOZ_ASSERT(sisd);
+ MOZ_ASSERT(sisd->refCount() == 1);
+
+ SharedImmutableScriptData* data = sisd.get();
+
+ SharedImmutableScriptData::Hasher::Lookup lookup(data);
+
+ Maybe<AutoLockGlobalScriptData> lock;
+ js::SharedImmutableScriptDataTable& table =
+ fc->scriptDataTableHolder()->getMaybeLocked(lock);
+
+ SharedImmutableScriptDataTable::AddPtr p = table.lookupForAdd(lookup);
+ if (p) {
+ MOZ_ASSERT(data != *p);
+ sisd = *p;
+ } else {
+ if (!table.add(p, data)) {
+ ReportOutOfMemory(fc);
+ return false;
+ }
+
+ // Being in the table counts as a reference on the script data.
+ data->AddRef();
+ }
+
+ // Refs: sisd argument, SharedImmutableScriptDataTable
+ MOZ_ASSERT(sisd->refCount() >= 2);
+
+ return true;
+}
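+
+// Illustrative refcount transitions for shareScriptData():
+//
+//   fresh data, no table hit: 1 (caller) -> 2 (caller + table entry)
+//   duplicate already tabled: the passed data dies when |sisd| is re-pointed
+//                             at the existing entry, whose count is then at
+//                             least 2 (table + |sisd|)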
+
+static void SweepScriptDataTable(SharedImmutableScriptDataTable& table) {
+ // Entries are removed from the table when their reference count is one,
+ // i.e. when the only reference to them is from the table entry.
+
+ for (SharedImmutableScriptDataTable::Enum e(table); !e.empty();
+ e.popFront()) {
+ SharedImmutableScriptData* sharedData = e.front();
+ if (sharedData->refCount() == 1) {
+ sharedData->Release();
+ e.removeFront();
+ }
+ }
+}
+
+void js::SweepScriptData(JSRuntime* rt) {
+ SweepScriptDataTable(rt->scriptDataTableHolder().getWithoutLock());
+
+ AutoLockGlobalScriptData lock;
+ SweepScriptDataTable(js::globalSharedScriptDataTableHolder.get(lock));
+}
+
+inline size_t PrivateScriptData::allocationSize() const { return endOffset(); }
+
+// Initialize and placement-new the trailing arrays.
+PrivateScriptData::PrivateScriptData(uint32_t ngcthings)
+ : ngcthings(ngcthings) {
+ // Variable-length data begins immediately after PrivateScriptData itself.
+  // NOTE: Alignment is computed using cursor/offset, so the alignment of
+  // PrivateScriptData must be at least as strict as that of any trailing
+  // array type.
+ Offset cursor = sizeof(PrivateScriptData);
+
+ // Layout and initialize the gcthings array.
+ {
+ initElements<JS::GCCellPtr>(cursor, ngcthings);
+ cursor += ngcthings * sizeof(JS::GCCellPtr);
+ }
+
+ // Sanity check.
+ MOZ_ASSERT(endOffset() == cursor);
+}
+
+/* static */
+PrivateScriptData* PrivateScriptData::new_(JSContext* cx, uint32_t ngcthings) {
+ // Compute size including trailing arrays.
+ CheckedInt<Offset> size = sizeof(PrivateScriptData);
+ size += CheckedInt<Offset>(ngcthings) * sizeof(JS::GCCellPtr);
+ if (!size.isValid()) {
+ ReportAllocationOverflow(cx);
+ return nullptr;
+ }
+
+ // Allocate contiguous raw buffer for the trailing arrays.
+ void* raw = cx->pod_malloc<uint8_t>(size.value());
+ MOZ_ASSERT(uintptr_t(raw) % alignof(PrivateScriptData) == 0);
+ if (!raw) {
+ return nullptr;
+ }
+
+  // Construct the PrivateScriptData. Trailing arrays are uninitialized but
+ // GCPtrs are put into a safe state.
+ PrivateScriptData* result = new (raw) PrivateScriptData(ngcthings);
+ if (!result) {
+ return nullptr;
+ }
+
+ // Sanity check.
+ MOZ_ASSERT(result->endOffset() == size.value());
+
+ return result;
+}
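+
+// Resulting allocation layout (illustrative):
+//
+//   +-------------------+-------------------------------+
+//   | PrivateScriptData | ngcthings * JS::GCCellPtr     |
+//   +-------------------+-------------------------------+
+//   ^ raw               ^ raw + sizeof(PrivateScriptData)
+//
+// The trailing array is the only variable-length region, which is why
+// allocationSize() can simply return endOffset().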
+
+/* static */
+bool PrivateScriptData::InitFromStencil(
+ JSContext* cx, js::HandleScript script,
+ const js::frontend::CompilationAtomCache& atomCache,
+ const js::frontend::CompilationStencil& stencil,
+ js::frontend::CompilationGCOutput& gcOutput,
+ const js::frontend::ScriptIndex scriptIndex) {
+ js::frontend::ScriptStencil& scriptStencil = stencil.scriptData[scriptIndex];
+ uint32_t ngcthings = scriptStencil.gcThingsLength;
+
+ MOZ_ASSERT(ngcthings <= INDEX_LIMIT);
+
+ // Create and initialize PrivateScriptData
+ if (!JSScript::createPrivateScriptData(cx, script, ngcthings)) {
+ return false;
+ }
+
+ js::PrivateScriptData* data = script->data_;
+ if (ngcthings) {
+ if (!EmitScriptThingsVector(cx, atomCache, stencil, gcOutput,
+ scriptStencil.gcthings(stencil),
+ data->gcthings())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void PrivateScriptData::trace(JSTracer* trc) {
+ for (JS::GCCellPtr& elem : gcthings()) {
+ TraceManuallyBarrieredGCCellPtr(trc, &elem, "script-gcthing");
+ }
+}
+
+/*static*/
+JSScript* JSScript::Create(JSContext* cx, JS::Handle<JSFunction*> function,
+ js::Handle<ScriptSourceObject*> sourceObject,
+ const SourceExtent& extent,
+ js::ImmutableScriptFlags flags) {
+ return static_cast<JSScript*>(
+ BaseScript::New(cx, function, sourceObject, extent, flags));
+}
+
+#ifdef MOZ_VTUNE
+uint32_t JSScript::vtuneMethodID() {
+ if (!zone()->scriptVTuneIdMap) {
+ auto map = MakeUnique<ScriptVTuneIdMap>();
+ if (!map) {
+ MOZ_CRASH("Failed to allocate ScriptVTuneIdMap");
+ }
+
+ zone()->scriptVTuneIdMap = std::move(map);
+ }
+
+ ScriptVTuneIdMap::AddPtr p = zone()->scriptVTuneIdMap->lookupForAdd(this);
+ if (p) {
+ return p->value();
+ }
+
+ MOZ_ASSERT(this->hasBytecode());
+
+ uint32_t id = vtune::GenerateUniqueMethodID();
+ if (!zone()->scriptVTuneIdMap->add(p, this, id)) {
+ MOZ_CRASH("Failed to add vtune method id");
+ }
+
+ return id;
+}
+#endif
+
+/* static */
+bool JSScript::createPrivateScriptData(JSContext* cx, HandleScript script,
+ uint32_t ngcthings) {
+ cx->check(script);
+
+ UniquePtr<PrivateScriptData> data(PrivateScriptData::new_(cx, ngcthings));
+ if (!data) {
+ return false;
+ }
+
+ script->swapData(data);
+ MOZ_ASSERT(!data);
+
+ return true;
+}
+
+/* static */
+bool JSScript::fullyInitFromStencil(
+ JSContext* cx, const js::frontend::CompilationAtomCache& atomCache,
+ const js::frontend::CompilationStencil& stencil,
+ frontend::CompilationGCOutput& gcOutput, HandleScript script,
+ const js::frontend::ScriptIndex scriptIndex) {
+ MutableScriptFlags lazyMutableFlags;
+ Rooted<Scope*> lazyEnclosingScope(cx);
+
+ // A holder for the lazy PrivateScriptData that we must keep around in case
+ // this process fails and we must return the script to its original state.
+ //
+ // This is initialized by BaseScript::swapData() which will run pre-barriers
+ // for us. On successful conversion to non-lazy script, the old script data
+ // here will be released by the UniquePtr.
+ Rooted<UniquePtr<PrivateScriptData>> lazyData(cx);
+
+ // Whether we are a newborn script or an existing lazy script, we should
+ // already be pointing to the interpreter trampoline.
+ MOZ_ASSERT_IF(jit::HasJitBackend(),
+ script->isUsingInterpreterTrampoline(cx->runtime()));
+
+ // If we are using an existing lazy script, record enough info to be able to
+ // rollback on failure.
+ if (script->isReadyForDelazification()) {
+ lazyMutableFlags = script->mutableFlags_;
+ lazyEnclosingScope = script->releaseEnclosingScope();
+ script->swapData(lazyData.get());
+ MOZ_ASSERT(script->sharedData_ == nullptr);
+ }
+
+ // Restore the script to lazy state on failure. If this was a fresh script, we
+  // just need to clear the bytecode to mark the script as incomplete.
+ auto rollbackGuard = mozilla::MakeScopeExit([&] {
+ if (lazyEnclosingScope) {
+ script->mutableFlags_ = lazyMutableFlags;
+ script->warmUpData_.initEnclosingScope(lazyEnclosingScope);
+ script->swapData(lazyData.get());
+ script->sharedData_ = nullptr;
+
+ MOZ_ASSERT(script->isReadyForDelazification());
+ } else {
+ script->sharedData_ = nullptr;
+ }
+ });
+
+ // The counts of indexed things must be checked during code generation.
+ MOZ_ASSERT(stencil.scriptData[scriptIndex].gcThingsLength <= INDEX_LIMIT);
+
+ // Note: These flags should already be correct when the BaseScript was
+ // allocated.
+ MOZ_ASSERT_IF(stencil.isInitialStencil(),
+ script->immutableFlags() ==
+ stencil.scriptExtra[scriptIndex].immutableFlags);
+
+ // Create and initialize PrivateScriptData
+ if (!PrivateScriptData::InitFromStencil(cx, script, atomCache, stencil,
+ gcOutput, scriptIndex)) {
+ return false;
+ }
+
+ // Member-initializer data is computed in initial parse only. If we are
+ // delazifying, make sure to copy it off the `lazyData` before we throw it
+ // away.
+ if (script->useMemberInitializers()) {
+ if (stencil.isInitialStencil()) {
+ MemberInitializers initializers(
+ stencil.scriptExtra[scriptIndex].memberInitializers());
+ script->setMemberInitializers(initializers);
+ } else {
+ script->setMemberInitializers(lazyData.get()->getMemberInitializers());
+ }
+ }
+
+ auto* scriptData = stencil.sharedData.get(scriptIndex);
+ MOZ_ASSERT_IF(
+ script->isGenerator() || script->isAsync(),
+ scriptData->nfixed() <= frontend::ParseContext::Scope::FixedSlotLimit);
+
+ script->initSharedData(scriptData);
+
+ // NOTE: JSScript is now constructed and should be linked in.
+ rollbackGuard.release();
+
+ // Link Scope -> JSFunction -> BaseScript.
+ if (script->isFunction()) {
+ JSFunction* fun = gcOutput.getFunction(scriptIndex);
+ script->bodyScope()->as<FunctionScope>().initCanonicalFunction(fun);
+ if (fun->isIncomplete()) {
+ fun->initScript(script);
+ } else if (fun->hasSelfHostedLazyScript()) {
+ fun->clearSelfHostedLazyScript();
+ fun->initScript(script);
+ } else {
+ // We are delazifying in-place.
+ MOZ_ASSERT(fun->baseScript() == script);
+ }
+ }
+
+ // NOTE: The caller is responsible for linking ModuleObjects if this is a
+ // module script.
+
+#ifdef JS_STRUCTURED_SPEW
+ // We want this to happen after line number initialization to allow filtering
+ // to work.
+ script->setSpewEnabled(cx->spewer().enabled(script));
+#endif
+
+#ifdef DEBUG
+ script->assertValidJumpTargets();
+#endif
+
+ if (coverage::IsLCovEnabled()) {
+ if (!coverage::InitScriptCoverage(cx, script)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+JSScript* JSScript::fromStencil(JSContext* cx,
+ frontend::CompilationAtomCache& atomCache,
+ const frontend::CompilationStencil& stencil,
+ frontend::CompilationGCOutput& gcOutput,
+ frontend::ScriptIndex scriptIndex) {
+ js::frontend::ScriptStencil& scriptStencil = stencil.scriptData[scriptIndex];
+ js::frontend::ScriptStencilExtra& scriptExtra =
+ stencil.scriptExtra[scriptIndex];
+ MOZ_ASSERT(scriptStencil.hasSharedData(),
+ "Need generated bytecode to use JSScript::fromStencil");
+
+ Rooted<JSFunction*> function(cx);
+ if (scriptStencil.isFunction()) {
+ function = gcOutput.getFunction(scriptIndex);
+ }
+
+ Rooted<ScriptSourceObject*> sourceObject(cx, gcOutput.sourceObject);
+ RootedScript script(cx, Create(cx, function, sourceObject, scriptExtra.extent,
+ scriptExtra.immutableFlags));
+ if (!script) {
+ return nullptr;
+ }
+
+ if (!fullyInitFromStencil(cx, atomCache, stencil, gcOutput, script,
+ scriptIndex)) {
+ return nullptr;
+ }
+
+ return script;
+}
+
+#ifdef DEBUG
+void JSScript::assertValidJumpTargets() const {
+ BytecodeLocation mainLoc = mainLocation();
+ BytecodeLocation endLoc = endLocation();
+ AllBytecodesIterable iter(this);
+ for (BytecodeLocation loc : iter) {
+ // Check jump instructions' target.
+ if (loc.isJump()) {
+ BytecodeLocation target = loc.getJumpTarget();
+ MOZ_ASSERT(mainLoc <= target && target < endLoc);
+ MOZ_ASSERT(target.isJumpTarget());
+
+ // All backward jumps must be to a JSOp::LoopHead op. This is an invariant
+ // we want to maintain to simplify JIT compilation and bytecode analysis.
+ MOZ_ASSERT_IF(target < loc, target.is(JSOp::LoopHead));
+ MOZ_ASSERT_IF(target < loc, IsBackedgePC(loc.toRawBytecode()));
+
+ // All forward jumps must be to a JSOp::JumpTarget op.
+ MOZ_ASSERT_IF(target > loc, target.is(JSOp::JumpTarget));
+
+ // Jumps must not cross scope boundaries.
+ MOZ_ASSERT(loc.innermostScope(this) == target.innermostScope(this));
+
+ // Check fallthrough of conditional jump instructions.
+ if (loc.fallsThrough()) {
+ BytecodeLocation fallthrough = loc.next();
+ MOZ_ASSERT(mainLoc <= fallthrough && fallthrough < endLoc);
+ MOZ_ASSERT(fallthrough.isJumpTarget());
+ }
+ }
+
+ // Check table switch case labels.
+ if (loc.is(JSOp::TableSwitch)) {
+ BytecodeLocation target = loc.getTableSwitchDefaultTarget();
+
+ // Default target.
+ MOZ_ASSERT(mainLoc <= target && target < endLoc);
+ MOZ_ASSERT(target.is(JSOp::JumpTarget));
+
+ int32_t low = loc.getTableSwitchLow();
+ int32_t high = loc.getTableSwitchHigh();
+
+ for (int i = 0; i < high - low + 1; i++) {
+ BytecodeLocation switchCase = loc.getTableSwitchCaseTarget(this, i);
+ MOZ_ASSERT(mainLoc <= switchCase && switchCase < endLoc);
+ MOZ_ASSERT(switchCase.is(JSOp::JumpTarget));
+ }
+ }
+ }
+
+ // Check catch/finally blocks as jump targets.
+ for (const TryNote& tn : trynotes()) {
+ if (tn.kind() != TryNoteKind::Catch && tn.kind() != TryNoteKind::Finally) {
+ continue;
+ }
+
+ jsbytecode* tryStart = offsetToPC(tn.start);
+ jsbytecode* tryPc = tryStart - JSOpLength_Try;
+ MOZ_ASSERT(JSOp(*tryPc) == JSOp::Try);
+
+ jsbytecode* tryTarget = tryStart + tn.length;
+ MOZ_ASSERT(main() <= tryTarget && tryTarget < codeEnd());
+ MOZ_ASSERT(BytecodeIsJumpTarget(JSOp(*tryTarget)));
+ }
+}
+#endif
+
+void JSScript::addSizeOfJitScript(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* sizeOfJitScript,
+ size_t* sizeOfBaselineFallbackStubs) const {
+ if (!hasJitScript()) {
+ return;
+ }
+
+ jitScript()->addSizeOfIncludingThis(mallocSizeOf, sizeOfJitScript,
+ sizeOfBaselineFallbackStubs);
+}
+
+js::GlobalObject& JSScript::uninlinedGlobal() const { return global(); }
+
+static const uint32_t GSN_CACHE_THRESHOLD = 100;
+
+void GSNCache::purge() {
+ code = nullptr;
+ map.clearAndCompact();
+}
+
+const js::SrcNote* js::GetSrcNote(GSNCache& cache, JSScript* script,
+ jsbytecode* pc) {
+ size_t target = pc - script->code();
+ if (target >= script->length()) {
+ return nullptr;
+ }
+
+ if (cache.code == script->code()) {
+ GSNCache::Map::Ptr p = cache.map.lookup(pc);
+ return p ? p->value() : nullptr;
+ }
+
+ size_t offset = 0;
+ const js::SrcNote* result;
+ for (SrcNoteIterator iter(script->notes());; ++iter) {
+ const auto* sn = *iter;
+ if (sn->isTerminator()) {
+ result = nullptr;
+ break;
+ }
+ offset += sn->delta();
+ if (offset == target && sn->isGettable()) {
+ result = sn;
+ break;
+ }
+ }
+
+ if (cache.code != script->code() && script->length() >= GSN_CACHE_THRESHOLD) {
+ unsigned nsrcnotes = 0;
+ for (SrcNoteIterator iter(script->notes()); !iter.atEnd(); ++iter) {
+ const auto* sn = *iter;
+ if (sn->isGettable()) {
+ ++nsrcnotes;
+ }
+ }
+ if (cache.code) {
+ cache.map.clear();
+ cache.code = nullptr;
+ }
+ if (cache.map.reserve(nsrcnotes)) {
+ pc = script->code();
+ for (SrcNoteIterator iter(script->notes()); !iter.atEnd(); ++iter) {
+ const auto* sn = *iter;
+ pc += sn->delta();
+ if (sn->isGettable()) {
+ cache.map.putNewInfallible(pc, sn);
+ }
+ }
+ cache.code = script->code();
+ }
+ }
+
+ return result;
+}
+
+const js::SrcNote* js::GetSrcNote(JSContext* cx, JSScript* script,
+ jsbytecode* pc) {
+ return GetSrcNote(cx->caches().gsnCache, script, pc);
+}
+
+unsigned js::PCToLineNumber(unsigned startLine, unsigned startCol,
+ SrcNote* notes, jsbytecode* code, jsbytecode* pc,
+ unsigned* columnp) {
+ unsigned lineno = startLine;
+ unsigned column = startCol;
+
+ /*
+ * Walk through source notes accumulating their deltas, keeping track of
+ * line-number notes, until we pass the note for pc's offset within
+ * script->code.
+ */
+ ptrdiff_t offset = 0;
+ ptrdiff_t target = pc - code;
+ for (SrcNoteIterator iter(notes); !iter.atEnd(); ++iter) {
+ const auto* sn = *iter;
+ offset += sn->delta();
+ if (offset > target) {
+ break;
+ }
+
+ SrcNoteType type = sn->type();
+ if (type == SrcNoteType::SetLine) {
+ lineno = SrcNote::SetLine::getLine(sn, startLine);
+ column = 0;
+ } else if (type == SrcNoteType::NewLine) {
+ lineno++;
+ column = 0;
+ } else if (type == SrcNoteType::ColSpan) {
+ ptrdiff_t colspan = SrcNote::ColSpan::getSpan(sn);
+ MOZ_ASSERT(ptrdiff_t(column) + colspan >= 0);
+ column += colspan;
+ }
+ }
+
+ if (columnp) {
+ *columnp = column;
+ }
+
+ return lineno;
+}
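+
+// Worked example of the walk above (notes illustrative): starting at line 10,
+// column 0, with notes [delta 5, NewLine] and [delta 3, ColSpan +4], and a
+// target pc at offset 7:
+//
+//   offset becomes 5 (<= 7): apply NewLine -> lineno 11, column 0
+//   offset becomes 8 (>  7): stop before applying the ColSpan
+//
+// Result: line 11, column 0.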
+
+unsigned js::PCToLineNumber(JSScript* script, jsbytecode* pc,
+ unsigned* columnp) {
+ /* Cope with InterpreterFrame.pc value prior to entering Interpret. */
+ if (!pc) {
+ return 0;
+ }
+
+ return PCToLineNumber(script->lineno(), script->column(), script->notes(),
+ script->code(), pc, columnp);
+}
+
+jsbytecode* js::LineNumberToPC(JSScript* script, unsigned target) {
+ ptrdiff_t offset = 0;
+ ptrdiff_t best = -1;
+ unsigned lineno = script->lineno();
+ unsigned bestdiff = SrcNote::MaxOperand;
+ for (SrcNoteIterator iter(script->notes()); !iter.atEnd(); ++iter) {
+ const auto* sn = *iter;
+ /*
+ * Exact-match only if offset is not in the prologue; otherwise use
+ * nearest greater-or-equal line number match.
+ */
+ if (lineno == target && offset >= ptrdiff_t(script->mainOffset())) {
+ goto out;
+ }
+ if (lineno >= target) {
+ unsigned diff = lineno - target;
+ if (diff < bestdiff) {
+ bestdiff = diff;
+ best = offset;
+ }
+ }
+ offset += sn->delta();
+ SrcNoteType type = sn->type();
+ if (type == SrcNoteType::SetLine) {
+ lineno = SrcNote::SetLine::getLine(sn, script->lineno());
+ } else if (type == SrcNoteType::NewLine) {
+ lineno++;
+ }
+ }
+ if (best >= 0) {
+ offset = best;
+ }
+out:
+ return script->offsetToPC(offset);
+}
+
+JS_PUBLIC_API unsigned js::GetScriptLineExtent(JSScript* script) {
+ unsigned lineno = script->lineno();
+ unsigned maxLineNo = lineno;
+ for (SrcNoteIterator iter(script->notes()); !iter.atEnd(); ++iter) {
+ const auto* sn = *iter;
+ SrcNoteType type = sn->type();
+ if (type == SrcNoteType::SetLine) {
+ lineno = SrcNote::SetLine::getLine(sn, script->lineno());
+ } else if (type == SrcNoteType::NewLine) {
+ lineno++;
+ }
+
+ if (maxLineNo < lineno) {
+ maxLineNo = lineno;
+ }
+ }
+
+ return 1 + maxLineNo - script->lineno();
+}
+
+#ifdef JS_CACHEIR_SPEW
+void js::maybeUpdateWarmUpCount(JSScript* script) {
+ if (script->needsFinalWarmUpCount()) {
+ ScriptFinalWarmUpCountMap* map =
+ script->zone()->scriptFinalWarmUpCountMap.get();
+ // If needsFinalWarmUpCount is true, ScriptFinalWarmUpCountMap must have
+    // already been created, so we can assert that it exists.
+ MOZ_ASSERT(map);
+ ScriptFinalWarmUpCountMap::Ptr p = map->lookup(script);
+ MOZ_ASSERT(p);
+
+ std::get<0>(p->value()) += script->jitScript()->warmUpCount();
+ }
+}
+
+void js::maybeSpewScriptFinalWarmUpCount(JSScript* script) {
+ if (script->needsFinalWarmUpCount()) {
+ ScriptFinalWarmUpCountMap* map =
+ script->zone()->scriptFinalWarmUpCountMap.get();
+ // If needsFinalWarmUpCount is true, ScriptFinalWarmUpCountMap must have
+    // already been created, so we can assert that it exists.
+ MOZ_ASSERT(map);
+ ScriptFinalWarmUpCountMap::Ptr p = map->lookup(script);
+ MOZ_ASSERT(p);
+ auto& tuple = p->value();
+ uint32_t warmUpCount = std::get<0>(tuple);
+ SharedImmutableString& scriptName = std::get<1>(tuple);
+
+ JSContext* cx = TlsContext.get();
+ cx->spewer().enableSpewing();
+
+ // In the case that we care about a script's final warmup count but the
+ // spewer is not enabled, AutoSpewChannel automatically sets and unsets
+ // the proper channel for the duration of spewing a health report's warm
+ // up count.
+ AutoSpewChannel channel(cx, SpewChannel::CacheIRHealthReport, script);
+ jit::CacheIRHealth cih;
+ cih.spewScriptFinalWarmUpCount(cx, scriptName.chars(), script, warmUpCount);
+
+ script->zone()->scriptFinalWarmUpCountMap->remove(script);
+ script->setNeedsFinalWarmUpCount(false);
+ }
+}
+#endif
+
+void js::DescribeScriptedCallerForDirectEval(JSContext* cx, HandleScript script,
+ jsbytecode* pc, const char** file,
+ unsigned* linenop,
+ uint32_t* pcOffset,
+ bool* mutedErrors) {
+ MOZ_ASSERT(script->containsPC(pc));
+
+ static_assert(JSOpLength_SpreadEval == JSOpLength_StrictSpreadEval,
+ "next op after a spread must be at consistent offset");
+ static_assert(JSOpLength_Eval == JSOpLength_StrictEval,
+ "next op after a direct eval must be at consistent offset");
+
+ MOZ_ASSERT(JSOp(*pc) == JSOp::Eval || JSOp(*pc) == JSOp::StrictEval ||
+ JSOp(*pc) == JSOp::SpreadEval ||
+ JSOp(*pc) == JSOp::StrictSpreadEval);
+
+ bool isSpread =
+ (JSOp(*pc) == JSOp::SpreadEval || JSOp(*pc) == JSOp::StrictSpreadEval);
+ jsbytecode* nextpc =
+ pc + (isSpread ? JSOpLength_SpreadEval : JSOpLength_Eval);
+ MOZ_ASSERT(JSOp(*nextpc) == JSOp::Lineno);
+
+ *file = script->filename();
+ *linenop = GET_UINT32(nextpc);
+ *pcOffset = script->pcToOffset(pc);
+ *mutedErrors = script->mutedErrors();
+}
+
+void js::DescribeScriptedCallerForCompilation(
+ JSContext* cx, MutableHandleScript maybeScript, const char** file,
+ unsigned* linenop, uint32_t* pcOffset, bool* mutedErrors) {
+ NonBuiltinFrameIter iter(cx, cx->realm()->principals());
+
+ if (iter.done()) {
+ maybeScript.set(nullptr);
+ *file = nullptr;
+ *linenop = 0;
+ *pcOffset = 0;
+ *mutedErrors = false;
+ return;
+ }
+
+ *file = iter.filename();
+ *linenop = iter.computeLine();
+ *mutedErrors = iter.mutedErrors();
+
+  // These values are only used for introducer fields, which are debugging
+ // information and can be safely left null for wasm frames.
+ if (iter.hasScript()) {
+ maybeScript.set(iter.script());
+ *pcOffset = iter.pc() - maybeScript->code();
+ } else {
+ maybeScript.set(nullptr);
+ *pcOffset = 0;
+ }
+}
+
+template <typename SourceSpan, typename TargetSpan>
+void CopySpan(const SourceSpan& source, TargetSpan target) {
+ MOZ_ASSERT(source.size() == target.size());
+ std::copy(source.cbegin(), source.cend(), target.begin());
+}
+
+/* static */
+js::UniquePtr<ImmutableScriptData> ImmutableScriptData::new_(
+ FrontendContext* fc, uint32_t mainOffset, uint32_t nfixed, uint32_t nslots,
+ GCThingIndex bodyScopeIndex, uint32_t numICEntries, bool isFunction,
+ uint16_t funLength, uint16_t propertyCountEstimate,
+ mozilla::Span<const jsbytecode> code, mozilla::Span<const SrcNote> notes,
+ mozilla::Span<const uint32_t> resumeOffsets,
+ mozilla::Span<const ScopeNote> scopeNotes,
+ mozilla::Span<const TryNote> tryNotes) {
+ MOZ_RELEASE_ASSERT(code.Length() <= frontend::MaxBytecodeLength);
+
+ // There are 1-4 copies of SrcNoteType::Null appended after the source
+ // notes. These are a combination of sentinel and padding values.
+ static_assert(frontend::MaxSrcNotesLength <= UINT32_MAX - CodeNoteAlign,
+ "Length + CodeNoteAlign shouldn't overflow UINT32_MAX");
+ size_t noteLength = notes.Length();
+ MOZ_RELEASE_ASSERT(noteLength <= frontend::MaxSrcNotesLength);
+
+ size_t nullLength = ComputeNotePadding(code.Length(), noteLength);
+
+ // Allocate ImmutableScriptData
+ js::UniquePtr<ImmutableScriptData> data(ImmutableScriptData::new_(
+ fc, code.Length(), noteLength + nullLength, resumeOffsets.Length(),
+ scopeNotes.Length(), tryNotes.Length()));
+ if (!data) {
+ return data;
+ }
+
+ // Initialize POD fields
+ data->mainOffset = mainOffset;
+ data->nfixed = nfixed;
+ data->nslots = nslots;
+ data->bodyScopeIndex = bodyScopeIndex;
+ data->numICEntries = numICEntries;
+ data->propertyCountEstimate = propertyCountEstimate;
+
+ if (isFunction) {
+ data->funLength = funLength;
+ }
+
+ // Initialize trailing arrays
+ CopySpan(code, data->codeSpan());
+ CopySpan(notes, data->notesSpan().To(noteLength));
+ std::fill_n(data->notes() + noteLength, nullLength, SrcNote::terminator());
+ CopySpan(resumeOffsets, data->resumeOffsets());
+ CopySpan(scopeNotes, data->scopeNotes());
+ CopySpan(tryNotes, data->tryNotes());
+
+ return data;
+}
+
+void ScriptWarmUpData::trace(JSTracer* trc) {
+ uintptr_t tag = data_ & TagMask;
+ switch (tag) {
+ case EnclosingScriptTag: {
+ BaseScript* enclosingScript = toEnclosingScript();
+ BaseScript* prior = enclosingScript;
+ TraceManuallyBarrieredEdge(trc, &enclosingScript, "enclosingScript");
+ if (enclosingScript != prior) {
+ setTaggedPtr<EnclosingScriptTag>(enclosingScript);
+ }
+ break;
+ }
+
+ case EnclosingScopeTag: {
+ Scope* enclosingScope = toEnclosingScope();
+ Scope* prior = enclosingScope;
+ TraceManuallyBarrieredEdge(trc, &enclosingScope, "enclosingScope");
+ if (enclosingScope != prior) {
+ setTaggedPtr<EnclosingScopeTag>(enclosingScope);
+ }
+ break;
+ }
+
+ case JitScriptTag: {
+ toJitScript()->trace(trc);
+ break;
+ }
+
+ default: {
+ MOZ_ASSERT(isWarmUpCount());
+ break;
+ }
+ }
+}
+
+size_t JSScript::calculateLiveFixed(jsbytecode* pc) {
+ size_t nlivefixed = numAlwaysLiveFixedSlots();
+
+ if (nfixed() != nlivefixed) {
+ Scope* scope = lookupScope(pc);
+ if (scope) {
+ scope = MaybeForwarded(scope);
+ }
+
+ // Find the nearest LexicalScope in the same script.
+ while (scope && scope->is<WithScope>()) {
+ scope = scope->enclosing();
+ if (scope) {
+ scope = MaybeForwarded(scope);
+ }
+ }
+
+ if (scope) {
+ if (scope->is<LexicalScope>()) {
+ nlivefixed = scope->as<LexicalScope>().nextFrameSlot();
+ } else if (scope->is<VarScope>()) {
+ nlivefixed = scope->as<VarScope>().nextFrameSlot();
+ } else if (scope->is<ClassBodyScope>()) {
+ nlivefixed = scope->as<ClassBodyScope>().nextFrameSlot();
+ }
+ }
+ }
+
+ MOZ_ASSERT(nlivefixed <= nfixed());
+ MOZ_ASSERT(nlivefixed >= numAlwaysLiveFixedSlots());
+
+ return nlivefixed;
+}
+
+Scope* JSScript::lookupScope(const jsbytecode* pc) const {
+ MOZ_ASSERT(containsPC(pc));
+
+ size_t offset = pc - code();
+
+ auto notes = scopeNotes();
+ Scope* scope = nullptr;
+
+ // Find the innermost block chain using a binary search.
+ size_t bottom = 0;
+ size_t top = notes.size();
+
+ while (bottom < top) {
+ size_t mid = bottom + (top - bottom) / 2;
+ const ScopeNote* note = &notes[mid];
+ if (note->start <= offset) {
+ // Block scopes are ordered in the list by their starting offset, and
+      // since blocks form a tree, ones earlier in the list may cover the pc even
+ // if later blocks end before the pc. This only happens when the earlier
+ // block is a parent of the later block, so we need to check parents of
+ // |mid| in the searched range for coverage.
+ size_t check = mid;
+ while (check >= bottom) {
+ const ScopeNote* checkNote = &notes[check];
+ MOZ_ASSERT(checkNote->start <= offset);
+ if (offset < checkNote->start + checkNote->length) {
+ // We found a matching block chain but there may be inner ones
+ // at a higher block chain index than mid. Continue the binary search.
+ if (checkNote->index == ScopeNote::NoScopeIndex) {
+ scope = nullptr;
+ } else {
+ scope = getScope(checkNote->index);
+ }
+ break;
+ }
+ if (checkNote->parent == UINT32_MAX) {
+ break;
+ }
+ check = checkNote->parent;
+ }
+ bottom = mid + 1;
+ } else {
+ top = mid;
+ }
+ }
+
+ return scope;
+}
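+
+// Worked example for the parent-chain check above (offsets illustrative):
+// with note A = [start 0, length 100] as the parent of note B = [start 10,
+// length 20], a pc at offset 50 can land on |mid| == B first. B ends at
+// offset 30, so the loop follows B's parent link to A, which does cover
+// offset 50 and supplies the scope.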
+
+Scope* JSScript::innermostScope(const jsbytecode* pc) const {
+ if (Scope* scope = lookupScope(pc)) {
+ return scope;
+ }
+ return bodyScope();
+}
+
+void js::SetFrameArgumentsObject(JSContext* cx, AbstractFramePtr frame,
+ HandleScript script, JSObject* argsobj) {
+ /*
+ * If the arguments object was optimized out by scalar replacement,
+ * we must recreate it when we bail out. Because 'arguments' may have
+ * already been overwritten, we must check to see if the slot already
+ * contains a value.
+ */
+
+ Rooted<BindingIter> bi(cx, BindingIter(script));
+ while (bi && bi.name() != cx->names().arguments) {
+ bi++;
+ }
+ if (!bi) {
+ return;
+ }
+
+ if (bi.location().kind() == BindingLocation::Kind::Environment) {
+#ifdef DEBUG
+ /*
+ * If |arguments| lives in the call object, we should not have
+ * optimized it. Scan the script to find the slot in the call
+ * object that |arguments| is assigned to and verify that it
+ * already exists.
+ */
+ jsbytecode* pc = script->code();
+ while (JSOp(*pc) != JSOp::Arguments) {
+ pc += GetBytecodeLength(pc);
+ }
+ pc += JSOpLength_Arguments;
+ MOZ_ASSERT(JSOp(*pc) == JSOp::SetAliasedVar);
+
+ EnvironmentObject& env = frame.callObj().as<EnvironmentObject>();
+ MOZ_ASSERT(!env.aliasedBinding(bi).isMagic(JS_OPTIMIZED_OUT));
+#endif
+ return;
+ }
+
+ MOZ_ASSERT(bi.location().kind() == BindingLocation::Kind::Frame);
+ uint32_t frameSlot = bi.location().slot();
+ if (frame.unaliasedLocal(frameSlot).isMagic(JS_OPTIMIZED_OUT)) {
+ frame.unaliasedLocal(frameSlot) = ObjectValue(*argsobj);
+ }
+}
+
+bool JSScript::formalIsAliased(unsigned argSlot) {
+ if (functionHasParameterExprs()) {
+ return false;
+ }
+
+ for (PositionalFormalParameterIter fi(this); fi; fi++) {
+ if (fi.argumentSlot() == argSlot) {
+ return fi.closedOver();
+ }
+ }
+ MOZ_CRASH("Argument slot not found");
+}
+
+// Returns true if any formal argument is mapped by the arguments
+// object, but lives in the call object.
+bool JSScript::anyFormalIsForwarded() {
+ if (!argsObjAliasesFormals()) {
+ return false;
+ }
+
+ for (PositionalFormalParameterIter fi(this); fi; fi++) {
+ if (fi.closedOver()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool JSScript::formalLivesInArgumentsObject(unsigned argSlot) {
+ return argsObjAliasesFormals() && !formalIsAliased(argSlot);
+}
+
+BaseScript::BaseScript(uint8_t* stubEntry, JSFunction* function,
+ ScriptSourceObject* sourceObject,
+ const SourceExtent& extent, uint32_t immutableFlags)
+ : TenuredCellWithNonGCPointer(stubEntry),
+ function_(function),
+ sourceObject_(sourceObject),
+ extent_(extent),
+ immutableFlags_(immutableFlags) {
+ MOZ_ASSERT(extent_.toStringStart <= extent_.sourceStart);
+ MOZ_ASSERT(extent_.sourceStart <= extent_.sourceEnd);
+ MOZ_ASSERT(extent_.sourceEnd <= extent_.toStringEnd);
+}
+
+/* static */
+BaseScript* BaseScript::New(JSContext* cx, JS::Handle<JSFunction*> function,
+ Handle<ScriptSourceObject*> sourceObject,
+ const SourceExtent& extent,
+ uint32_t immutableFlags) {
+ uint8_t* stubEntry = nullptr;
+ if (jit::HasJitBackend()) {
+ stubEntry = cx->runtime()->jitRuntime()->interpreterStub().value;
+ }
+
+ MOZ_ASSERT_IF(function,
+ function->compartment() == sourceObject->compartment());
+ MOZ_ASSERT_IF(function, function->realm() == sourceObject->realm());
+
+ return cx->newCell<BaseScript>(stubEntry, function, sourceObject, extent,
+ immutableFlags);
+}
+
+/* static */
+BaseScript* BaseScript::CreateRawLazy(JSContext* cx, uint32_t ngcthings,
+ HandleFunction fun,
+ Handle<ScriptSourceObject*> sourceObject,
+ const SourceExtent& extent,
+ uint32_t immutableFlags) {
+ cx->check(fun);
+
+ BaseScript* lazy = New(cx, fun, sourceObject, extent, immutableFlags);
+ if (!lazy) {
+ return nullptr;
+ }
+
+ // Allocate a PrivateScriptData if it will not be empty. Lazy class
+ // constructors that use member initializers also need PrivateScriptData for
+ // field data.
+ //
+ // This condition is implicit in BaseScript::hasPrivateScriptData, and should
+ // be mirrored on InputScript::hasPrivateScriptData.
+ if (ngcthings || lazy->useMemberInitializers()) {
+ UniquePtr<PrivateScriptData> data(PrivateScriptData::new_(cx, ngcthings));
+ if (!data) {
+ return nullptr;
+ }
+ lazy->swapData(data);
+ MOZ_ASSERT(!data);
+ }
+
+ return lazy;
+}
+
+void JSScript::updateJitCodeRaw(JSRuntime* rt) {
+ MOZ_ASSERT(rt);
+ if (hasBaselineScript() && baselineScript()->hasPendingIonCompileTask()) {
+ MOZ_ASSERT(!isIonCompilingOffThread());
+ setJitCodeRaw(rt->jitRuntime()->lazyLinkStub().value);
+ } else if (hasIonScript()) {
+ jit::IonScript* ion = ionScript();
+ setJitCodeRaw(ion->method()->raw());
+ } else if (hasBaselineScript()) {
+ setJitCodeRaw(baselineScript()->method()->raw());
+ } else if (hasJitScript() && js::jit::IsBaselineInterpreterEnabled()) {
+ bool usingEntryTrampoline = false;
+ if (js::jit::JitOptions.emitInterpreterEntryTrampoline) {
+ auto p = rt->jitRuntime()->getInterpreterEntryMap()->lookup(this);
+ if (p) {
+ setJitCodeRaw(p->value().raw());
+ usingEntryTrampoline = true;
+ }
+ }
+ if (!usingEntryTrampoline) {
+ setJitCodeRaw(rt->jitRuntime()->baselineInterpreter().codeRaw());
+ }
+ } else {
+ setJitCodeRaw(rt->jitRuntime()->interpreterStub().value);
+ }
+ MOZ_ASSERT(jitCodeRaw());
+}
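+
+// Illustrative note: the selection above walks the execution tiers in
+// priority order: lazy-link stub (an Ion result is pending) -> Ion code ->
+// Baseline JIT code -> Baseline Interpreter (possibly through a per-script
+// interpreter-entry trampoline) -> C++ interpreter stub.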
+
+bool JSScript::hasLoops() {
+ for (const TryNote& tn : trynotes()) {
+ if (tn.isLoop()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool JSScript::mayReadFrameArgsDirectly() {
+ return needsArgsObj() || usesArgumentsIntrinsics() || hasRest();
+}
+
+void JSScript::resetWarmUpCounterToDelayIonCompilation() {
+ // Reset the warm-up count only if it's greater than the BaselineCompiler
+ // threshold. We do this to ensure this has no effect on Baseline compilation
+ // because we don't want scripts to get stuck in the (Baseline) interpreter in
+ // pathological cases.
+
+ if (getWarmUpCount() > jit::JitOptions.baselineJitWarmUpThreshold) {
+ incWarmUpResetCounter();
+ uint32_t newCount = jit::JitOptions.baselineJitWarmUpThreshold;
+ if (warmUpData_.isWarmUpCount()) {
+ warmUpData_.resetWarmUpCount(newCount);
+ } else {
+ warmUpData_.toJitScript()->resetWarmUpCount(newCount);
+ }
+ }
+}
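+
+// Hedged example with made-up numbers: if baselineJitWarmUpThreshold were 100
+// and the current warm-up count were 5000, the count is reset to 100, so the
+// script keeps running in Baseline while Ion recompilation is pushed back.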
+
+gc::AllocSite* JSScript::createAllocSite() {
+ return jitScript()->createAllocSite(this);
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+
+void JSScript::dump(JSContext* cx) {
+ JS::Rooted<JSScript*> script(cx, this);
+
+ js::Sprinter sp(cx);
+ if (!sp.init()) {
+ return;
+ }
+
+ DumpOptions options;
+ options.runtimeData = true;
+ if (!dump(cx, script, options, &sp)) {
+ return;
+ }
+
+ fprintf(stderr, "%s\n", sp.string());
+}
+
+void JSScript::dumpRecursive(JSContext* cx) {
+ JS::Rooted<JSScript*> script(cx, this);
+
+ js::Sprinter sp(cx);
+ if (!sp.init()) {
+ return;
+ }
+
+ DumpOptions options;
+ options.runtimeData = true;
+ options.recursive = true;
+ if (!dump(cx, script, options, &sp)) {
+ return;
+ }
+
+ fprintf(stderr, "%s\n", sp.string());
+}
+
+static void DumpMutableScriptFlags(js::JSONPrinter& json,
+ MutableScriptFlags mutableFlags) {
+ // Skip warmup data.
+ static_assert(int(MutableScriptFlagsEnum::WarmupResets_MASK) == 0xff);
+
+ for (uint32_t i = 0x100; i; i = i << 1) {
+ if (uint32_t(mutableFlags) & i) {
+ switch (MutableScriptFlagsEnum(i)) {
+ case MutableScriptFlagsEnum::HasRunOnce:
+ json.value("HasRunOnce");
+ break;
+ case MutableScriptFlagsEnum::HasBeenCloned:
+ json.value("HasBeenCloned");
+ break;
+ case MutableScriptFlagsEnum::HasScriptCounts:
+ json.value("HasScriptCounts");
+ break;
+ case MutableScriptFlagsEnum::HasDebugScript:
+ json.value("HasDebugScript");
+ break;
+ case MutableScriptFlagsEnum::AllowRelazify:
+ json.value("AllowRelazify");
+ break;
+ case MutableScriptFlagsEnum::SpewEnabled:
+ json.value("SpewEnabled");
+ break;
+ case MutableScriptFlagsEnum::NeedsFinalWarmUpCount:
+ json.value("NeedsFinalWarmUpCount");
+ break;
+ case MutableScriptFlagsEnum::BaselineDisabled:
+ json.value("BaselineDisabled");
+ break;
+ case MutableScriptFlagsEnum::IonDisabled:
+ json.value("IonDisabled");
+ break;
+ case MutableScriptFlagsEnum::Uninlineable:
+ json.value("Uninlineable");
+ break;
+ case MutableScriptFlagsEnum::NoEagerBaselineHint:
+ json.value("NoEagerBaselineHint");
+ break;
+ case MutableScriptFlagsEnum::FailedBoundsCheck:
+ json.value("FailedBoundsCheck");
+ break;
+ case MutableScriptFlagsEnum::HadLICMInvalidation:
+ json.value("HadLICMInvalidation");
+ break;
+ case MutableScriptFlagsEnum::HadReorderingBailout:
+ json.value("HadReorderingBailout");
+ break;
+ case MutableScriptFlagsEnum::HadEagerTruncationBailout:
+ json.value("HadEagerTruncationBailout");
+ break;
+ case MutableScriptFlagsEnum::FailedLexicalCheck:
+ json.value("FailedLexicalCheck");
+ break;
+ case MutableScriptFlagsEnum::HadSpeculativePhiBailout:
+ json.value("HadSpeculativePhiBailout");
+ break;
+ case MutableScriptFlagsEnum::HadUnboxFoldingBailout:
+ json.value("HadUnboxFoldingBailout");
+ break;
+ default:
+ json.value("Unknown(%x)", i);
+ break;
+ }
+ }
+ }
+}
+
+/* static */
+bool JSScript::dump(JSContext* cx, JS::Handle<JSScript*> script,
+ DumpOptions& options, js::Sprinter* sp) {
+ {
+ JSONPrinter json(*sp);
+
+ json.beginObject();
+
+ if (const char* filename = script->filename()) {
+ json.property("file", filename);
+ } else {
+ json.nullProperty("file");
+ }
+
+ json.property("lineno", script->lineno());
+ json.property("column", script->column());
+
+ json.beginListProperty("immutableFlags");
+ DumpImmutableScriptFlags(json, script->immutableFlags());
+ json.endList();
+
+ if (options.runtimeData) {
+ json.beginListProperty("mutableFlags");
+ DumpMutableScriptFlags(json, script->mutableFlags_);
+ json.endList();
+ }
+
+ if (script->isFunction()) {
+ JS::Rooted<JSFunction*> fun(cx, script->function());
+
+ JS::Rooted<JSAtom*> name(cx, fun->displayAtom());
+ if (name) {
+ UniqueChars bytes = JS_EncodeStringToUTF8(cx, name);
+ if (!bytes) {
+ return false;
+ }
+ json.property("functionName", bytes.get());
+ } else {
+ json.nullProperty("functionName");
+ }
+
+ json.beginListProperty("functionFlags");
+ DumpFunctionFlagsItems(json, fun->flags());
+ json.endList();
+ }
+
+ json.endObject();
+ }
+
+ if (sp->hadOutOfMemory()) {
+ return false;
+ }
+
+ if (!sp->put("\n")) {
+ return false;
+ }
+
+ if (!Disassemble(cx, script, /* lines = */ true, sp)) {
+ return false;
+ }
+ if (!dumpSrcNotes(cx, script, sp)) {
+ return false;
+ }
+ if (!dumpTryNotes(cx, script, sp)) {
+ return false;
+ }
+ if (!dumpScopeNotes(cx, script, sp)) {
+ return false;
+ }
+ if (!dumpGCThings(cx, script, sp)) {
+ return false;
+ }
+
+ if (options.recursive) {
+ for (JS::GCCellPtr gcThing : script->gcthings()) {
+ if (!gcThing.is<JSObject>()) {
+ continue;
+ }
+
+ JSObject* obj = &gcThing.as<JSObject>();
+ if (obj->is<JSFunction>()) {
+ if (!sp->put("\n")) {
+ return false;
+ }
+
+ JS::Rooted<JSFunction*> fun(cx, &obj->as<JSFunction>());
+ if (fun->isInterpreted()) {
+ JS::Rooted<JSScript*> innerScript(
+ cx, JSFunction::getOrCreateScript(cx, fun));
+ if (!innerScript) {
+ return false;
+ }
+ if (!dump(cx, innerScript, options, sp)) {
+ return false;
+ }
+ } else {
+ if (!sp->put("[native code]\n")) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+/* static */
+bool JSScript::dumpSrcNotes(JSContext* cx, JS::Handle<JSScript*> script,
+ js::Sprinter* sp) {
+ if (!sp->put("\nSource notes:\n") ||
+ !sp->jsprintf("%4s %4s %6s %5s %6s %-10s %s\n", "ofs", "line", "column",
+ "pc", "delta", "desc", "args") ||
+ !sp->put("---- ---- ------ ----- ------ ---------- ------\n")) {
+ return false;
+ }
+
+ unsigned offset = 0;
+ unsigned lineno = script->lineno();
+ unsigned column = script->column();
+ SrcNote* notes = script->notes();
+ for (SrcNoteIterator iter(notes); !iter.atEnd(); ++iter) {
+ const auto* sn = *iter;
+
+ unsigned delta = sn->delta();
+ offset += delta;
+ SrcNoteType type = sn->type();
+ const char* name = sn->name();
+ if (!sp->jsprintf("%3u: %4u %6u %5u [%4u] %-10s", unsigned(sn - notes),
+ lineno, column, offset, delta, name)) {
+ return false;
+ }
+
+ switch (type) {
+ case SrcNoteType::Null:
+ case SrcNoteType::AssignOp:
+ case SrcNoteType::Breakpoint:
+ case SrcNoteType::StepSep:
+ case SrcNoteType::XDelta:
+ break;
+
+ case SrcNoteType::ColSpan: {
+ uint32_t colspan = SrcNote::ColSpan::getSpan(sn);
+ if (!sp->jsprintf(" colspan %u", colspan)) {
+ return false;
+ }
+ column += colspan;
+ break;
+ }
+
+ case SrcNoteType::SetLine:
+ lineno = SrcNote::SetLine::getLine(sn, script->lineno());
+ if (!sp->jsprintf(" lineno %u", lineno)) {
+ return false;
+ }
+ column = 0;
+ break;
+
+ case SrcNoteType::NewLine:
+ ++lineno;
+ column = 0;
+ break;
+
+ default:
+ MOZ_ASSERT_UNREACHABLE("unrecognized srcnote");
+ }
+ if (!sp->put("\n")) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static const char* TryNoteName(TryNoteKind kind) {
+ switch (kind) {
+ case TryNoteKind::Catch:
+ return "catch";
+ case TryNoteKind::Finally:
+ return "finally";
+ case TryNoteKind::ForIn:
+ return "for-in";
+ case TryNoteKind::ForOf:
+ return "for-of";
+ case TryNoteKind::Loop:
+ return "loop";
+ case TryNoteKind::ForOfIterClose:
+ return "for-of-iterclose";
+ case TryNoteKind::Destructuring:
+ return "destructuring";
+ }
+
+ MOZ_CRASH("Bad TryNoteKind");
+}
+
+/* static */
+bool JSScript::dumpTryNotes(JSContext* cx, JS::Handle<JSScript*> script,
+ js::Sprinter* sp) {
+ if (!sp->put(
+ "\nException table:\nkind               stack    start      end\n")) {
+ return false;
+ }
+
+ for (const js::TryNote& tn : script->trynotes()) {
+ if (!sp->jsprintf(" %-16s %6u %8u %8u\n", TryNoteName(tn.kind()),
+ tn.stackDepth, tn.start, tn.start + tn.length)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* static */
+bool JSScript::dumpScopeNotes(JSContext* cx, JS::Handle<JSScript*> script,
+ js::Sprinter* sp) {
+ if (!sp->put("\nScope notes:\n index parent start end\n")) {
+ return false;
+ }
+
+ for (const ScopeNote& note : script->scopeNotes()) {
+ if (note.index == ScopeNote::NoScopeIndex) {
+ if (!sp->jsprintf("%8s ", "(none)")) {
+ return false;
+ }
+ } else {
+ if (!sp->jsprintf("%8u ", note.index.index)) {
+ return false;
+ }
+ }
+ if (note.parent == ScopeNote::NoScopeIndex) {
+ if (!sp->jsprintf("%8s ", "(none)")) {
+ return false;
+ }
+ } else {
+ if (!sp->jsprintf("%8u ", note.parent)) {
+ return false;
+ }
+ }
+ if (!sp->jsprintf("%8u %8u\n", note.start, note.start + note.length)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* static */
+bool JSScript::dumpGCThings(JSContext* cx, JS::Handle<JSScript*> script,
+ js::Sprinter* sp) {
+ if (!sp->put("\nGC things:\n index type value\n")) {
+ return false;
+ }
+
+ size_t i = 0;
+ for (JS::GCCellPtr gcThing : script->gcthings()) {
+ if (!sp->jsprintf("%8zu ", i)) {
+ return false;
+ }
+ if (gcThing.is<JS::BigInt>()) {
+ if (!sp->put("BigInt ")) {
+ return false;
+ }
+ gcThing.as<JS::BigInt>().dump(*sp);
+ if (!sp->put("\n")) {
+ return false;
+ }
+ } else if (gcThing.is<Scope>()) {
+ if (!sp->put("Scope ")) {
+ return false;
+ }
+ JS::Rooted<Scope*> scope(cx, &gcThing.as<Scope>());
+ if (!Scope::dumpForDisassemble(cx, scope, *sp,
+ " ")) {
+ return false;
+ }
+ if (!sp->put("\n")) {
+ return false;
+ }
+ } else if (gcThing.is<JSObject>()) {
+ JSObject* obj = &gcThing.as<JSObject>();
+ if (obj->is<JSFunction>()) {
+ if (!sp->put("Function ")) {
+ return false;
+ }
+ JS::Rooted<JSFunction*> fun(cx, &obj->as<JSFunction>());
+ if (fun->displayAtom()) {
+ JS::Rooted<JSAtom*> name(cx, fun->displayAtom());
+ JS::UniqueChars utf8chars = JS_EncodeStringToUTF8(cx, name);
+ if (!utf8chars) {
+ return false;
+ }
+ if (!sp->put(utf8chars.get())) {
+ return false;
+ }
+ } else {
+ if (!sp->put("(anonymous)")) {
+ return false;
+ }
+ }
+
+ if (fun->hasBaseScript()) {
+ BaseScript* script = fun->baseScript();
+ if (!sp->jsprintf(" @ %u:%u\n", script->lineno(), script->column())) {
+ return false;
+ }
+ } else {
+ if (!sp->put(" (no script)\n")) {
+ return false;
+ }
+ }
+ } else {
+ if (obj->is<RegExpObject>()) {
+ if (!sp->put("RegExp ")) {
+ return false;
+ }
+ } else {
+ if (!sp->put("Object ")) {
+ return false;
+ }
+ }
+
+ JS::Rooted<JS::Value> objValue(cx, ObjectValue(*obj));
+ JS::Rooted<JSString*> str(cx, ValueToSource(cx, objValue));
+ if (!str) {
+ return false;
+ }
+ JS::UniqueChars utf8chars = JS_EncodeStringToUTF8(cx, str);
+ if (!utf8chars) {
+ return false;
+ }
+ if (!sp->put(utf8chars.get())) {
+ return false;
+ }
+
+ if (!sp->put("\n")) {
+ return false;
+ }
+ }
+ } else if (gcThing.is<JSString>()) {
+ JS::Rooted<JSString*> str(cx, &gcThing.as<JSString>());
+ if (str->isAtom()) {
+ if (!sp->put("Atom ")) {
+ return false;
+ }
+ } else {
+ if (!sp->put("String ")) {
+ return false;
+ }
+ }
+ JS::UniqueChars chars = QuoteString(cx, str, '"');
+ if (!chars) {
+ return false;
+ }
+ if (!sp->put(chars.get())) {
+ return false;
+ }
+ if (!sp->put("\n")) {
+ return false;
+ }
+ } else {
+ if (!sp->put("Unknown\n")) {
+ return false;
+ }
+ }
+ i++;
+ }
+
+ return true;
+}
+
+#endif // defined(DEBUG) || defined(JS_JITSPEW)
+
+void JSScript::AutoDelazify::holdScript(JS::HandleFunction fun) {
+ if (fun) {
+ JSAutoRealm ar(cx_, fun);
+ script_ = JSFunction::getOrCreateScript(cx_, fun);
+ if (script_) {
+ oldAllowRelazify_ = script_->allowRelazify();
+ script_->clearAllowRelazify();
+ }
+ }
+}
+
+void JSScript::AutoDelazify::dropScript() {
+ if (script_) {
+ script_->setAllowRelazify(oldAllowRelazify_);
+ }
+ script_ = nullptr;
+}
+
+JS::ubi::Base::Size JS::ubi::Concrete<BaseScript>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ BaseScript* base = &get();
+
+ Size size = gc::Arena::thingSize(base->getAllocKind());
+ size += base->sizeOfExcludingThis(mallocSizeOf);
+
+ // Include any JIT data if it exists.
+ if (base->hasJitScript()) {
+ JSScript* script = base->asJSScript();
+
+ size_t jitScriptSize = 0;
+ size_t fallbackStubSize = 0;
+ script->addSizeOfJitScript(mallocSizeOf, &jitScriptSize, &fallbackStubSize);
+ size += jitScriptSize;
+ size += fallbackStubSize;
+
+ size_t baselineSize = 0;
+ jit::AddSizeOfBaselineData(script, mallocSizeOf, &baselineSize);
+ size += baselineSize;
+
+ size += jit::SizeOfIonData(script, mallocSizeOf);
+ }
+
+ MOZ_ASSERT(size > 0);
+ return size;
+}
+
+const char* JS::ubi::Concrete<BaseScript>::scriptFilename() const {
+ return get().filename();
+}
diff --git a/js/src/vm/JSScript.h b/js/src/vm/JSScript.h
new file mode 100644
index 0000000000..8fadad9ab5
--- /dev/null
+++ b/js/src/vm/JSScript.h
@@ -0,0 +1,2265 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS script descriptor. */
+
+#ifndef vm_JSScript_h
+#define vm_JSScript_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MaybeOneOf.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/Span.h"
+
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Utf8.h"
+#include "mozilla/Variant.h"
+
+#include <type_traits> // std::is_same
+#include <utility> // std::move
+
+#include "jstypes.h"
+
+#include "frontend/ScriptIndex.h" // ScriptIndex
+#include "gc/Barrier.h"
+#include "js/CompileOptions.h"
+#include "js/Transcoding.h"
+#include "js/UbiNode.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "util/TrailingArray.h"
+#include "vm/BytecodeIterator.h"
+#include "vm/BytecodeLocation.h"
+#include "vm/BytecodeUtil.h"
+#include "vm/MutexIDs.h" // mutexid
+#include "vm/NativeObject.h"
+#include "vm/SharedImmutableStringsCache.h"
+#include "vm/SharedStencil.h" // js::GCThingIndex, js::SourceExtent, js::SharedImmutableScriptData, MemberInitializers
+#include "vm/StencilEnums.h" // SourceRetrievable
+
+namespace JS {
+struct ScriptSourceInfo;
+template <typename UnitT>
+class SourceText;
+} // namespace JS
+
+namespace js {
+
+class FrontendContext;
+class ScriptSource;
+
+class VarScope;
+class LexicalScope;
+
+class JS_PUBLIC_API Sprinter;
+
+namespace coverage {
+class LCovSource;
+} // namespace coverage
+
+namespace gc {
+class AllocSite;
+} // namespace gc
+
+namespace jit {
+class AutoKeepJitScripts;
+class BaselineScript;
+class IonScript;
+struct IonScriptCounts;
+class JitScript;
+} // namespace jit
+
+class ModuleObject;
+class RegExpObject;
+class SourceCompressionTask;
+class Shape;
+class SrcNote;
+class DebugScript;
+
+namespace frontend {
+struct CompilationStencil;
+struct ExtensibleCompilationStencil;
+struct CompilationGCOutput;
+struct CompilationStencilMerger;
+class StencilXDR;
+} // namespace frontend
+
+class ScriptCounts {
+ public:
+ typedef mozilla::Vector<PCCounts, 0, SystemAllocPolicy> PCCountsVector;
+
+ inline ScriptCounts();
+ inline explicit ScriptCounts(PCCountsVector&& jumpTargets);
+ inline ScriptCounts(ScriptCounts&& src);
+ inline ~ScriptCounts();
+
+ inline ScriptCounts& operator=(ScriptCounts&& src);
+
+ // Return the counter used to count the number of visits. Returns null if
+ // the element is not found.
+ PCCounts* maybeGetPCCounts(size_t offset);
+ const PCCounts* maybeGetPCCounts(size_t offset) const;
+
+ // PCCounts are stored at jump-target offsets. This function looks for the
+ // previous PCCount which is in the same basic block as the current offset.
+ PCCounts* getImmediatePrecedingPCCounts(size_t offset);
+
+ // Return the counter used to count the number of throws. Returns null if
+ // the element is not found.
+ const PCCounts* maybeGetThrowCounts(size_t offset) const;
+
+ // Throw counts are stored at the location of each throwing
+ // instruction. This function looks for the previous throw count.
+ //
+ // Note: if the offset of the returned count is higher than the offset of
+ // the immediate preceding PCCount, then this throw happened in the same
+ // basic block.
+ const PCCounts* getImmediatePrecedingThrowCounts(size_t offset) const;
+
+ // Return the counter used to count the number of throws. Allocate it if
+ // none exists yet. Returns null if the allocation failed.
+ PCCounts* getThrowCounts(size_t offset);
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+ bool traceWeak(JSTracer* trc) { return true; }
+
+ private:
+ friend class ::JSScript;
+ friend struct ScriptAndCounts;
+
+ // This sorted array is used to map an offset to the number of times a
+ // branch got visited.
+ PCCountsVector pcCounts_;
+
+ // This sorted vector is used to map an offset to the number of times an
+ // instruction throws.
+ PCCountsVector throwCounts_;
+
+ // Information about any Ion compilations for the script.
+ jit::IonScriptCounts* ionCounts_;
+};
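+
+// Hedged usage sketch (illustrative; PCCounts::numExec() is assumed here):
+//
+//   if (const PCCounts* counts = scriptCounts.maybeGetPCCounts(offset)) {
+//     uint64_t visits = counts->numExec();  // hits recorded at this
+//   }                                       // jump-target offset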
+
+// The key of these side-table hash maps are intentionally not traced GC
+// references to JSScript. Instead, we use bare pointers and manually fix up
+// when objects could have moved (see Zone::fixupScriptMapsAfterMovingGC) and
+// remove when the realm is destroyed (see Zone::clearScriptCounts and
+// Zone::clearScriptNames). They essentially behave as weak references, except
+// that the references are not cleared early by the GC. They must be non-strong
+// references because the tables are kept at the Zone level and otherwise the
+// table keys would keep scripts alive, thus keeping Realms alive, beyond their
+// expected lifetimes. However, we do not use actual weak references (e.g. as
+// used by WeakMap tables provided in gc/WeakMap.h) because they would be
+// collected before the calls to the JSScript::finalize function which are used
+// to aggregate code coverage results on the realm.
+//
+// Note carefully, however, that there is an exceptional case for which we *do*
+// want the JSScripts to be strong references (and thus traced): when the
+// --dump-bytecode command line option or the PCCount JSFriend API is used,
+// then the scripts for all counts must remain alive. See
+// Zone::traceScriptTableRoots() for more details.
+//
+// TODO: Clean this up by either aggregating coverage results in some other
+// way, or by tweaking sweep ordering.
+using UniqueScriptCounts = js::UniquePtr<ScriptCounts>;
+using ScriptCountsMap =
+ GCRekeyableHashMap<HeapPtr<BaseScript*>, UniqueScriptCounts,
+ DefaultHasher<HeapPtr<BaseScript*>>, SystemAllocPolicy>;
+
+// The 'const char*' for the function name is a pointer within the LCovSource's
+// LifoAlloc and will be discarded at the same time.
+using ScriptLCovEntry = std::tuple<coverage::LCovSource*, const char*>;
+using ScriptLCovMap =
+ GCRekeyableHashMap<HeapPtr<BaseScript*>, ScriptLCovEntry,
+ DefaultHasher<HeapPtr<BaseScript*>>, SystemAllocPolicy>;
+
+#ifdef MOZ_VTUNE
+using ScriptVTuneIdMap =
+ GCRekeyableHashMap<HeapPtr<BaseScript*>, uint32_t,
+ DefaultHasher<HeapPtr<BaseScript*>>, SystemAllocPolicy>;
+#endif
+#ifdef JS_CACHEIR_SPEW
+using ScriptFinalWarmUpCountEntry = std::tuple<uint32_t, SharedImmutableString>;
+using ScriptFinalWarmUpCountMap =
+ GCRekeyableHashMap<HeapPtr<BaseScript*>, ScriptFinalWarmUpCountEntry,
+ DefaultHasher<HeapPtr<BaseScript*>>, SystemAllocPolicy>;
+#endif
+
+// As we execute JS sources that used lazy parsing, we may generate additional
+// bytecode that we would like to include in caches if they are being used.
+// There is a dependency cycle between JSScript / ScriptSource /
+// CompilationStencil for this scenario, so we introduce this smart-ptr
+// wrapper to avoid needing the full details of the stencil-merger in this
+// file.
+class StencilIncrementalEncoderPtr {
+ public:
+ frontend::CompilationStencilMerger* merger_ = nullptr;
+
+ StencilIncrementalEncoderPtr() = default;
+ ~StencilIncrementalEncoderPtr() { reset(); }
+
+ bool hasEncoder() const { return bool(merger_); }
+
+ void reset();
+
+ bool setInitial(JSContext* cx,
+ UniquePtr<frontend::ExtensibleCompilationStencil>&& initial);
+
+ bool addDelazification(JSContext* cx,
+ const frontend::CompilationStencil& delazification);
+};
+
+struct ScriptSourceChunk {
+ ScriptSource* ss = nullptr;
+ uint32_t chunk = 0;
+
+ ScriptSourceChunk() = default;
+
+ ScriptSourceChunk(ScriptSource* ss, uint32_t chunk) : ss(ss), chunk(chunk) {
+ MOZ_ASSERT(valid());
+ }
+
+ bool valid() const { return ss != nullptr; }
+
+ bool operator==(const ScriptSourceChunk& other) const {
+ return ss == other.ss && chunk == other.chunk;
+ }
+};
+
+struct ScriptSourceChunkHasher {
+ using Lookup = ScriptSourceChunk;
+
+ static HashNumber hash(const ScriptSourceChunk& ssc) {
+ return mozilla::AddToHash(DefaultHasher<ScriptSource*>::hash(ssc.ss),
+ ssc.chunk);
+ }
+ static bool match(const ScriptSourceChunk& c1, const ScriptSourceChunk& c2) {
+ return c1 == c2;
+ }
+};
+
+template <typename Unit>
+using EntryUnits = mozilla::UniquePtr<Unit[], JS::FreePolicy>;
+
+// The uncompressed source cache contains *either* UTF-8 source data *or*
+// UTF-16 source data. ScriptSourceChunk implies a ScriptSource that
+// contains either UTF-8 data or UTF-16 data, so the nature of the key to
+// Map below indicates how each SourceData ought to be interpreted.
+using SourceData = mozilla::UniquePtr<void, JS::FreePolicy>;
+
+template <typename Unit>
+inline SourceData ToSourceData(EntryUnits<Unit> chars) {
+ static_assert(std::is_same_v<SourceData::DeleterType,
+ typename EntryUnits<Unit>::DeleterType>,
+ "EntryUnits and SourceData must share the same deleter "
+ "type, that need not know the type of the data being freed, "
+ "for the upcast below to be safe");
+ return SourceData(chars.release());
+}
+
+class UncompressedSourceCache {
+ using Map = HashMap<ScriptSourceChunk, SourceData, ScriptSourceChunkHasher,
+ SystemAllocPolicy>;
+
+ public:
+ // Hold an entry in the source data cache and prevent it from being purged on
+ // GC.
+ class AutoHoldEntry {
+ UncompressedSourceCache* cache_ = nullptr;
+ ScriptSourceChunk sourceChunk_ = {};
+ SourceData data_ = nullptr;
+
+ public:
+ explicit AutoHoldEntry() = default;
+
+ ~AutoHoldEntry() {
+ if (cache_) {
+ MOZ_ASSERT(sourceChunk_.valid());
+ cache_->releaseEntry(*this);
+ }
+ }
+
+ template <typename Unit>
+ void holdUnits(EntryUnits<Unit> units) {
+ MOZ_ASSERT(!cache_);
+ MOZ_ASSERT(!sourceChunk_.valid());
+ MOZ_ASSERT(!data_);
+
+ data_ = ToSourceData(std::move(units));
+ }
+
+ private:
+ void holdEntry(UncompressedSourceCache* cache,
+ const ScriptSourceChunk& sourceChunk) {
+ // Initialise the holder for a specific cache and script source.
+ // This will hold on to the cached source chars in the event that
+ // the cache is purged.
+ MOZ_ASSERT(!cache_);
+ MOZ_ASSERT(!sourceChunk_.valid());
+ MOZ_ASSERT(!data_);
+
+ cache_ = cache;
+ sourceChunk_ = sourceChunk;
+ }
+
+ void deferDelete(SourceData data) {
+ // Take ownership of source chars now the cache is being purged. Remove
+ // our reference to the ScriptSource which might soon be destroyed.
+ MOZ_ASSERT(cache_);
+ MOZ_ASSERT(sourceChunk_.valid());
+ MOZ_ASSERT(!data_);
+
+ cache_ = nullptr;
+ sourceChunk_ = ScriptSourceChunk();
+
+ data_ = std::move(data);
+ }
+
+ const ScriptSourceChunk& sourceChunk() const { return sourceChunk_; }
+ friend class UncompressedSourceCache;
+ };
+
+ private:
+ UniquePtr<Map> map_ = nullptr;
+ AutoHoldEntry* holder_ = nullptr;
+
+ public:
+ UncompressedSourceCache() = default;
+
+ template <typename Unit>
+ const Unit* lookup(const ScriptSourceChunk& ssc, AutoHoldEntry& asp);
+
+ bool put(const ScriptSourceChunk& ssc, SourceData data, AutoHoldEntry& asp);
+
+ void purge();
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+ private:
+ void holdEntry(AutoHoldEntry& holder, const ScriptSourceChunk& ssc);
+ void releaseEntry(AutoHoldEntry& holder);
+};
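+
+// Hedged usage sketch (illustrative; assumes |cache| is the runtime's
+// UncompressedSourceCache and |ssc| names a valid chunk):
+//
+//   UncompressedSourceCache::AutoHoldEntry holder;
+//   if (const char16_t* units = cache.lookup<char16_t>(ssc, holder)) {
+//     // |units| stay valid while |holder| lives: if the cache is purged,
+//     // deferDelete() hands ownership of the chars to the holder.
+//   }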
+
+template <typename Unit>
+struct SourceTypeTraits;
+
+template <>
+struct SourceTypeTraits<mozilla::Utf8Unit> {
+ using CharT = char;
+ using SharedImmutableString = js::SharedImmutableString;
+
+ static const mozilla::Utf8Unit* units(const SharedImmutableString& string) {
+ // Casting |char| data to |Utf8Unit| is safe because |Utf8Unit|
+ // contains a |char|. See the long comment in |Utf8Unit|'s definition.
+ return reinterpret_cast<const mozilla::Utf8Unit*>(string.chars());
+ }
+
+ static char* toString(const mozilla::Utf8Unit* units) {
+ auto asUnsigned =
+ const_cast<unsigned char*>(mozilla::Utf8AsUnsignedChars(units));
+ return reinterpret_cast<char*>(asUnsigned);
+ }
+
+ static UniqueChars toCacheable(EntryUnits<mozilla::Utf8Unit> str) {
+ // The cache only stores strings of |char| or |char16_t|, and right now
+ // it seems best not to gunk up the cache with |Utf8Unit| too. So
+ // cache |Utf8Unit| strings by interpreting them as |char| strings.
+ char* chars = toString(str.release());
+ return UniqueChars(chars);
+ }
+};
+
+template <>
+struct SourceTypeTraits<char16_t> {
+ using CharT = char16_t;
+ using SharedImmutableString = js::SharedImmutableTwoByteString;
+
+ static const char16_t* units(const SharedImmutableString& string) {
+ return string.chars();
+ }
+
+ static char16_t* toString(const char16_t* units) {
+ return const_cast<char16_t*>(units);
+ }
+
+ static UniqueTwoByteChars toCacheable(EntryUnits<char16_t> str) {
+ return UniqueTwoByteChars(std::move(str));
+ }
+};
+
+// Synchronously compress the source of |script|, for testing purposes.
+[[nodiscard]] extern bool SynchronouslyCompressSource(
+ JSContext* cx, JS::Handle<BaseScript*> script);
+
+// [SMDOC] ScriptSource
+//
+// This class abstracts over the source we used to compile from. The current
+// representation may transition to different modes in order to save memory.
+// Abstractly the source may be one of UTF-8 or UTF-16. The data itself may be
+// unavailable, retrievable-using-source-hook, compressed, or uncompressed. If
+// source is retrieved or decompressed for use, we may update the ScriptSource
+// to hold the result.
+class ScriptSource {
+ // NOTE: While ScriptSources may be compressed off thread, they are only
+ // modified by the main thread, and all members are always safe to access
+ // on the main thread.
+
+ friend class SourceCompressionTask;
+ friend bool SynchronouslyCompressSource(JSContext* cx,
+ JS::Handle<BaseScript*> script);
+
+ friend class frontend::StencilXDR;
+
+ private:
+ // Common base class of the templated variants of PinnedUnits<T>.
+ class PinnedUnitsBase {
+ protected:
+ ScriptSource* source_;
+
+ explicit PinnedUnitsBase(ScriptSource* source) : source_(source) {}
+ };
+
+ public:
+ // Any user that wishes to manipulate the char buffer of the ScriptSource
+ // needs to do so via PinnedUnits for GC safety. A GC may compress
+ // ScriptSources. If the source were initially uncompressed, then any raw
+ // pointers to the char buffer would now point to the freed, uncompressed
+ // chars. This is analogous to Rooted.
+ template <typename Unit>
+ class PinnedUnits : public PinnedUnitsBase {
+ const Unit* units_;
+
+ public:
+ PinnedUnits(JSContext* cx, ScriptSource* source,
+ UncompressedSourceCache::AutoHoldEntry& holder, size_t begin,
+ size_t len);
+
+ ~PinnedUnits();
+
+ const Unit* get() const { return units_; }
+
+ const typename SourceTypeTraits<Unit>::CharT* asChars() const {
+ return SourceTypeTraits<Unit>::toString(get());
+ }
+ };
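+
+ // Hedged usage sketch (illustrative only): pinning a range of source units
+ // so that a GC-triggered compression cannot invalidate the pointer in use.
+ //
+ //   UncompressedSourceCache::AutoHoldEntry holder;
+ //   ScriptSource::PinnedUnits<char16_t> pinned(cx, ss, holder, begin, len);
+ //   if (const char16_t* units = pinned.get()) {
+ //     // read units[0..len) while |pinned| is in scope
+ //   }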
+
+ private:
+ // Missing source text that isn't retrievable using the source hook. (All
+ // ScriptSources initially begin in this state. Users that are compiling
+ // source text will overwrite |data| to store a different state.)
+ struct Missing {};
+
+ // Source that can be retrieved using the registered source hook. |Unit|
+ // records the source type so that source-text coordinates in functions and
+ // scripts that depend on this |ScriptSource| are correct.
+ template <typename Unit>
+ struct Retrievable {
+ // The source hook and script URL required to retrieve source are stored
+ // elsewhere, so nothing is needed here. It'd be better hygiene to store
+ // something source-hook-like in each |ScriptSource| that needs it, but that
+ // requires reimagining a source-hook API that currently depends on source
+ // hooks being uniquely-owned pointers...
+ };
+
+ // Uncompressed source text. Templates distinguish whether we are
+ // interconvertible to |Retrievable| or not.
+ template <typename Unit>
+ class UncompressedData {
+ typename SourceTypeTraits<Unit>::SharedImmutableString string_;
+
+ public:
+ explicit UncompressedData(
+ typename SourceTypeTraits<Unit>::SharedImmutableString str)
+ : string_(std::move(str)) {}
+
+ const Unit* units() const { return SourceTypeTraits<Unit>::units(string_); }
+
+ size_t length() const { return string_.length(); }
+ };
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ class Uncompressed : public UncompressedData<Unit> {
+ using Base = UncompressedData<Unit>;
+
+ public:
+ using Base::Base;
+ };
+
+ // Compressed source text. Templates distinguish whether we are
+ // interconvertible to |Retrievable| or not.
+ template <typename Unit>
+ struct CompressedData {
+ // Single-byte compressed text, regardless whether the original text
+ // was single-byte or two-byte.
+ SharedImmutableString raw;
+ size_t uncompressedLength;
+
+ CompressedData(SharedImmutableString raw, size_t uncompressedLength)
+ : raw(std::move(raw)), uncompressedLength(uncompressedLength) {}
+ };
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ struct Compressed : public CompressedData<Unit> {
+ using Base = CompressedData<Unit>;
+
+ public:
+ using Base::Base;
+ };
+
+ // The set of currently allowed encoding modes.
+ using SourceType =
+ mozilla::Variant<Compressed<mozilla::Utf8Unit, SourceRetrievable::Yes>,
+ Uncompressed<mozilla::Utf8Unit, SourceRetrievable::Yes>,
+ Compressed<mozilla::Utf8Unit, SourceRetrievable::No>,
+ Uncompressed<mozilla::Utf8Unit, SourceRetrievable::No>,
+ Compressed<char16_t, SourceRetrievable::Yes>,
+ Uncompressed<char16_t, SourceRetrievable::Yes>,
+ Compressed<char16_t, SourceRetrievable::No>,
+ Uncompressed<char16_t, SourceRetrievable::No>,
+ Retrievable<mozilla::Utf8Unit>, Retrievable<char16_t>,
+ Missing>;
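+
+ // Illustrative note: the variant above enumerates every reachable encoding
+ // state: {UTF-8, UTF-16} x {compressed, uncompressed} x {retrievable, not}
+ // (eight alternatives), plus the two retrievable-but-not-yet-loaded states
+ // and Missing, for eleven in total.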
+
+ //
+ // Start of fields.
+ //
+
+ mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> refs = {};
+
+ // An id for this source that is unique across the process. This can be used
+ // to refer to this source from places that don't want to hold a strong
+ // reference on the source itself.
+ //
+ // This is a 32-bit ID and could overflow, in which case the ID will not be
+ // unique anymore.
+ uint32_t id_ = 0;
+
+ // Source data (as a mozilla::Variant).
+ SourceType data = SourceType(Missing());
+
+ // If the GC calls triggerConvertToCompressedSource with PinnedUnits present,
+ // the last PinnedUnits instance will install the compressed chars upon
+ // destruction.
+ //
+ // Retrievability isn't part of the type here because uncompressed->compressed
+ // transitions must preserve existing retrievability.
+ struct ReaderInstances {
+ size_t count = 0;
+ mozilla::MaybeOneOf<CompressedData<mozilla::Utf8Unit>,
+ CompressedData<char16_t>>
+ pendingCompressed;
+ };
+ ExclusiveData<ReaderInstances> readers_;
+
+ // The UTF-8 encoded filename of this script.
+ SharedImmutableString filename_;
+
+ // Hash of the script filename.
+ HashNumber filenameHash_ = 0;
+
+ // If this ScriptSource was generated by a code-introduction mechanism such
+ // as |eval| or |new Function|, the debugger needs access to the "raw"
+ // filename of the top-level script that contains the eval-ing code. To
+ // keep track of this, we must preserve the original outermost filename (of
+ // the original introducer script), so that instead of a filename of
+ // "foo.js line 30 > eval line 10 > Function", we can obtain the original
+ // raw filename of "foo.js".
+ //
+ // In the case described above, this field will be set to the original raw
+ // UTF-8 encoded filename; otherwise it will be mozilla::Nothing.
+ SharedImmutableString introducerFilename_;
+
+ SharedImmutableTwoByteString displayURL_;
+ SharedImmutableTwoByteString sourceMapURL_;
+
+ // The bytecode cache encoder is used to encode only the content of functions
+ // that are delazified. If this value is not nullptr, then each delazified
+ // function should be recorded before its first execution.
+ StencilIncrementalEncoderPtr xdrEncoder_;
+
+ // A string indicating how this source code was introduced into the system.
+ // This is a constant, statically allocated C string, so does not need memory
+ // management.
+ //
+ // TODO: Document the various additional introduction type constants.
+ const char* introductionType_ = nullptr;
+
+ // Bytecode offset in caller script that generated this code. This is
+ // present for eval-ed code, as well as "new Function(...)"-introduced
+ // scripts.
+ mozilla::Maybe<uint32_t> introductionOffset_;
+
+ // If this source is for a Function constructor, the position of ")" after
+ // the parameter list in the source. This is used to get the function body.
+ // 0 for other cases.
+ uint32_t parameterListEnd_ = 0;
+
+ // Line number within the file where this source starts.
+ uint32_t startLine_ = 0;
+ // Column number within the file where this source starts.
+ uint32_t startColumn_ = 0;
+
+ // See: CompileOptions::mutedErrors.
+ bool mutedErrors_ = false;
+
+ // Carry the delazification mode per source.
+ JS::DelazificationOption delazificationMode_ =
+ JS::DelazificationOption::OnDemandOnly;
+
+ // True if an associated SourceCompressionTask was ever created.
+ bool hadCompressionTask_ = false;
+
+ //
+ // End of fields.
+ //
+
+ // How many ids have been handed out to sources.
+ static mozilla::Atomic<uint32_t, mozilla::SequentiallyConsistent> idCount_;
+
+ template <typename Unit>
+ const Unit* chunkUnits(JSContext* cx,
+ UncompressedSourceCache::AutoHoldEntry& holder,
+ size_t chunk);
+
+ // Return a string containing the chars starting at |begin| and ending at
+ // |begin + len|.
+ //
+ // Warning: this is *not* GC-safe! Any chars to be handed out must use
+ // PinnedUnits. See comment below.
+ template <typename Unit>
+ const Unit* units(JSContext* cx, UncompressedSourceCache::AutoHoldEntry& asp,
+ size_t begin, size_t len);
+
+ public:
+ // When creating a JSString* from TwoByte source characters, we don't try
+ // to deflate to Latin1 for longer strings, because this can be slow.
+ static const size_t SourceDeflateLimit = 100;
+
+ explicit ScriptSource()
+ : id_(++idCount_), readers_(js::mutexid::SourceCompression) {}
+ ~ScriptSource() { MOZ_ASSERT(refs == 0); }
+
+ void AddRef() { refs++; }
+ void Release() {
+ MOZ_ASSERT(refs != 0);
+ if (--refs == 0) {
+ js_delete(this);
+ }
+ }
+ [[nodiscard]] bool initFromOptions(FrontendContext* fc,
+ const JS::ReadOnlyCompileOptions& options);
+
+ /**
+ * The minimum script length (in code units) necessary for a script to be
+ * eligible to be compressed.
+ */
+ static constexpr size_t MinimumCompressibleLength = 256;
+
+ SharedImmutableString getOrCreateStringZ(FrontendContext* fc,
+ UniqueChars&& str);
+ SharedImmutableTwoByteString getOrCreateStringZ(FrontendContext* fc,
+ UniqueTwoByteChars&& str);
+
+ private:
+ class LoadSourceMatcher;
+
+ public:
+ // Attempt to load usable source for |ss| -- source text on which substring
+ // operations and the like can be performed. On success return true and set
+ // |*loaded| to indicate whether usable source could be loaded; otherwise
+ // return false.
+ static bool loadSource(JSContext* cx, ScriptSource* ss, bool* loaded);
+
+ // Assign source data from |srcBuf| to this recently-created |ScriptSource|.
+ template <typename Unit>
+ [[nodiscard]] bool assignSource(FrontendContext* fc,
+ const JS::ReadOnlyCompileOptions& options,
+ JS::SourceText<Unit>& srcBuf);
+
+ bool hasSourceText() const {
+ return hasUncompressedSource() || hasCompressedSource();
+ }
+
+ private:
+ template <typename Unit>
+ struct UncompressedDataMatcher {
+ template <SourceRetrievable CanRetrieve>
+ const UncompressedData<Unit>* operator()(
+ const Uncompressed<Unit, CanRetrieve>& u) {
+ return &u;
+ }
+
+ template <typename T>
+ const UncompressedData<Unit>* operator()(const T&) {
+ MOZ_CRASH(
+ "attempting to access uncompressed data in a ScriptSource not "
+ "containing it");
+ return nullptr;
+ }
+ };
+
+ public:
+ template <typename Unit>
+ const UncompressedData<Unit>* uncompressedData() {
+ return data.match(UncompressedDataMatcher<Unit>());
+ }
+
+ private:
+ template <typename Unit>
+ struct CompressedDataMatcher {
+ template <SourceRetrievable CanRetrieve>
+ const CompressedData<Unit>* operator()(
+ const Compressed<Unit, CanRetrieve>& c) {
+ return &c;
+ }
+
+ template <typename T>
+ const CompressedData<Unit>* operator()(const T&) {
+ MOZ_CRASH(
+ "attempting to access compressed data in a ScriptSource not "
+ "containing it");
+ return nullptr;
+ }
+ };
+
+ public:
+ template <typename Unit>
+ const CompressedData<Unit>* compressedData() {
+ return data.match(CompressedDataMatcher<Unit>());
+ }
+
+ private:
+ struct HasUncompressedSource {
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ bool operator()(const Uncompressed<Unit, CanRetrieve>&) {
+ return true;
+ }
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ bool operator()(const Compressed<Unit, CanRetrieve>&) {
+ return false;
+ }
+
+ template <typename Unit>
+ bool operator()(const Retrievable<Unit>&) {
+ return false;
+ }
+
+ bool operator()(const Missing&) { return false; }
+ };
+
+ public:
+ bool hasUncompressedSource() const {
+ return data.match(HasUncompressedSource());
+ }
+
+ private:
+ template <typename Unit>
+ struct IsUncompressed {
+ template <SourceRetrievable CanRetrieve>
+ bool operator()(const Uncompressed<Unit, CanRetrieve>&) {
+ return true;
+ }
+
+ template <typename T>
+ bool operator()(const T&) {
+ return false;
+ }
+ };
+
+ public:
+ template <typename Unit>
+ bool isUncompressed() const {
+ return data.match(IsUncompressed<Unit>());
+ }
+
+ private:
+ struct HasCompressedSource {
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ bool operator()(const Compressed<Unit, CanRetrieve>&) {
+ return true;
+ }
+
+ template <typename T>
+ bool operator()(const T&) {
+ return false;
+ }
+ };
+
+ public:
+ bool hasCompressedSource() const { return data.match(HasCompressedSource()); }
+
+ private:
+ template <typename Unit>
+ struct IsCompressed {
+ template <SourceRetrievable CanRetrieve>
+ bool operator()(const Compressed<Unit, CanRetrieve>&) {
+ return true;
+ }
+
+ template <typename T>
+ bool operator()(const T&) {
+ return false;
+ }
+ };
+
+ public:
+ template <typename Unit>
+ bool isCompressed() const {
+ return data.match(IsCompressed<Unit>());
+ }
+
+ private:
+ template <typename Unit>
+ struct SourceTypeMatcher {
+ template <template <typename C, SourceRetrievable R> class Data,
+ SourceRetrievable CanRetrieve>
+ bool operator()(const Data<Unit, CanRetrieve>&) {
+ return true;
+ }
+
+ template <template <typename C, SourceRetrievable R> class Data,
+ typename NotUnit, SourceRetrievable CanRetrieve>
+ bool operator()(const Data<NotUnit, CanRetrieve>&) {
+ return false;
+ }
+
+ bool operator()(const Retrievable<Unit>&) {
+ MOZ_CRASH("source type only applies where actual text is available");
+ return false;
+ }
+
+ template <typename NotUnit>
+ bool operator()(const Retrievable<NotUnit>&) {
+ return false;
+ }
+
+ bool operator()(const Missing&) {
+ MOZ_CRASH("doesn't make sense to ask source type when missing");
+ return false;
+ }
+ };
+
+ public:
+ template <typename Unit>
+ bool hasSourceType() const {
+ return data.match(SourceTypeMatcher<Unit>());
+ }
+
+ private:
+ struct UncompressedLengthMatcher {
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ size_t operator()(const Uncompressed<Unit, CanRetrieve>& u) {
+ return u.length();
+ }
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ size_t operator()(const Compressed<Unit, CanRetrieve>& u) {
+ return u.uncompressedLength;
+ }
+
+ template <typename Unit>
+ size_t operator()(const Retrievable<Unit>&) {
+ MOZ_CRASH("ScriptSource::length on a missing-but-retrievable source");
+ return 0;
+ }
+
+ size_t operator()(const Missing& m) {
+ MOZ_CRASH("ScriptSource::length on a missing source");
+ return 0;
+ }
+ };
+
+ public:
+ size_t length() const {
+ MOZ_ASSERT(hasSourceText());
+ return data.match(UncompressedLengthMatcher());
+ }
+
+ JSLinearString* substring(JSContext* cx, size_t start, size_t stop);
+ JSLinearString* substringDontDeflate(JSContext* cx, size_t start,
+ size_t stop);
+
+ [[nodiscard]] bool appendSubstring(JSContext* cx, js::StringBuffer& buf,
+ size_t start, size_t stop);
+
+ void setParameterListEnd(uint32_t parameterListEnd) {
+ parameterListEnd_ = parameterListEnd;
+ }
+
+ bool isFunctionBody() { return parameterListEnd_ != 0; }
+ JSLinearString* functionBodyString(JSContext* cx);
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::ScriptSourceInfo* info) const;
+
+ private:
+ // Overwrites |data| with the uncompressed data from |source|.
+ //
+ // This function asserts nothing about |data|. Users should use assertions to
+ // double-check their own understandings of the |data| state transition being
+ // performed.
+ template <typename ContextT, typename Unit>
+ [[nodiscard]] bool setUncompressedSourceHelper(ContextT* cx,
+ EntryUnits<Unit>&& source,
+ size_t length,
+ SourceRetrievable retrievable);
+
+ public:
+ // Initialize a fresh |ScriptSource| with unretrievable, uncompressed source.
+ template <typename Unit>
+ [[nodiscard]] bool initializeUnretrievableUncompressedSource(
+ FrontendContext* fc, EntryUnits<Unit>&& source, size_t length);
+
+ // Set the retrieved source for a |ScriptSource| whose source was recorded as
+ // missing but retrievable.
+ template <typename Unit>
+ [[nodiscard]] bool setRetrievedSource(JSContext* cx,
+ EntryUnits<Unit>&& source,
+ size_t length);
+
+ [[nodiscard]] bool tryCompressOffThread(JSContext* cx);
+
+ // Called by the SourceCompressionTask constructor to indicate such a task was
+ // ever created.
+ void noteSourceCompressionTask() { hadCompressionTask_ = true; }
+
+ // *Trigger* the conversion of this ScriptSource from containing uncompressed
+ // |Unit|-encoded source to containing compressed source. Conversion may not
+ // be complete when this function returns: it'll be delayed if there's ongoing
+ // use of the uncompressed source via |PinnedUnits|, in which case conversion
+ // won't occur until the outermost |PinnedUnits| is destroyed.
+ //
+ // Compressed source is stored in bytes, even when |Unit| is |char16_t|.
+ // |sourceLength| is the length in code units (not bytes) of the uncompressed
+ // source.
+ template <typename Unit>
+ void triggerConvertToCompressedSource(SharedImmutableString compressed,
+ size_t sourceLength);
+
+ // Initialize a fresh ScriptSource as containing unretrievable compressed
+ // source of the indicated original encoding.
+ template <typename Unit>
+ [[nodiscard]] bool initializeWithUnretrievableCompressedSource(
+ FrontendContext* fc, UniqueChars&& raw, size_t rawLength,
+ size_t sourceLength);
+
+ private:
+ void performTaskWork(SourceCompressionTask* task);
+
+ struct TriggerConvertToCompressedSourceFromTask {
+ ScriptSource* const source_;
+ SharedImmutableString& compressed_;
+
+ TriggerConvertToCompressedSourceFromTask(ScriptSource* source,
+ SharedImmutableString& compressed)
+ : source_(source), compressed_(compressed) {}
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ void operator()(const Uncompressed<Unit, CanRetrieve>&) {
+ source_->triggerConvertToCompressedSource<Unit>(std::move(compressed_),
+ source_->length());
+ }
+
+ template <typename Unit, SourceRetrievable CanRetrieve>
+ void operator()(const Compressed<Unit, CanRetrieve>&) {
+ MOZ_CRASH(
+ "can't set compressed source when source is already compressed -- "
+ "ScriptSource::tryCompressOffThread shouldn't have queued up this "
+ "task?");
+ }
+
+ template <typename Unit>
+ void operator()(const Retrievable<Unit>&) {
+ MOZ_CRASH("shouldn't compressing unloaded-but-retrievable source");
+ }
+
+ void operator()(const Missing&) {
+ MOZ_CRASH(
+ "doesn't make sense to set compressed source for missing source -- "
+ "ScriptSource::tryCompressOffThread shouldn't have queued up this "
+ "task?");
+ }
+ };
+
+ template <typename Unit>
+ void convertToCompressedSource(SharedImmutableString compressed,
+ size_t uncompressedLength);
+
+ template <typename Unit>
+ void performDelayedConvertToCompressedSource(
+ ExclusiveData<ReaderInstances>::Guard& g);
+
+ void triggerConvertToCompressedSourceFromTask(
+ SharedImmutableString compressed);
+
+ public:
+ HashNumber filenameHash() const { return filenameHash_; }
+ const char* filename() const {
+ return filename_ ? filename_.chars() : nullptr;
+ }
+ [[nodiscard]] bool setFilename(FrontendContext* fc, const char* filename);
+ [[nodiscard]] bool setFilename(FrontendContext* fc, UniqueChars&& filename);
+
+ const char* introducerFilename() const {
+ return introducerFilename_ ? introducerFilename_.chars() : filename();
+ }
+ [[nodiscard]] bool setIntroducerFilename(FrontendContext* fc,
+ const char* filename);
+ [[nodiscard]] bool setIntroducerFilename(FrontendContext* fc,
+ UniqueChars&& filename);
+
+ bool hasIntroductionType() const { return introductionType_; }
+ const char* introductionType() const {
+ MOZ_ASSERT(hasIntroductionType());
+ return introductionType_;
+ }
+
+ uint32_t id() const { return id_; }
+
+ // Display URLs
+ [[nodiscard]] bool setDisplayURL(FrontendContext* fc, const char16_t* url);
+ [[nodiscard]] bool setDisplayURL(FrontendContext* fc,
+ UniqueTwoByteChars&& url);
+ bool hasDisplayURL() const { return bool(displayURL_); }
+ const char16_t* displayURL() { return displayURL_.chars(); }
+
+ // Source maps
+ [[nodiscard]] bool setSourceMapURL(FrontendContext* fc, const char16_t* url);
+ [[nodiscard]] bool setSourceMapURL(FrontendContext* fc,
+ UniqueTwoByteChars&& url);
+ bool hasSourceMapURL() const { return bool(sourceMapURL_); }
+ const char16_t* sourceMapURL() { return sourceMapURL_.chars(); }
+
+ bool mutedErrors() const { return mutedErrors_; }
+
+ uint32_t startLine() const { return startLine_; }
+ uint32_t startColumn() const { return startColumn_; }
+
+ JS::DelazificationOption delazificationMode() const {
+ return delazificationMode_;
+ }
+
+ bool hasIntroductionOffset() const { return introductionOffset_.isSome(); }
+ uint32_t introductionOffset() const { return introductionOffset_.value(); }
+ void setIntroductionOffset(uint32_t offset) {
+ MOZ_ASSERT(!hasIntroductionOffset());
+ MOZ_ASSERT(offset <= (uint32_t)INT32_MAX);
+ introductionOffset_.emplace(offset);
+ }
+
+ // Return whether an XDR encoder is present or not.
+ bool hasEncoder() const { return xdrEncoder_.hasEncoder(); }
+
+ [[nodiscard]] bool startIncrementalEncoding(
+ JSContext* cx,
+ UniquePtr<frontend::ExtensibleCompilationStencil>&& initial);
+
+ [[nodiscard]] bool addDelazificationToIncrementalEncoding(
+ JSContext* cx, const frontend::CompilationStencil& stencil);
+
+ // Linearize the encoded content in the |buffer| provided as argument to
+ // |xdrEncodeTopLevel|, and free the XDR encoder. In case of errors, the
+ // |buffer| is considered undefined.
+ bool xdrFinalizeEncoder(JSContext* cx, JS::TranscodeBuffer& buffer);
+
+ // Discard the incremental encoding data and free the XDR encoder.
+ void xdrAbortEncoder();
+};
+
+// [SMDOC] ScriptSourceObject
+//
+// ScriptSourceObject stores the ScriptSource and GC pointers related to it.
+class ScriptSourceObject : public NativeObject {
+ static const JSClassOps classOps_;
+
+ public:
+ static const JSClass class_;
+
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+
+ static ScriptSourceObject* create(JSContext* cx, ScriptSource* source);
+
+ // Initialize those properties of this ScriptSourceObject whose values
+ // are provided by |options|, re-wrapping as necessary.
+ static bool initFromOptions(JSContext* cx,
+ JS::Handle<ScriptSourceObject*> source,
+ const JS::InstantiateOptions& options);
+
+ static bool initElementProperties(JSContext* cx,
+ JS::Handle<ScriptSourceObject*> source,
+ HandleString elementAttrName);
+
+ bool hasSource() const { return !getReservedSlot(SOURCE_SLOT).isUndefined(); }
+ ScriptSource* source() const {
+ return static_cast<ScriptSource*>(getReservedSlot(SOURCE_SLOT).toPrivate());
+ }
+
+ JSObject* unwrappedElement(JSContext* cx) const;
+
+ const Value& unwrappedElementAttributeName() const {
+ MOZ_ASSERT(isInitialized());
+ const Value& v = getReservedSlot(ELEMENT_PROPERTY_SLOT);
+ MOZ_ASSERT(!v.isMagic());
+ return v;
+ }
+ BaseScript* unwrappedIntroductionScript() const {
+ MOZ_ASSERT(isInitialized());
+ Value value = getReservedSlot(INTRODUCTION_SCRIPT_SLOT);
+ if (value.isUndefined()) {
+ return nullptr;
+ }
+ return value.toGCThing()->as<BaseScript>();
+ }
+
+ void setPrivate(JSRuntime* rt, const Value& value);
+ void clearPrivate(JSRuntime* rt);
+
+ void setIntroductionScript(const Value& introductionScript) {
+ setReservedSlot(INTRODUCTION_SCRIPT_SLOT, introductionScript);
+ }
+
+ Value getPrivate() const {
+ MOZ_ASSERT(isInitialized());
+ Value value = getReservedSlot(PRIVATE_SLOT);
+ return value;
+ }
+
+ private:
+#ifdef DEBUG
+ bool isInitialized() const {
+ Value element = getReservedSlot(ELEMENT_PROPERTY_SLOT);
+ if (element.isMagic(JS_GENERIC_MAGIC)) {
+ return false;
+ }
+ return !getReservedSlot(INTRODUCTION_SCRIPT_SLOT).isMagic(JS_GENERIC_MAGIC);
+ }
+#endif
+
+ enum {
+ SOURCE_SLOT = 0,
+ ELEMENT_PROPERTY_SLOT,
+ INTRODUCTION_SCRIPT_SLOT,
+ PRIVATE_SLOT,
+ RESERVED_SLOTS
+ };
+};
+
+// ScriptWarmUpData represents a pointer-sized field in BaseScript that stores
+// one of the following using low-bit tags:
+//
+// * The enclosing BaseScript. This is only used while this script is lazy and
+// its containing script is also lazy. This outer script must be compiled
+// before the current script can be, in order to correctly build the scope
+// chain.
+//
+// * The enclosing Scope. This is only used while this script is lazy and its
+// containing script is compiled. This is the outer scope chain that will be
+// used to compile this script.
+//
+// * The script's warm-up count. This is only used until the script has a
+// JitScript. The Baseline Interpreter and JITs use the warm-up count stored
+// in JitScript.
+//
+// * A pointer to the JitScript, when the script is warm enough for the Baseline
+// Interpreter.
+//
+class ScriptWarmUpData {
+ uintptr_t data_ = ResetState();
+
+ private:
+ static constexpr uintptr_t NumTagBits = 2;
+ static constexpr uint32_t MaxWarmUpCount = UINT32_MAX >> NumTagBits;
+
+ public:
+ // Public only for the JITs.
+ static constexpr uintptr_t TagMask = (1 << NumTagBits) - 1;
+ static constexpr uintptr_t JitScriptTag = 0;
+ static constexpr uintptr_t EnclosingScriptTag = 1;
+ static constexpr uintptr_t EnclosingScopeTag = 2;
+ static constexpr uintptr_t WarmUpCountTag = 3;
+
+ private:
+ // A gc-safe value to clear to.
+ constexpr uintptr_t ResetState() { return 0 | WarmUpCountTag; }
+
+ template <uintptr_t Tag>
+ inline void setTaggedPtr(void* ptr) {
+ static_assert(Tag <= TagMask, "Tag must fit in TagMask");
+ MOZ_ASSERT((uintptr_t(ptr) & TagMask) == 0);
+ data_ = uintptr_t(ptr) | Tag;
+ }
+
+ template <typename T, uintptr_t Tag>
+ inline T getTaggedPtr() const {
+ static_assert(Tag <= TagMask, "Tag must fit in TagMask");
+ MOZ_ASSERT((data_ & TagMask) == Tag);
+ return reinterpret_cast<T>(data_ & ~TagMask);
+ }
+
+ void setWarmUpCount(uint32_t count) {
+ if (count > MaxWarmUpCount) {
+ count = MaxWarmUpCount;
+ }
+ data_ = (uintptr_t(count) << NumTagBits) | WarmUpCountTag;
+ }
+
+ public:
+ void trace(JSTracer* trc);
+
+ bool isEnclosingScript() const {
+ return (data_ & TagMask) == EnclosingScriptTag;
+ }
+ bool isEnclosingScope() const {
+ return (data_ & TagMask) == EnclosingScopeTag;
+ }
+ bool isWarmUpCount() const { return (data_ & TagMask) == WarmUpCountTag; }
+ bool isJitScript() const { return (data_ & TagMask) == JitScriptTag; }
+
+ // NOTE: To change type safely, 'clear' the old tagged value and then 'init'
+ // the new one. This will notify the GC appropriately.
+
+ BaseScript* toEnclosingScript() const {
+ return getTaggedPtr<BaseScript*, EnclosingScriptTag>();
+ }
+ inline void initEnclosingScript(BaseScript* enclosingScript);
+ inline void clearEnclosingScript();
+
+ Scope* toEnclosingScope() const {
+ return getTaggedPtr<Scope*, EnclosingScopeTag>();
+ }
+ inline void initEnclosingScope(Scope* enclosingScope);
+ inline void clearEnclosingScope();
+
+ uint32_t toWarmUpCount() const {
+ MOZ_ASSERT(isWarmUpCount());
+ return data_ >> NumTagBits;
+ }
+ void resetWarmUpCount(uint32_t count) {
+ MOZ_ASSERT(isWarmUpCount());
+ setWarmUpCount(count);
+ }
+ void incWarmUpCount() {
+ MOZ_ASSERT(isWarmUpCount());
+ data_ += uintptr_t(1) << NumTagBits;
+ }
+
+ jit::JitScript* toJitScript() const {
+ return getTaggedPtr<jit::JitScript*, JitScriptTag>();
+ }
+ void initJitScript(jit::JitScript* jitScript) {
+ MOZ_ASSERT(isWarmUpCount());
+ setTaggedPtr<JitScriptTag>(jitScript);
+ }
+ void clearJitScript() {
+ MOZ_ASSERT(isJitScript());
+ data_ = ResetState();
+ }
+} JS_HAZ_GC_POINTER;
+
+static_assert(sizeof(ScriptWarmUpData) == sizeof(uintptr_t),
+ "JIT code depends on ScriptWarmUpData being pointer-sized");
+
+// [SMDOC] - JSScript data layout (unshared)
+//
+// PrivateScriptData stores variable-length data associated with a script.
+// Abstractly a PrivateScriptData consists of the following:
+//
+// * A non-empty array of GCCellPtr in gcthings()
+//
+// Accessing this array just requires calling the appropriate public
+// Span-computing function.
+//
+// This class doesn't use the GC barrier wrapper classes. BaseScript::swapData
+// performs a manual pre-write barrier when detaching PrivateScriptData from a
+// script.
+class alignas(uintptr_t) PrivateScriptData final : public TrailingArray {
+ private:
+ uint32_t ngcthings = 0;
+
+ // Note: This is only defined for scripts with an enclosing scope. This
+ // excludes lazy scripts with lazy parents.
+ js::MemberInitializers memberInitializers_ =
+ js::MemberInitializers::Invalid();
+
+ // End of fields.
+
+ private:
+ // Layout helpers
+ Offset gcThingsOffset() { return offsetOfGCThings(); }
+ Offset endOffset() const {
+ uintptr_t size = ngcthings * sizeof(JS::GCCellPtr);
+ return offsetOfGCThings() + size;
+ }
+
+  // Initialize the header and trailing arrays.
+ explicit PrivateScriptData(uint32_t ngcthings);
+
+ public:
+ static constexpr size_t offsetOfGCThings() {
+ return sizeof(PrivateScriptData);
+ }
+
+ // Accessors for typed array spans.
+ mozilla::Span<JS::GCCellPtr> gcthings() {
+ Offset offset = offsetOfGCThings();
+ return mozilla::Span{offsetToPointer<JS::GCCellPtr>(offset), ngcthings};
+ }
+
+ void setMemberInitializers(MemberInitializers memberInitializers) {
+ MOZ_ASSERT(memberInitializers_.valid == false,
+ "Only init MemberInitializers once");
+ memberInitializers_ = memberInitializers;
+ }
+ const MemberInitializers& getMemberInitializers() {
+ return memberInitializers_;
+ }
+
+ // Allocate a new PrivateScriptData. Headers and GCCellPtrs are initialized.
+ static PrivateScriptData* new_(JSContext* cx, uint32_t ngcthings);
+
+ static bool InitFromStencil(
+ JSContext* cx, js::HandleScript script,
+ const js::frontend::CompilationAtomCache& atomCache,
+ const js::frontend::CompilationStencil& stencil,
+ js::frontend::CompilationGCOutput& gcOutput,
+ const js::frontend::ScriptIndex scriptIndex);
+
+ void trace(JSTracer* trc);
+
+ size_t allocationSize() const;
+
+ // PrivateScriptData has trailing data so isn't copyable or movable.
+ PrivateScriptData(const PrivateScriptData&) = delete;
+ PrivateScriptData& operator=(const PrivateScriptData&) = delete;
+};
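+
+// Example (editor's sketch): because the GCCellPtr array trails the fixed-size
+// header, the allocation size for |ngcthings| entries is computed as in this
+// hypothetical helper (the real computation lives in PrivateScriptData::new_
+// and allocationSize()):
+//
+//   size_t AllocationSizeFor(uint32_t ngcthings) {
+//     return sizeof(js::PrivateScriptData) +
+//            ngcthings * sizeof(JS::GCCellPtr);
+//   }
+//
+// gcthings() then spans the bytes between offsetOfGCThings() and endOffset().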
+
+// [SMDOC] Script Representation (js::BaseScript)
+//
+// A "script" corresponds to a JavaScript function or a top-level (global, eval,
+// module) body that will be executed using SpiderMonkey bytecode. Note that
+// special forms such as asm.js do not use bytecode or the BaseScript type.
+//
+// BaseScript may be generated directly from the parser/emitter, or by cloning
+// or deserializing another script. Cloning is typically used when a script is
+// needed in multiple realms and we would like to avoid re-compiling.
+//
+// A single script may be shared by multiple JSFunctions in a realm when those
+// function objects are used as closures. In this case, a single JSFunction is
+// considered canonical (and often does not escape to script directly).
+//
+// A BaseScript may be in "lazy" form where the parser performs a syntax-only
+// parse and saves minimal information. These lazy scripts must be recompiled
+// from the source (generating bytecode) before they can execute in a process
+// called "delazification". On GC memory pressure, a fully-compiled script may
+// be converted back into lazy form by "relazification".
+//
+// A fully-initialized BaseScript can be identified with `hasBytecode()` and
+// will have bytecode and a set of GC-things such as scopes, inner-functions,
+// and object/string literals. This is referred to as a "non-lazy" script.
+//
+// A lazy script has either an enclosing script or an enclosing scope. Each
+// script needs to know its enclosing scope in order to be fully compiled. If
+// the parent is still lazy, we track that script and must compile it first to
+// learn our own enclosing scope. This is because scope objects are not created
+// until full compilation and bytecode generation.
+//
+//
+// # Script Warm-Up #
+//
+// A script evolves its representation over time. As it becomes "hotter" we
+// attach a stack of additional data structures generated by the JITs to
+// speed up execution. This evolution may also be run in reverse, in order to
+// reduce memory usage.
+//
+// +-------------------------------------+
+// | ScriptSource |
+// | Provides: Source |
+// | Engine: Parser |
+// +-------------------------------------+
+// v
+// +-----------------------------------------------+
+// | BaseScript |
+// | Provides: SourceExtent/Bindings |
+// | Engine: CompileLazyFunctionToStencil |
+// | /InstantiateStencilsForDelazify |
+// +-----------------------------------------------+
+// v
+// +-------------------------------------+
+// | ImmutableScriptData |
+// | Provides: Bytecode |
+// | Engine: Interpreter |
+// +-------------------------------------+
+// v
+// +-------------------------------------+
+// | JitScript |
+// | Provides: Inline Caches (ICs) |
+// | Engine: BaselineInterpreter |
+// +-------------------------------------+
+// v
+// +-------------------------------------+
+// | BaselineScript |
+// | Provides: Native Code |
+// | Engine: Baseline |
+// +-------------------------------------+
+// v
+// +-------------------------------------+
+// | IonScript |
+// | Provides: Optimized Native Code |
+// | Engine: IonMonkey |
+// +-------------------------------------+
+//
+// NOTE: Scripts may be directly created with bytecode and skip the lazy script
+// form. This is always the case for top-level scripts.
+class BaseScript : public gc::TenuredCellWithNonGCPointer<uint8_t> {
+ friend class js::gc::CellAllocator;
+
+ public:
+ // Pointer to baseline->method()->raw(), ion->method()->raw(), a wasm jit
+ // entry, the JIT's EnterInterpreter stub, or the lazy link stub. Must be
+ // non-null (except on no-jit builds). This is stored in the cell header.
+ uint8_t* jitCodeRaw() const { return headerPtr(); }
+
+ protected:
+  // Multi-purpose value that changes type as the script warms up from lazy
+  // form to interpreted bytecode to JIT code. See the ScriptWarmUpData type
+  // for more info.
+ ScriptWarmUpData warmUpData_ = {};
+
+ // For function scripts this is the canonical function, otherwise nullptr.
+ const GCPtr<JSFunction*> function_ = {};
+
+ // The ScriptSourceObject for this script. This is always same-compartment and
+ // same-realm with this script.
+ const GCPtr<ScriptSourceObject*> sourceObject_ = {};
+
+ // Position of the function in the source buffer. Both in terms of line/column
+ // and code-unit offset.
+ const SourceExtent extent_ = {};
+
+ // Immutable flags are a combination of parser options and bytecode
+ // characteristics. These flags are preserved when serializing or copying this
+ // script.
+ const ImmutableScriptFlags immutableFlags_ = {};
+
+ // Mutable flags store transient information used by subsystems such as the
+ // debugger and the JITs. These flags are *not* preserved when serializing or
+ // cloning since they are based on runtime state.
+ MutableScriptFlags mutableFlags_ = {};
+
+ // Variable-length data owned by this script. This stores one of:
+ // - GC pointers that bytecode references.
+  // - Inner-functions and bindings generated by the syntax parser.
+  // - Nullptr, if there is no bytecode or inner functions.
+  // This is updated as the script is delazified and relazified.
+ GCStructPtr<PrivateScriptData*> data_;
+
+ // Shareable script data. This includes runtime-wide atom pointers, bytecode,
+ // and various script note structures. If the script is currently lazy, this
+ // will be nullptr.
+ RefPtr<js::SharedImmutableScriptData> sharedData_ = {};
+
+ // End of fields.
+
+ BaseScript(uint8_t* stubEntry, JSFunction* function,
+ ScriptSourceObject* sourceObject, const SourceExtent& extent,
+ uint32_t immutableFlags);
+
+ void setJitCodeRaw(uint8_t* code) { setHeaderPtr(code); }
+
+ public:
+ static BaseScript* New(JSContext* cx, JS::Handle<JSFunction*> function,
+ JS::Handle<js::ScriptSourceObject*> sourceObject,
+ const js::SourceExtent& extent,
+ uint32_t immutableFlags);
+
+ // Create a lazy BaseScript without initializing any gc-things.
+ static BaseScript* CreateRawLazy(JSContext* cx, uint32_t ngcthings,
+ HandleFunction fun,
+ JS::Handle<ScriptSourceObject*> sourceObject,
+ const SourceExtent& extent,
+ uint32_t immutableFlags);
+
+ bool isUsingInterpreterTrampoline(JSRuntime* rt) const;
+
+ // Canonical function for the script, if it has a function. For top-level
+ // scripts this is nullptr.
+ JSFunction* function() const { return function_; }
+
+ JS::Realm* realm() const { return sourceObject()->realm(); }
+ JS::Compartment* compartment() const { return sourceObject()->compartment(); }
+ JS::Compartment* maybeCompartment() const { return compartment(); }
+ inline JSPrincipals* principals() const;
+
+ ScriptSourceObject* sourceObject() const { return sourceObject_; }
+ ScriptSource* scriptSource() const { return sourceObject()->source(); }
+ ScriptSource* maybeForwardedScriptSource() const;
+
+ bool mutedErrors() const { return scriptSource()->mutedErrors(); }
+
+ const char* filename() const { return scriptSource()->filename(); }
+ HashNumber filenameHash() const { return scriptSource()->filenameHash(); }
+ const char* maybeForwardedFilename() const {
+ return maybeForwardedScriptSource()->filename();
+ }
+
+ uint32_t sourceStart() const { return extent_.sourceStart; }
+ uint32_t sourceEnd() const { return extent_.sourceEnd; }
+ uint32_t sourceLength() const {
+ return extent_.sourceEnd - extent_.sourceStart;
+ }
+ uint32_t toStringStart() const { return extent_.toStringStart; }
+ uint32_t toStringEnd() const { return extent_.toStringEnd; }
+ SourceExtent extent() const { return extent_; }
+
+ [[nodiscard]] bool appendSourceDataForToString(JSContext* cx,
+ js::StringBuffer& buf);
+
+ uint32_t lineno() const { return extent_.lineno; }
+ uint32_t column() const { return extent_.column; }
+
+ JS::DelazificationOption delazificationMode() const {
+ return scriptSource()->delazificationMode();
+ }
+
+ public:
+ ImmutableScriptFlags immutableFlags() const { return immutableFlags_; }
+ RO_IMMUTABLE_SCRIPT_FLAGS(immutableFlags_)
+ RW_MUTABLE_SCRIPT_FLAGS(mutableFlags_)
+
+ bool hasEnclosingScript() const { return warmUpData_.isEnclosingScript(); }
+ BaseScript* enclosingScript() const {
+ return warmUpData_.toEnclosingScript();
+ }
+ void setEnclosingScript(BaseScript* enclosingScript);
+
+  // Returns true if the script has an enclosing scope but no bytecode. It is
+  // ready for delazification.
+ // NOTE: The enclosing script must have been successfully compiled at some
+ // point for the enclosing scope to exist. That script may have since been
+ // GC'd, but we kept the scope live so we can still compile ourselves.
+ bool isReadyForDelazification() const {
+ return warmUpData_.isEnclosingScope();
+ }
+
+ Scope* enclosingScope() const;
+ void setEnclosingScope(Scope* enclosingScope);
+ Scope* releaseEnclosingScope();
+
+ bool hasJitScript() const { return warmUpData_.isJitScript(); }
+ jit::JitScript* jitScript() const {
+ MOZ_ASSERT(hasJitScript());
+ return warmUpData_.toJitScript();
+ }
+ jit::JitScript* maybeJitScript() const {
+ return hasJitScript() ? jitScript() : nullptr;
+ }
+
+ inline bool hasBaselineScript() const;
+ inline bool hasIonScript() const;
+
+ bool hasPrivateScriptData() const { return data_ != nullptr; }
+
+ // Update data_ pointer while also informing GC MemoryUse tracking.
+ void swapData(UniquePtr<PrivateScriptData>& other);
+
+ mozilla::Span<const JS::GCCellPtr> gcthings() const {
+ return data_ ? data_->gcthings() : mozilla::Span<JS::GCCellPtr>();
+ }
+
+ // NOTE: This is only used to initialize a fresh script.
+ mozilla::Span<JS::GCCellPtr> gcthingsForInit() {
+ MOZ_ASSERT(!hasBytecode());
+ return data_ ? data_->gcthings() : mozilla::Span<JS::GCCellPtr>();
+ }
+
+ void setMemberInitializers(MemberInitializers memberInitializers) {
+ MOZ_ASSERT(useMemberInitializers());
+ MOZ_ASSERT(data_);
+ data_->setMemberInitializers(memberInitializers);
+ }
+ const MemberInitializers& getMemberInitializers() const {
+ MOZ_ASSERT(data_);
+ return data_->getMemberInitializers();
+ }
+
+ SharedImmutableScriptData* sharedData() const { return sharedData_; }
+ void initSharedData(SharedImmutableScriptData* data) {
+ MOZ_ASSERT(sharedData_ == nullptr);
+ sharedData_ = data;
+ }
+ void freeSharedData() { sharedData_ = nullptr; }
+
+ // NOTE: Script only has bytecode if JSScript::fullyInitFromStencil completes
+ // successfully.
+ bool hasBytecode() const {
+ if (sharedData_) {
+ MOZ_ASSERT(data_);
+ MOZ_ASSERT(warmUpData_.isWarmUpCount() || warmUpData_.isJitScript());
+ return true;
+ }
+ return false;
+ }
+
+ public:
+ static const JS::TraceKind TraceKind = JS::TraceKind::Script;
+
+ void traceChildren(JSTracer* trc);
+ void finalize(JS::GCContext* gcx);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return mallocSizeOf(data_);
+ }
+
+ inline JSScript* asJSScript();
+
+ // JIT accessors
+ static constexpr size_t offsetOfJitCodeRaw() { return offsetOfHeaderPtr(); }
+ static constexpr size_t offsetOfPrivateData() {
+ return offsetof(BaseScript, data_);
+ }
+ static constexpr size_t offsetOfSharedData() {
+ return offsetof(BaseScript, sharedData_);
+ }
+ static size_t offsetOfImmutableFlags() {
+ static_assert(sizeof(ImmutableScriptFlags) == sizeof(uint32_t));
+ return offsetof(BaseScript, immutableFlags_);
+ }
+ static constexpr size_t offsetOfMutableFlags() {
+ static_assert(sizeof(MutableScriptFlags) == sizeof(uint32_t));
+ return offsetof(BaseScript, mutableFlags_);
+ }
+ static constexpr size_t offsetOfWarmUpData() {
+ return offsetof(BaseScript, warmUpData_);
+ }
+};
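+
+// Example (editor's sketch): the warm-up state above is a four-way union, so
+// exactly one of these predicates holds for a given script:
+//
+//   void DescribeWarmUpState(js::BaseScript* script) {
+//     if (script->hasJitScript()) {
+//       // Warm: JIT data (ICs, maybe Baseline/Ion code) is attached.
+//     } else if (script->isReadyForDelazification()) {
+//       // Lazy, but the enclosing scope is known; can delazify directly.
+//     } else if (script->hasEnclosingScript()) {
+//       // Lazy with a lazy parent; the parent must compile first.
+//     } else {
+//       // Counting warm-ups in the C++ interpreter.
+//     }
+//   }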
+
+extern void SweepScriptData(JSRuntime* rt);
+
+} /* namespace js */
+
+class JSScript : public js::BaseScript {
+ private:
+ friend bool js::PrivateScriptData::InitFromStencil(
+ JSContext* cx, js::HandleScript script,
+ const js::frontend::CompilationAtomCache& atomCache,
+ const js::frontend::CompilationStencil& stencil,
+ js::frontend::CompilationGCOutput& gcOutput,
+ const js::frontend::ScriptIndex scriptIndex);
+
+ private:
+ using js::BaseScript::BaseScript;
+
+ public:
+ static JSScript* Create(JSContext* cx, JS::Handle<JSFunction*> function,
+ JS::Handle<js::ScriptSourceObject*> sourceObject,
+ const js::SourceExtent& extent,
+ js::ImmutableScriptFlags flags);
+
+ // NOTE: This should only be used while delazifying.
+ static JSScript* CastFromLazy(js::BaseScript* lazy) {
+ return static_cast<JSScript*>(lazy);
+ }
+
+ // NOTE: If you use createPrivateScriptData directly instead of via
+ // fullyInitFromStencil, you are responsible for notifying the debugger
+ // after successfully creating the script.
+ static bool createPrivateScriptData(JSContext* cx,
+ JS::Handle<JSScript*> script,
+ uint32_t ngcthings);
+
+ public:
+ static bool fullyInitFromStencil(
+ JSContext* cx, const js::frontend::CompilationAtomCache& atomCache,
+ const js::frontend::CompilationStencil& stencil,
+ js::frontend::CompilationGCOutput& gcOutput, js::HandleScript script,
+ const js::frontend::ScriptIndex scriptIndex);
+
+ // Allocate a JSScript and initialize it with bytecode. This consumes
+ // allocations within the stencil.
+ static JSScript* fromStencil(JSContext* cx,
+ js::frontend::CompilationAtomCache& atomCache,
+ const js::frontend::CompilationStencil& stencil,
+ js::frontend::CompilationGCOutput& gcOutput,
+ js::frontend::ScriptIndex scriptIndex);
+
+#ifdef DEBUG
+ private:
+ // Assert that jump targets are within the code array of the script.
+ void assertValidJumpTargets() const;
+#endif
+
+ public:
+ js::ImmutableScriptData* immutableScriptData() const {
+ return sharedData_->get();
+ }
+
+ // Script bytecode is immutable after creation.
+ jsbytecode* code() const {
+ if (!sharedData_) {
+ return nullptr;
+ }
+ return immutableScriptData()->code();
+ }
+
+ bool hasForceInterpreterOp() const {
+ // JSOp::ForceInterpreter, if present, must be the first op.
+ MOZ_ASSERT(length() >= 1);
+ return JSOp(*code()) == JSOp::ForceInterpreter;
+ }
+
+ js::AllBytecodesIterable allLocations() {
+ return js::AllBytecodesIterable(this);
+ }
+
+ js::BytecodeLocation location() { return js::BytecodeLocation(this, code()); }
+
+ size_t length() const {
+ MOZ_ASSERT(sharedData_);
+ return immutableScriptData()->codeLength();
+ }
+
+ jsbytecode* codeEnd() const { return code() + length(); }
+
+ jsbytecode* lastPC() const {
+ jsbytecode* pc = codeEnd() - js::JSOpLength_RetRval;
+ MOZ_ASSERT(JSOp(*pc) == JSOp::RetRval || JSOp(*pc) == JSOp::Return);
+ return pc;
+ }
+
+ // Note: ArgBytes is optional, but if specified then containsPC will also
+ // check that the opcode arguments are in bounds.
+ template <size_t ArgBytes = 0>
+ bool containsPC(const jsbytecode* pc) const {
+ MOZ_ASSERT_IF(ArgBytes,
+ js::GetBytecodeLength(pc) == sizeof(jsbytecode) + ArgBytes);
+ const jsbytecode* lastByte = pc + ArgBytes;
+ return pc >= code() && lastByte < codeEnd();
+ }
+ template <typename ArgType>
+ bool containsPC(const jsbytecode* pc) const {
+ return containsPC<sizeof(ArgType)>(pc);
+ }
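+
+  // Example (editor's sketch): the templated forms additionally assert that
+  // the operand bytes are in bounds, e.g. for an op with a uint32 immediate:
+  //
+  //   MOZ_ASSERT(script->containsPC<uint32_t>(pc));
+  //   uint32_t immediate = GET_UINT32(pc);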
+
+ bool contains(const js::BytecodeLocation& loc) const {
+ return containsPC(loc.toRawBytecode());
+ }
+
+ size_t pcToOffset(const jsbytecode* pc) const {
+ MOZ_ASSERT(containsPC(pc));
+ return size_t(pc - code());
+ }
+
+ jsbytecode* offsetToPC(size_t offset) const {
+ MOZ_ASSERT(offset < length());
+ return code() + offset;
+ }
+
+ size_t mainOffset() const { return immutableScriptData()->mainOffset; }
+
+ // The fixed part of a stack frame is comprised of vars (in function and
+ // module code) and block-scoped locals (in all kinds of code).
+ size_t nfixed() const { return immutableScriptData()->nfixed; }
+
+ // Number of fixed slots reserved for slots that are always live. Only
+ // nonzero for function or module code.
+ size_t numAlwaysLiveFixedSlots() const;
+
+ // Calculate the number of fixed slots that are live at a particular bytecode.
+ size_t calculateLiveFixed(jsbytecode* pc);
+
+ size_t nslots() const { return immutableScriptData()->nslots; }
+
+ unsigned numArgs() const;
+
+ inline js::Shape* initialEnvironmentShape() const;
+
+ bool functionHasParameterExprs() const;
+
+ bool functionAllowsParameterRedeclaration() const {
+ // Parameter redeclaration is only allowed for non-strict functions with
+ // simple parameter lists, which are neither arrow nor method functions. We
+ // don't have a flag at hand to test the function kind, but we can still
+ // test if the function is non-strict and has a simple parameter list by
+ // checking |hasMappedArgsObj()|. (Mapped arguments objects are only
+ // created for non-strict functions with simple parameter lists.)
+ return hasMappedArgsObj();
+ }
+
+ size_t numICEntries() const { return immutableScriptData()->numICEntries; }
+
+ size_t funLength() const { return immutableScriptData()->funLength; }
+
+ void cacheForEval() {
+ MOZ_ASSERT(isForEval());
+ // IsEvalCacheCandidate will make sure that there's nothing in this
+ // script that would prevent reexecution even if isRunOnce is
+    // true. So just pretend that we never ran this script.
+ clearFlag(MutableFlags::HasRunOnce);
+ }
+
+ /*
+ * Arguments access (via JSOp::*Arg* opcodes) must access the canonical
+ * location for the argument. If an arguments object exists AND it's mapped
+ * ('arguments' aliases formals), then all access must go through the
+ * arguments object. Otherwise, the local slot is the canonical location for
+   * the argument. Note: if a formal is aliased through the scope chain, then
+   * script->formalIsAliased is true and JSOp::*Arg* opcodes won't be emitted
+   * at all.
+ */
+ bool argsObjAliasesFormals() const {
+ return needsArgsObj() && hasMappedArgsObj();
+ }
+
+ void updateJitCodeRaw(JSRuntime* rt);
+
+ bool isModule() const;
+ js::ModuleObject* module() const;
+
+ bool isGlobalCode() const;
+
+ // Returns true if the script may read formal arguments on the stack
+ // directly, via lazy arguments or a rest parameter.
+ bool mayReadFrameArgsDirectly();
+
+ static JSLinearString* sourceData(JSContext* cx, JS::HandleScript script);
+
+#ifdef MOZ_VTUNE
+ // Unique Method ID passed to the VTune profiler. Allows attribution of
+ // different jitcode to the same source script.
+ uint32_t vtuneMethodID();
+#endif
+
+ public:
+ /* Return whether this is a 'direct eval' script in a function scope. */
+ bool isDirectEvalInFunction() const;
+
+ /*
+ * Return whether this script is a top-level script.
+ *
+ * If we evaluate some code which contains a syntax error, then we might
+ * produce a JSScript which has no associated bytecode. Testing with
+   * |code()| filters out this kind of script.
+ *
+ * If this script has a function associated to it, then it is not the
+ * top-level of a file.
+ */
+ bool isTopLevel() { return code() && !isFunction(); }
+
+ /* Ensure the script has a JitScript. */
+ inline bool ensureHasJitScript(JSContext* cx, js::jit::AutoKeepJitScripts&);
+
+ void maybeReleaseJitScript(JS::GCContext* gcx);
+ void releaseJitScript(JS::GCContext* gcx);
+ void releaseJitScriptOnFinalize(JS::GCContext* gcx);
+
+ inline js::jit::BaselineScript* baselineScript() const;
+ inline js::jit::IonScript* ionScript() const;
+
+ inline bool isIonCompilingOffThread() const;
+ inline bool canIonCompile() const;
+ inline void disableIon();
+
+ inline bool canBaselineCompile() const;
+ inline void disableBaselineCompile();
+
+ inline js::GlobalObject& global() const;
+ inline bool hasGlobal(const js::GlobalObject* global) const;
+ js::GlobalObject& uninlinedGlobal() const;
+
+ js::GCThingIndex bodyScopeIndex() const {
+ return immutableScriptData()->bodyScopeIndex;
+ }
+
+ js::Scope* bodyScope() const { return getScope(bodyScopeIndex()); }
+
+ js::Scope* outermostScope() const {
+ // The body scope may not be the outermost scope in the script when
+ // the decl env scope is present.
+ return getScope(js::GCThingIndex::outermostScopeIndex());
+ }
+
+ bool functionHasExtraBodyVarScope() const {
+ bool res = BaseScript::functionHasExtraBodyVarScope();
+ MOZ_ASSERT_IF(res, functionHasParameterExprs());
+ return res;
+ }
+
+ js::VarScope* functionExtraBodyVarScope() const;
+
+ bool needsBodyEnvironment() const;
+
+ inline js::LexicalScope* maybeNamedLambdaScope() const;
+
+ // Drop script data and reset warmUpData to reference enclosing scope.
+ void relazify(JSRuntime* rt);
+
+ private:
+ bool createJitScript(JSContext* cx);
+
+ bool shareScriptData(JSContext* cx);
+
+ public:
+ inline uint32_t getWarmUpCount() const;
+ inline void incWarmUpCounter();
+ inline void resetWarmUpCounterForGC();
+
+ void resetWarmUpCounterToDelayIonCompilation();
+
+ unsigned getWarmUpResetCount() const {
+ constexpr uint32_t MASK = uint32_t(MutableFlags::WarmupResets_MASK);
+ return mutableFlags_ & MASK;
+ }
+ void incWarmUpResetCounter() {
+ constexpr uint32_t MASK = uint32_t(MutableFlags::WarmupResets_MASK);
+ uint32_t newCount = getWarmUpResetCount() + 1;
+ if (newCount <= MASK) {
+ mutableFlags_ &= ~MASK;
+ mutableFlags_ |= newCount;
+ }
+ }
+ void resetWarmUpResetCounter() {
+ constexpr uint32_t MASK = uint32_t(MutableFlags::WarmupResets_MASK);
+ mutableFlags_ &= ~MASK;
+ }
+
+ public:
+ bool initScriptCounts(JSContext* cx);
+ js::ScriptCounts& getScriptCounts();
+ js::PCCounts* maybeGetPCCounts(jsbytecode* pc);
+ const js::PCCounts* maybeGetThrowCounts(jsbytecode* pc);
+ js::PCCounts* getThrowCounts(jsbytecode* pc);
+ uint64_t getHitCount(jsbytecode* pc);
+ void addIonCounts(js::jit::IonScriptCounts* ionCounts);
+ js::jit::IonScriptCounts* getIonCounts();
+ void releaseScriptCounts(js::ScriptCounts* counts);
+ void destroyScriptCounts();
+ void resetScriptCounts();
+
+ jsbytecode* main() const { return code() + mainOffset(); }
+
+ js::BytecodeLocation mainLocation() const {
+ return js::BytecodeLocation(this, main());
+ }
+
+ js::BytecodeLocation endLocation() const {
+ return js::BytecodeLocation(this, codeEnd());
+ }
+
+ js::BytecodeLocation offsetToLocation(uint32_t offset) const {
+ return js::BytecodeLocation(this, offsetToPC(offset));
+ }
+
+ void addSizeOfJitScript(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* sizeOfJitScript,
+ size_t* sizeOfBaselineFallbackStubs) const;
+
+ mozilla::Span<const js::TryNote> trynotes() const {
+ return immutableScriptData()->tryNotes();
+ }
+
+ mozilla::Span<const js::ScopeNote> scopeNotes() const {
+ return immutableScriptData()->scopeNotes();
+ }
+
+ mozilla::Span<const uint32_t> resumeOffsets() const {
+ return immutableScriptData()->resumeOffsets();
+ }
+
+ uint32_t tableSwitchCaseOffset(jsbytecode* pc, uint32_t caseIndex) const {
+ MOZ_ASSERT(containsPC(pc));
+ MOZ_ASSERT(JSOp(*pc) == JSOp::TableSwitch);
+ uint32_t firstResumeIndex = GET_RESUMEINDEX(pc + 3 * JUMP_OFFSET_LEN);
+ return resumeOffsets()[firstResumeIndex + caseIndex];
+ }
+ jsbytecode* tableSwitchCasePC(jsbytecode* pc, uint32_t caseIndex) const {
+ return offsetToPC(tableSwitchCaseOffset(pc, caseIndex));
+ }
+
+ bool hasLoops();
+
+ uint32_t numNotes() const {
+ MOZ_ASSERT(sharedData_);
+ return immutableScriptData()->noteLength();
+ }
+ js::SrcNote* notes() const {
+ MOZ_ASSERT(sharedData_);
+ return immutableScriptData()->notes();
+ }
+
+ JSString* getString(js::GCThingIndex index) const {
+ return &gcthings()[index].as<JSString>();
+ }
+
+ JSString* getString(jsbytecode* pc) const {
+ MOZ_ASSERT(containsPC<js::GCThingIndex>(pc));
+ MOZ_ASSERT(js::JOF_OPTYPE((JSOp)*pc) == JOF_STRING);
+ return getString(GET_GCTHING_INDEX(pc));
+ }
+
+ JSAtom* getAtom(js::GCThingIndex index) const {
+ return &gcthings()[index].as<JSString>().asAtom();
+ }
+
+ JSAtom* getAtom(jsbytecode* pc) const {
+ MOZ_ASSERT(containsPC<js::GCThingIndex>(pc));
+ MOZ_ASSERT(js::JOF_OPTYPE((JSOp)*pc) == JOF_ATOM);
+ return getAtom(GET_GCTHING_INDEX(pc));
+ }
+
+ js::PropertyName* getName(js::GCThingIndex index) {
+ return getAtom(index)->asPropertyName();
+ }
+
+ js::PropertyName* getName(jsbytecode* pc) const {
+ return getAtom(pc)->asPropertyName();
+ }
+
+ JSObject* getObject(js::GCThingIndex index) const {
+ MOZ_ASSERT(gcthings()[index].asCell()->isTenured());
+ return &gcthings()[index].as<JSObject>();
+ }
+
+ JSObject* getObject(const jsbytecode* pc) const {
+ MOZ_ASSERT(containsPC<js::GCThingIndex>(pc));
+ return getObject(GET_GCTHING_INDEX(pc));
+ }
+
+ js::SharedShape* getShape(js::GCThingIndex index) const {
+ return &gcthings()[index].as<js::Shape>().asShared();
+ }
+
+ js::SharedShape* getShape(const jsbytecode* pc) const {
+ MOZ_ASSERT(containsPC<js::GCThingIndex>(pc));
+ return getShape(GET_GCTHING_INDEX(pc));
+ }
+
+ js::Scope* getScope(js::GCThingIndex index) const {
+ return &gcthings()[index].as<js::Scope>();
+ }
+
+ js::Scope* getScope(jsbytecode* pc) const {
+ // This method is used to get a scope directly using a JSOp with an
+ // index. To search through ScopeNotes to look for a Scope using pc,
+ // use lookupScope.
+ MOZ_ASSERT(containsPC<js::GCThingIndex>(pc));
+ MOZ_ASSERT(js::JOF_OPTYPE(JSOp(*pc)) == JOF_SCOPE,
+ "Did you mean to use lookupScope(pc)?");
+ return getScope(GET_GCTHING_INDEX(pc));
+ }
+
+ inline JSFunction* getFunction(js::GCThingIndex index) const;
+ inline JSFunction* getFunction(jsbytecode* pc) const;
+
+ inline js::RegExpObject* getRegExp(js::GCThingIndex index) const;
+ inline js::RegExpObject* getRegExp(jsbytecode* pc) const;
+
+ js::BigInt* getBigInt(js::GCThingIndex index) const {
+ MOZ_ASSERT(gcthings()[index].asCell()->isTenured());
+ return &gcthings()[index].as<js::BigInt>();
+ }
+
+ js::BigInt* getBigInt(jsbytecode* pc) const {
+ MOZ_ASSERT(containsPC<js::GCThingIndex>(pc));
+ MOZ_ASSERT(js::JOF_OPTYPE(JSOp(*pc)) == JOF_BIGINT);
+ return getBigInt(GET_GCTHING_INDEX(pc));
+ }
+
+ // The following 3 functions find the static scope just before the
+ // execution of the instruction pointed to by pc.
+
+ js::Scope* lookupScope(const jsbytecode* pc) const;
+
+ js::Scope* innermostScope(const jsbytecode* pc) const;
+ js::Scope* innermostScope() const { return innermostScope(main()); }
+
+ /*
+ * The isEmpty method tells whether this script has code that computes any
+   * result (not the return value, but the result, AKA the normal completion
+   * value) other than JSVAL_VOID, or has any other effects.
+ */
+ bool isEmpty() const {
+ if (length() > 3) {
+ return false;
+ }
+
+ jsbytecode* pc = code();
+ if (noScriptRval() && JSOp(*pc) == JSOp::False) {
+ ++pc;
+ }
+ return JSOp(*pc) == JSOp::RetRval;
+ }
+
+ bool formalIsAliased(unsigned argSlot);
+ bool anyFormalIsForwarded();
+ bool formalLivesInArgumentsObject(unsigned argSlot);
+
+ // See comment above 'debugMode' in Realm.h for explanation of
+ // invariants of debuggee compartments, scripts, and frames.
+ inline bool isDebuggee() const;
+
+ // Create an allocation site associated with this script/JitScript to track
+ // nursery allocations.
+ js::gc::AllocSite* createAllocSite();
+
+ // A helper class to prevent relazification of the given function's script
+ // while it's holding on to it. This class automatically roots the script.
+ class AutoDelazify;
+ friend class AutoDelazify;
+
+ class AutoDelazify {
+ JS::RootedScript script_;
+ JSContext* cx_;
+ bool oldAllowRelazify_ = false;
+
+ public:
+ explicit AutoDelazify(JSContext* cx, JS::HandleFunction fun = nullptr)
+ : script_(cx), cx_(cx) {
+ holdScript(fun);
+ }
+
+ ~AutoDelazify() { dropScript(); }
+
+ void operator=(JS::HandleFunction fun) {
+ dropScript();
+ holdScript(fun);
+ }
+
+ operator JS::HandleScript() const { return script_; }
+ explicit operator bool() const { return script_; }
+
+ private:
+ void holdScript(JS::HandleFunction fun);
+ void dropScript();
+ };
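+
+  // Example (editor's sketch): typical use keeps |fun|'s script compiled and
+  // non-relazifiable for the guard's lifetime:
+  //
+  //   JSScript::AutoDelazify guard(cx, fun);
+  //   if (!guard) {
+  //     return false;  // OOM or delazification failure
+  //   }
+  //   JS::HandleScript script = guard;  // implicit conversion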
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ public:
+ struct DumpOptions {
+ bool recursive = false;
+ bool runtimeData = false;
+ };
+
+ void dump(JSContext* cx);
+ void dumpRecursive(JSContext* cx);
+
+ static bool dump(JSContext* cx, JS::Handle<JSScript*> script,
+ DumpOptions& options, js::Sprinter* sp);
+ static bool dumpSrcNotes(JSContext* cx, JS::Handle<JSScript*> script,
+ js::Sprinter* sp);
+ static bool dumpTryNotes(JSContext* cx, JS::Handle<JSScript*> script,
+ js::Sprinter* sp);
+ static bool dumpScopeNotes(JSContext* cx, JS::Handle<JSScript*> script,
+ js::Sprinter* sp);
+ static bool dumpGCThings(JSContext* cx, JS::Handle<JSScript*> script,
+ js::Sprinter* sp);
+#endif
+};
+
+namespace js {
+
+struct ScriptAndCounts {
+ /* This structure is stored and marked from the JSRuntime. */
+ JSScript* script;
+ ScriptCounts scriptCounts;
+
+ inline explicit ScriptAndCounts(JSScript* script);
+ inline ScriptAndCounts(ScriptAndCounts&& sac);
+
+ const PCCounts* maybeGetPCCounts(jsbytecode* pc) const {
+ return scriptCounts.maybeGetPCCounts(script->pcToOffset(pc));
+ }
+ const PCCounts* maybeGetThrowCounts(jsbytecode* pc) const {
+ return scriptCounts.maybeGetThrowCounts(script->pcToOffset(pc));
+ }
+
+ jit::IonScriptCounts* getIonCounts() const { return scriptCounts.ionCounts_; }
+
+ void trace(JSTracer* trc) {
+ TraceRoot(trc, &script, "ScriptAndCounts::script");
+ }
+};
+
+extern JS::UniqueChars FormatIntroducedFilename(const char* filename,
+ unsigned lineno,
+ const char* introducer);
+
+struct GSNCache;
+
+const js::SrcNote* GetSrcNote(GSNCache& cache, JSScript* script,
+ jsbytecode* pc);
+
+extern const js::SrcNote* GetSrcNote(JSContext* cx, JSScript* script,
+ jsbytecode* pc);
+
+extern jsbytecode* LineNumberToPC(JSScript* script, unsigned lineno);
+
+extern JS_PUBLIC_API unsigned GetScriptLineExtent(JSScript* script);
+
+#ifdef JS_CACHEIR_SPEW
+void maybeUpdateWarmUpCount(JSScript* script);
+void maybeSpewScriptFinalWarmUpCount(JSScript* script);
+#endif
+
+} /* namespace js */
+
+namespace js {
+
+extern unsigned PCToLineNumber(JSScript* script, jsbytecode* pc,
+ unsigned* columnp = nullptr);
+
+extern unsigned PCToLineNumber(unsigned startLine, unsigned startCol,
+ SrcNote* notes, jsbytecode* code, jsbytecode* pc,
+ unsigned* columnp = nullptr);
+
+/*
+ * This function returns the file and line number of the script currently
+ * executing on cx. If there is no current script executing on cx (e.g., a
+ * native called directly through JSAPI (e.g., by setTimeout)), nullptr and 0
+ * are returned as the file and line.
+ */
+extern void DescribeScriptedCallerForCompilation(
+ JSContext* cx, MutableHandleScript maybeScript, const char** file,
+ unsigned* linenop, uint32_t* pcOffset, bool* mutedErrors);
+
+/*
+ * Like DescribeScriptedCallerForCompilation, but this function avoids looking
+ * up the script/pc and the full linear scan to compute line number.
+ */
+extern void DescribeScriptedCallerForDirectEval(
+ JSContext* cx, HandleScript script, jsbytecode* pc, const char** file,
+ unsigned* linenop, uint32_t* pcOffset, bool* mutedErrors);
+
+bool CheckCompileOptionsMatch(const JS::ReadOnlyCompileOptions& options,
+ js::ImmutableScriptFlags flags,
+ bool isMultiDecode);
+
+void FillImmutableFlagsFromCompileOptionsForTopLevel(
+ const JS::ReadOnlyCompileOptions& options, js::ImmutableScriptFlags& flags);
+
+void FillImmutableFlagsFromCompileOptionsForFunction(
+ const JS::ReadOnlyCompileOptions& options, js::ImmutableScriptFlags& flags);
+
+} /* namespace js */
+
+namespace JS {
+
+template <>
+struct GCPolicy<js::ScriptLCovEntry>
+ : public IgnoreGCPolicy<js::ScriptLCovEntry> {};
+
+#ifdef JS_CACHEIR_SPEW
+template <>
+struct GCPolicy<js::ScriptFinalWarmUpCountEntry>
+ : public IgnoreGCPolicy<js::ScriptFinalWarmUpCountEntry> {};
+#endif
+
+namespace ubi {
+
+template <>
+class Concrete<JSScript> : public Concrete<js::BaseScript> {};
+
+} // namespace ubi
+} // namespace JS
+
+#endif /* vm_JSScript_h */
diff --git a/js/src/vm/JitActivation.cpp b/js/src/vm/JitActivation.cpp
new file mode 100644
index 0000000000..ba1fe517bc
--- /dev/null
+++ b/js/src/vm/JitActivation.cpp
@@ -0,0 +1,261 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/JitActivation.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_RELEASE_ASSERT
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+#include <utility> // std::move
+
+#include "debugger/DebugAPI.h" // js::DebugAPI
+#include "jit/JSJitFrameIter.h" // js::jit::InlineFrameIterator
+#include "jit/RematerializedFrame.h" // js::jit::RematerializedFrame
+#include "js/AllocPolicy.h" // js::ReportOutOfMemory
+#include "vm/EnvironmentObject.h" // js::DebugEnvironments
+#include "vm/JSContext.h" // JSContext
+#include "vm/Realm.h" // js::AutoRealmUnchecked
+#include "wasm/WasmCode.h" // js::wasm::Code
+#include "wasm/WasmConstants.h" // js::wasm::Trap
+#include "wasm/WasmFrameIter.h" // js::wasm::{RegisterState,StartUnwinding,UnwindState}
+#include "wasm/WasmInstance.h" // js::wasm::Instance
+#include "wasm/WasmProcess.h" // js::wasm::LookupCode
+
+#include "vm/Realm-inl.h" // js::~AutoRealm
+
+class JS_PUBLIC_API JSTracer;
+
+js::jit::JitActivation::JitActivation(JSContext* cx)
+ : Activation(cx, Jit),
+ packedExitFP_(nullptr),
+ encodedWasmExitReason_(0),
+ prevJitActivation_(cx->jitActivation),
+ rematerializedFrames_(),
+ ionRecovery_(cx),
+ bailoutData_(nullptr),
+ lastProfilingFrame_(nullptr),
+ lastProfilingCallSite_(nullptr) {
+ cx->jitActivation = this;
+ registerProfiling();
+}
+
+js::jit::JitActivation::~JitActivation() {
+ if (isProfiling()) {
+ unregisterProfiling();
+ }
+ cx_->jitActivation = prevJitActivation_;
+
+  // All recovered values are taken from the activation during the bailout.
+ MOZ_ASSERT(ionRecovery_.empty());
+
+ // The BailoutFrameInfo should have unregistered itself from the
+ // JitActivations.
+ MOZ_ASSERT(!bailoutData_);
+
+ // Traps get handled immediately.
+ MOZ_ASSERT(!isWasmTrapping());
+
+ clearRematerializedFrames();
+}
+
+void js::jit::JitActivation::setBailoutData(
+ jit::BailoutFrameInfo* bailoutData) {
+ MOZ_ASSERT(!bailoutData_);
+ bailoutData_ = bailoutData;
+}
+
+void js::jit::JitActivation::cleanBailoutData() {
+ MOZ_ASSERT(bailoutData_);
+ bailoutData_ = nullptr;
+}
+
+void js::jit::JitActivation::removeRematerializedFrame(uint8_t* top) {
+ if (!rematerializedFrames_) {
+ return;
+ }
+
+ if (RematerializedFrameTable::Ptr p = rematerializedFrames_->lookup(top)) {
+ rematerializedFrames_->remove(p);
+ }
+}
+
+void js::jit::JitActivation::clearRematerializedFrames() {
+ if (!rematerializedFrames_) {
+ return;
+ }
+
+ for (RematerializedFrameTable::Enum e(*rematerializedFrames_); !e.empty();
+ e.popFront()) {
+ e.removeFront();
+ }
+}
+
+js::jit::RematerializedFrame* js::jit::JitActivation::getRematerializedFrame(
+ JSContext* cx, const JSJitFrameIter& iter, size_t inlineDepth,
+ MaybeReadFallback::FallbackConsequence consequence) {
+ MOZ_ASSERT(iter.activation() == this);
+ MOZ_ASSERT(iter.isIonScripted());
+
+ if (!rematerializedFrames_) {
+ rematerializedFrames_ = cx->make_unique<RematerializedFrameTable>(cx);
+ if (!rematerializedFrames_) {
+ return nullptr;
+ }
+ }
+
+ uint8_t* top = iter.fp();
+ RematerializedFrameTable::AddPtr p = rematerializedFrames_->lookupForAdd(top);
+ if (!p) {
+ RematerializedFrameVector frames(cx);
+
+ // The unit of rematerialization is an uninlined frame and its inlined
+ // frames. Since inlined frames do not exist outside of snapshots, it
+ // is impossible to synchronize their rematerialized copies to
+ // preserve identity. Therefore, we always rematerialize an uninlined
+ // frame and all its inlined frames at once.
+ InlineFrameIterator inlineIter(cx, &iter);
+ MaybeReadFallback recover(cx, this, &iter, consequence);
+
+ // Frames are often rematerialized with the cx inside a Debugger's
+ // realm. To recover slots and to create CallObjects, we need to
+ // be in the script's realm.
+ AutoRealmUnchecked ar(cx, iter.script()->realm());
+
+ if (!RematerializedFrame::RematerializeInlineFrames(cx, top, inlineIter,
+ recover, frames)) {
+ return nullptr;
+ }
+
+ if (!rematerializedFrames_->add(p, top, std::move(frames))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ // See comment in unsetPrevUpToDateUntil.
+ DebugEnvironments::unsetPrevUpToDateUntil(cx,
+ p->value()[inlineDepth].get());
+ }
+
+ return p->value()[inlineDepth].get();
+}
+
+js::jit::RematerializedFrame* js::jit::JitActivation::lookupRematerializedFrame(
+ uint8_t* top, size_t inlineDepth) {
+ if (!rematerializedFrames_) {
+ return nullptr;
+ }
+ if (RematerializedFrameTable::Ptr p = rematerializedFrames_->lookup(top)) {
+ return inlineDepth < p->value().length() ? p->value()[inlineDepth].get()
+ : nullptr;
+ }
+ return nullptr;
+}
+
+void js::jit::JitActivation::removeRematerializedFramesFromDebugger(
+ JSContext* cx, uint8_t* top) {
+ // Ion bailout can fail due to overrecursion and OOM. In such cases we
+ // cannot honor any further Debugger hooks on the frame, and need to
+ // ensure that its Debugger.Frame entry is cleaned up.
+ if (!cx->realm()->isDebuggee() || !rematerializedFrames_) {
+ return;
+ }
+ if (RematerializedFrameTable::Ptr p = rematerializedFrames_->lookup(top)) {
+ for (uint32_t i = 0; i < p->value().length(); i++) {
+ DebugAPI::handleUnrecoverableIonBailoutError(cx, p->value()[i].get());
+ }
+ rematerializedFrames_->remove(p);
+ }
+}
+
+void js::jit::JitActivation::traceRematerializedFrames(JSTracer* trc) {
+ if (!rematerializedFrames_) {
+ return;
+ }
+ for (RematerializedFrameTable::Enum e(*rematerializedFrames_); !e.empty();
+ e.popFront()) {
+ e.front().value().trace(trc);
+ }
+}
+
+bool js::jit::JitActivation::registerIonFrameRecovery(
+ RInstructionResults&& results) {
+ // Check that there is no entry in the vector yet.
+ MOZ_ASSERT(!maybeIonFrameRecovery(results.frame()));
+ if (!ionRecovery_.append(std::move(results))) {
+ return false;
+ }
+
+ return true;
+}
+
+js::jit::RInstructionResults* js::jit::JitActivation::maybeIonFrameRecovery(
+ JitFrameLayout* fp) {
+ for (RInstructionResults* it = ionRecovery_.begin(); it != ionRecovery_.end();
+ it++) {
+ if (it->frame() == fp) {
+ return it;
+ }
+ }
+
+ return nullptr;
+}
+
+void js::jit::JitActivation::removeIonFrameRecovery(JitFrameLayout* fp) {
+ RInstructionResults* elem = maybeIonFrameRecovery(fp);
+ if (!elem) {
+ return;
+ }
+
+ ionRecovery_.erase(elem);
+}
+
+void js::jit::JitActivation::traceIonRecovery(JSTracer* trc) {
+ for (RInstructionResults* it = ionRecovery_.begin(); it != ionRecovery_.end();
+ it++) {
+ it->trace(trc);
+ }
+}
+
+void js::jit::JitActivation::startWasmTrap(wasm::Trap trap,
+ uint32_t bytecodeOffset,
+ const wasm::RegisterState& state) {
+ MOZ_ASSERT(!isWasmTrapping());
+
+ bool unwound;
+ wasm::UnwindState unwindState;
+ MOZ_RELEASE_ASSERT(wasm::StartUnwinding(state, &unwindState, &unwound));
+ MOZ_ASSERT(unwound == (trap == wasm::Trap::IndirectCallBadSig));
+
+ void* pc = unwindState.pc;
+ const wasm::Frame* fp = wasm::Frame::fromUntaggedWasmExitFP(unwindState.fp);
+
+ const wasm::Code& code = wasm::GetNearestEffectiveInstance(fp)->code();
+ MOZ_RELEASE_ASSERT(&code == wasm::LookupCode(pc));
+
+ // If the frame was unwound, the bytecodeOffset must be recovered from the
+ // callsite so that it is accurate.
+ if (unwound) {
+ bytecodeOffset = code.lookupCallSite(pc)->lineOrBytecode();
+ }
+
+ setWasmExitFP(fp);
+ wasmTrapData_.emplace();
+ wasmTrapData_->resumePC =
+ ((uint8_t*)state.pc) + jit::WasmTrapInstructionLength;
+ wasmTrapData_->unwoundPC = pc;
+ wasmTrapData_->trap = trap;
+ wasmTrapData_->bytecodeOffset = bytecodeOffset;
+
+ MOZ_ASSERT(isWasmTrapping());
+}
+
+void js::jit::JitActivation::finishWasmTrap() {
+ MOZ_ASSERT(isWasmTrapping());
+ packedExitFP_ = nullptr;
+ wasmTrapData_.reset();
+ MOZ_ASSERT(!isWasmTrapping());
+}
diff --git a/js/src/vm/JitActivation.h b/js/src/vm/JitActivation.h
new file mode 100644
index 0000000000..44cd3874d4
--- /dev/null
+++ b/js/src/vm/JitActivation.h
@@ -0,0 +1,268 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_JitActivation_h
+#define vm_JitActivation_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Atomics.h" // mozilla::Atomic, mozilla::Relaxed
+#include "mozilla/Maybe.h" // mozilla::Maybe
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t, uintptr_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "jit/IonTypes.h" // CHECK_OSIPOINT_REGISTERS
+#include "jit/JSJitFrameIter.h" // js::jit::{JSJitFrameIter,RInstructionResults}
+#ifdef CHECK_OSIPOINT_REGISTERS
+# include "jit/Registers.h" // js::jit::RegisterDump
+#endif
+#include "jit/RematerializedFrame.h" // js::jit::RematerializedFrame
+#include "js/GCVector.h" // JS::GCVector
+#include "js/HashTable.h" // js::HashMap
+#include "js/UniquePtr.h" // js::UniquePtr
+#include "vm/Activation.h" // js::Activation
+#include "wasm/WasmCodegenTypes.h" // js::wasm::TrapData
+#include "wasm/WasmConstants.h" // js::wasm::Trap
+#include "wasm/WasmFrame.h" // js::wasm::Frame
+#include "wasm/WasmFrameIter.h" // js::wasm::{ExitReason,RegisterState,WasmFrameIter}
+
+struct JS_PUBLIC_API JSContext;
+class JS_PUBLIC_API JSTracer;
+
+namespace js {
+
+namespace jit {
+
+class BailoutFrameInfo;
+
+// A JitActivation is used for frames running in Baseline or Ion.
+class JitActivation : public Activation {
+ // If Baseline, Ion or Wasm code is on the stack, and has called into C++,
+  // this will be aligned to an ExitFrame. The low bit indicates whether it is
+  // a wasm frame (wasm::ExitOrJitEntryFPTag bit set) or a JS JIT frame (tag
+  // bit clear).
+ uint8_t* packedExitFP_;
+
+ // When hasWasmExitFP(), encodedWasmExitReason_ holds ExitReason.
+ uint32_t encodedWasmExitReason_;
+
+ JitActivation* prevJitActivation_;
+
+  // Rematerialized Ion frames, which have info copied out of snapshots. Maps
+ // frame pointers (i.e. packedExitFP_) to a vector of rematerializations of
+ // all inline frames associated with that frame.
+ //
+ // This table is lazily initialized by calling getRematerializedFrame.
+ using RematerializedFrameVector =
+ JS::GCVector<js::UniquePtr<RematerializedFrame>>;
+ using RematerializedFrameTable =
+ js::HashMap<uint8_t*, RematerializedFrameVector>;
+ js::UniquePtr<RematerializedFrameTable> rematerializedFrames_;
+
+ // This vector is used to remember the outcome of the evaluation of recover
+ // instructions.
+ //
+ // RInstructionResults are appended into this vector when Snapshot values
+ // have to be read, or when the evaluation has to run before some mutating
+ // code. Each RInstructionResults belongs to one frame which has to bailout
+ // as soon as we get back to it.
+ using IonRecoveryMap = Vector<RInstructionResults, 1>;
+ IonRecoveryMap ionRecovery_;
+
+ // If we are bailing out from Ion, then this field should be a non-null
+ // pointer which references the BailoutFrameInfo used to walk the inner
+ // frames. This field is used for all newly constructed JSJitFrameIters to
+ // read the innermost frame information from this bailout data instead of
+ // reading it from the stack.
+ BailoutFrameInfo* bailoutData_;
+
+ // When profiling is enabled, these fields will be updated to reflect the
+ // last pushed frame for this activation, and if that frame has been
+ // left for a call, the native code site of the call.
+ mozilla::Atomic<JitFrameLayout*, mozilla::Relaxed> lastProfilingFrame_;
+ mozilla::Atomic<void*, mozilla::Relaxed> lastProfilingCallSite_;
+ static_assert(sizeof(mozilla::Atomic<void*, mozilla::Relaxed>) ==
+ sizeof(void*),
+ "Atomic should have same memory format as underlying type.");
+
+ // When wasm traps, the signal handler records some data for unwinding
+ // purposes. Wasm code can't trap reentrantly.
+ mozilla::Maybe<wasm::TrapData> wasmTrapData_;
+
+ void clearRematerializedFrames();
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ protected:
+ // Used to verify that live registers don't change between a VM call and
+ // the OsiPoint that follows it. Protected to silence Clang warning.
+ uint32_t checkRegs_ = 0;
+ RegisterDump regs_;
+#endif
+
+ public:
+ explicit JitActivation(JSContext* cx);
+ ~JitActivation();
+
+ bool isProfiling() const {
+ // All JitActivations can be profiled.
+ return true;
+ }
+
+ JitActivation* prevJitActivation() const { return prevJitActivation_; }
+ static size_t offsetOfPrevJitActivation() {
+ return offsetof(JitActivation, prevJitActivation_);
+ }
+
+ bool hasExitFP() const { return !!packedExitFP_; }
+ uint8_t* jsOrWasmExitFP() const {
+ if (hasWasmExitFP()) {
+ return wasm::Frame::untagExitFP(packedExitFP_);
+ }
+ return packedExitFP_;
+ }
+ static size_t offsetOfPackedExitFP() {
+ return offsetof(JitActivation, packedExitFP_);
+ }
+
+ bool hasJSExitFP() const { return !hasWasmExitFP(); }
+
+ uint8_t* jsExitFP() const {
+ MOZ_ASSERT(hasJSExitFP());
+ return packedExitFP_;
+ }
+ void setJSExitFP(uint8_t* fp) { packedExitFP_ = fp; }
+
+#ifdef CHECK_OSIPOINT_REGISTERS
+ void setCheckRegs(bool check) { checkRegs_ = check; }
+ static size_t offsetOfCheckRegs() {
+ return offsetof(JitActivation, checkRegs_);
+ }
+ static size_t offsetOfRegs() { return offsetof(JitActivation, regs_); }
+#endif
+
+ // Look up a rematerialized frame keyed by the fp, rematerializing the
+ // frame if one doesn't already exist. A frame can only be rematerialized
+ // if an IonFrameIterator pointing to the nearest uninlined frame can be
+ // provided, as values need to be read out of snapshots.
+ //
+ // The inlineDepth must be within bounds of the frame pointed to by iter.
+ RematerializedFrame* getRematerializedFrame(
+ JSContext* cx, const JSJitFrameIter& iter, size_t inlineDepth = 0,
+ MaybeReadFallback::FallbackConsequence consequence =
+ MaybeReadFallback::Fallback_Invalidate);
+
+ // Look up a rematerialized frame by the fp. If inlineDepth is out of
+ // bounds of what has been rematerialized, nullptr is returned.
+ RematerializedFrame* lookupRematerializedFrame(uint8_t* top,
+ size_t inlineDepth = 0);
+
+ // Remove all rematerialized frames associated with the fp top from the
+ // Debugger.
+ void removeRematerializedFramesFromDebugger(JSContext* cx, uint8_t* top);
+
+ bool hasRematerializedFrame(uint8_t* top, size_t inlineDepth = 0) {
+ return !!lookupRematerializedFrame(top, inlineDepth);
+ }
+
+ // Remove a previous rematerialization by fp.
+ void removeRematerializedFrame(uint8_t* top);
+
+ void traceRematerializedFrames(JSTracer* trc);
+
+  // Register the results of an Ion frame recovery.
+ bool registerIonFrameRecovery(RInstructionResults&& results);
+
+ // Return the pointer to the Ion frame recovery, if it is already registered.
+ RInstructionResults* maybeIonFrameRecovery(JitFrameLayout* fp);
+
+  // If an Ion frame recovery exists for the |fp| frame, remove it from the
+  // activation.
+ void removeIonFrameRecovery(JitFrameLayout* fp);
+
+ void traceIonRecovery(JSTracer* trc);
+
+ // Return the bailout information if it is registered.
+ const BailoutFrameInfo* bailoutData() const { return bailoutData_; }
+
+ // Register the bailout data when it is constructed.
+ void setBailoutData(BailoutFrameInfo* bailoutData);
+
+ // Unregister the bailout data when the frame is reconstructed.
+ void cleanBailoutData();
+
+ static size_t offsetOfLastProfilingFrame() {
+ return offsetof(JitActivation, lastProfilingFrame_);
+ }
+ JitFrameLayout* lastProfilingFrame() { return lastProfilingFrame_; }
+ void setLastProfilingFrame(JitFrameLayout* ptr) { lastProfilingFrame_ = ptr; }
+
+ static size_t offsetOfLastProfilingCallSite() {
+ return offsetof(JitActivation, lastProfilingCallSite_);
+ }
+ void* lastProfilingCallSite() { return lastProfilingCallSite_; }
+ void setLastProfilingCallSite(void* ptr) { lastProfilingCallSite_ = ptr; }
+
+ // WebAssembly specific attributes.
+ bool hasWasmExitFP() const { return wasm::Frame::isExitFP(packedExitFP_); }
+ wasm::Frame* wasmExitFP() const {
+ MOZ_ASSERT(hasWasmExitFP());
+ return reinterpret_cast<wasm::Frame*>(
+ wasm::Frame::untagExitFP(packedExitFP_));
+ }
+ wasm::Instance* wasmExitInstance() const {
+ return wasm::GetNearestEffectiveInstance(wasmExitFP());
+ }
+ void setWasmExitFP(const wasm::Frame* fp) {
+ if (fp) {
+ MOZ_ASSERT(!wasm::Frame::isExitFP(fp));
+ packedExitFP_ = wasm::Frame::addExitFPTag(fp);
+ MOZ_ASSERT(hasWasmExitFP());
+ } else {
+ packedExitFP_ = nullptr;
+ }
+ }
+ wasm::ExitReason wasmExitReason() const {
+ MOZ_ASSERT(hasWasmExitFP());
+ return wasm::ExitReason::Decode(encodedWasmExitReason_);
+ }
+ static size_t offsetOfEncodedWasmExitReason() {
+ return offsetof(JitActivation, encodedWasmExitReason_);
+ }
+
+ void startWasmTrap(wasm::Trap trap, uint32_t bytecodeOffset,
+ const wasm::RegisterState& state);
+ void finishWasmTrap();
+ bool isWasmTrapping() const { return !!wasmTrapData_; }
+ const wasm::TrapData& wasmTrapData() { return *wasmTrapData_; }
+};
+
+// A filtering of the ActivationIterator to only stop at JitActivations.
+class JitActivationIterator : public ActivationIterator {
+ void settle() {
+ while (!done() && !activation_->isJit()) {
+ ActivationIterator::operator++();
+ }
+ }
+
+ public:
+ explicit JitActivationIterator(JSContext* cx) : ActivationIterator(cx) {
+ settle();
+ }
+
+ JitActivationIterator& operator++() {
+ ActivationIterator::operator++();
+ settle();
+ return *this;
+ }
+};
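+
+// Example (editor's sketch): walking only the JIT activations on a context,
+// e.g. from tracing code (|trc| is assumed to be in scope):
+//
+//   for (js::jit::JitActivationIterator iter(cx); !iter.done(); ++iter) {
+//     iter->asJit()->traceRematerializedFrames(trc);
+//   }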
+
+} // namespace jit
+
+} // namespace js
+
+#endif // vm_JitActivation_h
diff --git a/js/src/vm/List-inl.h b/js/src/vm/List-inl.h
new file mode 100644
index 0000000000..f43dbca512
--- /dev/null
+++ b/js/src/vm/List-inl.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_List_inl_h
+#define vm_List_inl_h
+
+#include "vm/List.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include <stdint.h> // uint32_t
+
+#include "js/RootingAPI.h" // JS::Handle, JS::Rooted
+#include "js/Value.h" // JS::Value, JS::ObjectValue
+#include "vm/JSContext.h" // JSContext
+#include "vm/NativeObject.h" // js::NativeObject
+
+#include "vm/Compartment-inl.h" // JS::Compartment::wrap
+#include "vm/JSObject-inl.h" // js::NewObjectWithGivenProto
+#include "vm/NativeObject-inl.h" // js::NativeObject::*
+#include "vm/Realm-inl.h" // js::AutoRealm
+
+inline /* static */ js::ListObject* js::ListObject::create(JSContext* cx) {
+ return NewObjectWithGivenProto<ListObject>(cx, nullptr);
+}
+
+inline bool js::ListObject::append(JSContext* cx, JS::Handle<JS::Value> value) {
+ uint32_t len = length();
+
+ if (!ensureElements(cx, len + 1)) {
+ return false;
+ }
+
+ ensureDenseInitializedLength(len, 1);
+ setDenseElement(len, value);
+ return true;
+}
+
+inline bool js::ListObject::appendValueAndSize(JSContext* cx,
+ JS::Handle<JS::Value> value,
+ double size) {
+ uint32_t len = length();
+
+ if (!ensureElements(cx, len + 2)) {
+ return false;
+ }
+
+ ensureDenseInitializedLength(len, 2);
+ setDenseElement(len, value);
+ setDenseElement(len + 1, JS::DoubleValue(size));
+ return true;
+}
+
+inline JS::Value js::ListObject::popFirst(JSContext* cx) {
+ uint32_t len = length();
+ MOZ_ASSERT(len > 0);
+
+ JS::Value entry = get(0);
+ if (!tryShiftDenseElements(1)) {
+ moveDenseElements(0, 1, len - 1);
+ setDenseInitializedLength(len - 1);
+ shrinkElements(cx, len - 1);
+ }
+
+ MOZ_ASSERT(length() == len - 1);
+ return entry;
+}
+
+inline void js::ListObject::popFirstPair(JSContext* cx) {
+ uint32_t len = length();
+ MOZ_ASSERT(len > 0);
+ MOZ_ASSERT((len % 2) == 0);
+
+ if (!tryShiftDenseElements(2)) {
+ moveDenseElements(0, 2, len - 2);
+ setDenseInitializedLength(len - 2);
+ shrinkElements(cx, len - 2);
+ }
+
+ MOZ_ASSERT(length() == len - 2);
+}
+
+template <class T>
+inline T& js::ListObject::popFirstAs(JSContext* cx) {
+ return popFirst(cx).toObject().as<T>();
+}
+
+namespace js {
+
+/**
+ * Stores an empty ListObject in the given fixed slot of |obj|.
+ */
+[[nodiscard]] inline bool StoreNewListInFixedSlot(JSContext* cx,
+ JS::Handle<NativeObject*> obj,
+ uint32_t slot) {
+ AutoRealm ar(cx, obj);
+ ListObject* list = ListObject::create(cx);
+ if (!list) {
+ return false;
+ }
+
+ obj->setFixedSlot(slot, JS::ObjectValue(*list));
+ return true;
+}
+
+/**
+ * Given an object |obj| whose fixed slot |slot| contains a ListObject, append
+ * |toAppend| to that list.
+ */
+[[nodiscard]] inline bool AppendToListInFixedSlot(
+ JSContext* cx, JS::Handle<NativeObject*> obj, uint32_t slot,
+ JS::Handle<JSObject*> toAppend) {
+ JS::Rooted<ListObject*> list(
+ cx, &obj->getFixedSlot(slot).toObject().as<ListObject>());
+
+ AutoRealm ar(cx, list);
+ JS::Rooted<JS::Value> val(cx, JS::ObjectValue(*toAppend));
+ if (!cx->compartment()->wrap(cx, &val)) {
+ return false;
+ }
+ return list->append(cx, val);
+}
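+
+// Example (editor's sketch): a class keeping an internal queue in a reserved
+// slot might use the two helpers like this (QUEUE_SLOT is hypothetical):
+//
+//   if (!StoreNewListInFixedSlot(cx, obj, QUEUE_SLOT)) {
+//     return false;
+//   }
+//   if (!AppendToListInFixedSlot(cx, obj, QUEUE_SLOT, thing)) {
+//     return false;
+//   }
+//
+// Note that the append helper enters the list's realm and wraps |toAppend|,
+// so the stored value is always same-compartment with the list.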
+
+} // namespace js
+
+#endif // vm_List_inl_h
diff --git a/js/src/vm/List.cpp b/js/src/vm/List.cpp
new file mode 100644
index 0000000000..9028d14e8a
--- /dev/null
+++ b/js/src/vm/List.cpp
@@ -0,0 +1,11 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/List-inl.h"
+
+using namespace js;
+
+const JSClass ListObject::class_ = {"List"};
diff --git a/js/src/vm/List.h b/js/src/vm/List.h
new file mode 100644
index 0000000000..96106b65d7
--- /dev/null
+++ b/js/src/vm/List.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_List_h
+#define vm_List_h
+
+#include "NamespaceImports.h"
+#include "js/Value.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+
+/**
+ * The List specification type, ECMA-262 6.2.1.
+ * <https://tc39.github.io/ecma262/#sec-list-and-record-specification-type>
+ *
+ * Lists are simple mutable sequences of values. Many standards use them.
+ * Abstractly, they're not objects; they don't have properties or prototypes;
+ * they're for internal specification use only. ListObject is our most direct
+ * implementation of a List: store the values in the slots of a JSObject.
+ *
+ * We often implement Lists in other ways. For example, builtin/Utilities.js
+ * contains a completely unrelated List constructor that's used in self-hosted
+ * code. And AsyncGeneratorObject optimizes away the ListObject in the common
+ * case where its internal queue never holds more than one element.
+ *
+ * ListObjects must not be exposed to content scripts.
+ */
+class ListObject : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ [[nodiscard]] inline static ListObject* create(JSContext* cx);
+
+ uint32_t length() const { return getDenseInitializedLength(); }
+
+ bool isEmpty() const { return length() == 0; }
+
+ const Value& get(uint32_t index) const { return getDenseElement(index); }
+
+ template <class T>
+ T& getAs(uint32_t index) const {
+ return get(index).toObject().as<T>();
+ }
+
+ /**
+ * Add an element to the end of the list. Returns false on OOM.
+ */
+ [[nodiscard]] inline bool append(JSContext* cx, HandleValue value);
+
+ /**
+ * Adds |value| and |size| elements to a list consisting of (value, size)
+ * pairs stored in successive elements.
+ *
+ * This function is intended for use by streams code's queue-with-sizes data
+ * structure and related operations. See builtin/streams/QueueWithSizes*.
+ * (You *could* use this on any list of even length without issue, but it's
+ * hard to imagine realistic situations where you'd want to...)
+ */
+ [[nodiscard]] inline bool appendValueAndSize(JSContext* cx, HandleValue value,
+ double size);
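+  // For example (illustrative): two calls appendValueAndSize(cx, v1, 3) and
+  // appendValueAndSize(cx, v2, 7) leave the list holding [v1, 3, v2, 7];
+  // popFirstPair() then removes v1 and 3.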
+
+ /**
+ * Remove and return the first element of the list.
+ *
+ * Precondition: This list is not empty.
+ */
+ inline JS::Value popFirst(JSContext* cx);
+
+ /**
+ * Remove the first two elements from a nonempty list of (value, size) pairs
+ * of elements.
+ */
+ inline void popFirstPair(JSContext* cx);
+
+ /**
+ * Remove and return the first element of the list.
+ *
+ * Precondition: This list is not empty, and the first element
+ * is an object of class T.
+ */
+ template <class T>
+ inline T& popFirstAs(JSContext* cx);
+};
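+
+/*
+ * Illustrative sketch (not part of the original file) of the basic API:
+ *
+ *   Rooted<ListObject*> list(cx, ListObject::create(cx));
+ *   if (!list || !list->append(cx, value)) {
+ *     return false;  // OOM
+ *   }
+ *   while (!list->isEmpty()) {
+ *     Rooted<Value> head(cx, list->popFirst(cx));
+ *     // ... consume |head| ...
+ *   }
+ */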
+
+} // namespace js
+
+#endif // vm_List_h
diff --git a/js/src/vm/MallocProvider.h b/js/src/vm/MallocProvider.h
new file mode 100644
index 0000000000..b9ce9b0a4f
--- /dev/null
+++ b/js/src/vm/MallocProvider.h
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Hierarchy of SpiderMonkey system memory allocators:
+ *
+ * - System {m,c,re}alloc/new/free: Overridden by jemalloc in most
+ * environments. Do not use these functions directly.
+ *
+ * - js_{m,c,re}alloc/new/free: Wraps the system allocators and adds a
+ * failure injection framework for use by the fuzzers as well as templated,
+ * typesafe variants. See js/public/Utility.h.
+ *
+ * - AllocPolicy: An interface for the js allocators, for use with templates.
+ * These allocators are for system memory whose lifetime is not associated
+ * with a GC thing. See js/public/AllocPolicy.h.
+ *
+ * - SystemAllocPolicy: No extra functionality over bare allocators.
+ *
+ * - TempAllocPolicy: Adds automatic error reporting to the provided
+ * JSContext when allocations fail.
+ *
+ * - ZoneAllocPolicy: Forwards to the Zone MallocProvider.
+ *
+ * - MallocProvider: A mixin base class that handles automatically updating
+ * the GC's state in response to allocations that are tied to a GC lifetime
+ * or are for a particular GC purpose. These allocators must only be used
+ * for memory that will be freed when a GC thing is swept.
+ *
+ * - gc::Zone: Automatically triggers zone GC.
+ * - JSRuntime: Automatically triggers full GC.
+ * - JSContext: Dispatches directly to the runtime.
+ */
+
+#ifndef vm_MallocProvider_h
+#define vm_MallocProvider_h
+
+#include "mozilla/Attributes.h" // MOZ_ALWAYS_INLINE
+#include "mozilla/Likely.h" // MOZ_LIKELY, MOZ_UNLIKELY
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t
+
+#include "js/AllocPolicy.h" // AllocFunction
+#include "js/UniquePtr.h" // UniquePtr
+#include "js/Utility.h" // js_malloc, MallocArena, CalculateAllocSize, CalculateAllocSizeWithExtra, JS::FreePolicy
+
+namespace js {
+
+template <class Client>
+struct MallocProvider {
+ template <class T>
+ T* maybe_pod_arena_malloc(arena_id_t arena, size_t numElems) {
+ T* p = js_pod_arena_malloc<T>(arena, numElems);
+ if (MOZ_LIKELY(p)) {
+ client()->updateMallocCounter(numElems * sizeof(T));
+ }
+ return p;
+ }
+
+ template <class T>
+ T* maybe_pod_arena_calloc(arena_id_t arena, size_t numElems) {
+ T* p = js_pod_arena_calloc<T>(arena, numElems);
+ if (MOZ_LIKELY(p)) {
+ client()->updateMallocCounter(numElems * sizeof(T));
+ }
+ return p;
+ }
+
+ template <class T>
+ T* maybe_pod_arena_realloc(arena_id_t arena, T* prior, size_t oldSize,
+ size_t newSize) {
+ T* p = js_pod_arena_realloc<T>(arena, prior, oldSize, newSize);
+ if (MOZ_LIKELY(p)) {
+      // For compatibility we do not account for reallocs that shrink
+      // previously allocated memory.
+ if (newSize > oldSize) {
+ client()->updateMallocCounter((newSize - oldSize) * sizeof(T));
+ }
+ }
+ return p;
+ }
+
+ template <class T>
+ T* maybe_pod_malloc(size_t numElems) {
+ return maybe_pod_arena_malloc<T>(js::MallocArena, numElems);
+ }
+
+ template <class T>
+ T* maybe_pod_calloc(size_t numElems) {
+ return maybe_pod_arena_calloc<T>(js::MallocArena, numElems);
+ }
+
+ template <class T>
+ T* maybe_pod_realloc(T* prior, size_t oldSize, size_t newSize) {
+ return maybe_pod_arena_realloc<T>(js::MallocArena, prior, oldSize, newSize);
+ }
+
+ template <class T>
+ T* pod_malloc() {
+ return pod_malloc<T>(1);
+ }
+
+ template <class T>
+ T* pod_arena_malloc(arena_id_t arena, size_t numElems) {
+ T* p = maybe_pod_arena_malloc<T>(arena, numElems);
+ if (MOZ_LIKELY(p)) {
+ return p;
+ }
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) {
+ client()->reportAllocationOverflow();
+ return nullptr;
+ }
+ p = (T*)client()->onOutOfMemory(AllocFunction::Malloc, arena, bytes);
+ if (p) {
+ client()->updateMallocCounter(bytes);
+ }
+ return p;
+ }
+
+ template <class T>
+ T* pod_malloc(size_t numElems) {
+ return pod_arena_malloc<T>(js::MallocArena, numElems);
+ }
+
+ template <class T, class U>
+ T* pod_malloc_with_extra(size_t numExtra) {
+ size_t bytes;
+ if (MOZ_UNLIKELY((!CalculateAllocSizeWithExtra<T, U>(numExtra, &bytes)))) {
+ client()->reportAllocationOverflow();
+ return nullptr;
+ }
+ T* p = static_cast<T*>(js_malloc(bytes));
+ if (MOZ_LIKELY(p)) {
+ client()->updateMallocCounter(bytes);
+ return p;
+ }
+ p = (T*)client()->onOutOfMemory(AllocFunction::Malloc, js::MallocArena,
+ bytes);
+ if (p) {
+ client()->updateMallocCounter(bytes);
+ }
+ return p;
+ }
+
+ template <class T>
+ UniquePtr<T[], JS::FreePolicy> make_pod_arena_array(arena_id_t arena,
+ size_t numElems) {
+ return UniquePtr<T[], JS::FreePolicy>(pod_arena_malloc<T>(arena, numElems));
+ }
+
+ template <class T>
+ UniquePtr<T[], JS::FreePolicy> make_pod_array(size_t numElems) {
+ return make_pod_arena_array<T>(js::MallocArena, numElems);
+ }
+
+ template <class T>
+ T* pod_arena_calloc(arena_id_t arena, size_t numElems = 1) {
+ T* p = maybe_pod_arena_calloc<T>(arena, numElems);
+ if (MOZ_LIKELY(p)) {
+ return p;
+ }
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes))) {
+ client()->reportAllocationOverflow();
+ return nullptr;
+ }
+ p = (T*)client()->onOutOfMemory(AllocFunction::Calloc, arena, bytes);
+ if (p) {
+ client()->updateMallocCounter(bytes);
+ }
+ return p;
+ }
+
+ template <class T>
+ T* pod_calloc(size_t numElems = 1) {
+ return pod_arena_calloc<T>(js::MallocArena, numElems);
+ }
+
+ template <class T, class U>
+ T* pod_calloc_with_extra(size_t numExtra) {
+ size_t bytes;
+ if (MOZ_UNLIKELY((!CalculateAllocSizeWithExtra<T, U>(numExtra, &bytes)))) {
+ client()->reportAllocationOverflow();
+ return nullptr;
+ }
+ T* p = static_cast<T*>(js_calloc(bytes));
+ if (p) {
+ client()->updateMallocCounter(bytes);
+ return p;
+ }
+ p = (T*)client()->onOutOfMemory(AllocFunction::Calloc, js::MallocArena,
+ bytes);
+ if (p) {
+ client()->updateMallocCounter(bytes);
+ }
+ return p;
+ }
+
+ template <class T>
+ UniquePtr<T[], JS::FreePolicy> make_zeroed_pod_array(size_t numElems) {
+ return UniquePtr<T[], JS::FreePolicy>(pod_calloc<T>(numElems));
+ }
+
+ template <class T>
+ T* pod_arena_realloc(arena_id_t arena, T* prior, size_t oldSize,
+ size_t newSize) {
+ T* p = maybe_pod_arena_realloc(arena, prior, oldSize, newSize);
+ if (MOZ_LIKELY(p)) {
+ return p;
+ }
+ size_t bytes;
+ if (MOZ_UNLIKELY(!CalculateAllocSize<T>(newSize, &bytes))) {
+ client()->reportAllocationOverflow();
+ return nullptr;
+ }
+ p = (T*)client()->onOutOfMemory(AllocFunction::Realloc, arena, bytes,
+ prior);
+ if (p && newSize > oldSize) {
+ client()->updateMallocCounter((newSize - oldSize) * sizeof(T));
+ }
+ return p;
+ }
+
+ template <class T>
+ T* pod_realloc(T* prior, size_t oldSize, size_t newSize) {
+ return pod_arena_realloc<T>(js::MallocArena, prior, oldSize, newSize);
+ }
+
+ JS_DECLARE_NEW_METHODS(new_, pod_malloc<uint8_t>, MOZ_ALWAYS_INLINE)
+ JS_DECLARE_NEW_ARENA_METHODS(
+ arena_new_,
+ [this](arena_id_t arena, size_t size) {
+        return pod_arena_malloc<uint8_t>(arena, size);
+ },
+ MOZ_ALWAYS_INLINE)
+
+ JS_DECLARE_MAKE_METHODS(make_unique, new_, MOZ_ALWAYS_INLINE)
+ JS_DECLARE_MAKE_METHODS(arena_make_unique, arena_new_, MOZ_ALWAYS_INLINE)
+
+ private:
+ Client* client() { return static_cast<Client*>(this); }
+
+  // The default implementation is a no-op that clients can override.
+ void updateMallocCounter(size_t nbytes) {}
+};
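+
+/*
+ * Illustrative sketch (not part of the original file): a client mixes in
+ * MallocProvider via CRTP and supplies the hooks it calls. |MyZone| and the
+ * exact hook signatures below are assumptions inferred from the calls above:
+ *
+ *   class MyZone : public MallocProvider<MyZone> {
+ *    public:
+ *     void* onOutOfMemory(AllocFunction allocFunc, arena_id_t arena,
+ *                         size_t nbytes, void* reallocPtr = nullptr);
+ *     void reportAllocationOverflow();
+ *     void updateMallocCounter(size_t nbytes);  // shadows the no-op default
+ *   };
+ *
+ *   uint32_t* elems = myZone->pod_malloc<uint32_t>(16);
+ */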
+
+} /* namespace js */
+
+#endif /* vm_MallocProvider_h */
diff --git a/js/src/vm/MatchPairs.h b/js/src/vm/MatchPairs.h
new file mode 100644
index 0000000000..6bb60b46dd
--- /dev/null
+++ b/js/src/vm/MatchPairs.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_MatchPairs_h
+#define vm_MatchPairs_h
+
+#include "js/AllocPolicy.h"
+#include "js/Vector.h"
+
+/*
+ * RegExp match results are succinctly represented by pairs of integer
+ * indices delimiting [start, limit) segments of the input string.
+ *
+ * The pair count for a given RegExp match is the capturing parentheses
+ * count plus one for the "0 capturing paren" whole text match.
+ */
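+
+/*
+ * For example (an illustration, not part of the original file): matching
+ * /f(o+)(z)?/ against "foo" yields pairCount = 3, i.e. two capturing
+ * parentheses plus the whole-match pair:
+ *
+ *   pair 0: start = 0, limit = 3    whole match "foo"
+ *   pair 1: start = 1, limit = 3    group 1 matched "oo"
+ *   pair 2: start = -1, limit = -1  group 2 did not match (MatchPair::NoMatch)
+ */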
+
+namespace js {
+
+struct MatchPair final {
+ int32_t start;
+ int32_t limit;
+
+ static constexpr int32_t NoMatch = -1;
+
+ MatchPair() : start(NoMatch), limit(NoMatch) {}
+
+ MatchPair(int32_t start, int32_t limit) : start(start), limit(limit) {}
+
+ size_t length() const {
+ MOZ_ASSERT(!isUndefined());
+ return limit - start;
+ }
+ bool isUndefined() const { return start < 0; }
+
+ inline bool check() const {
+ MOZ_ASSERT(limit >= start);
+ MOZ_ASSERT_IF(start < 0, start == NoMatch);
+ MOZ_ASSERT_IF(limit < 0, limit == NoMatch);
+ return true;
+ }
+
+ // Note: return int32_t instead of size_t to prevent signed => unsigned
+ // conversions in caller functions.
+ static constexpr int32_t offsetOfStart() {
+ return int32_t(offsetof(MatchPair, start));
+ }
+ static constexpr int32_t offsetOfLimit() {
+ return int32_t(offsetof(MatchPair, limit));
+ }
+};
+
+// MatchPairs is used as a base class for VectorMatchPairs but can also be
+// stack-allocated (without a Vector) in JIT code.
+class MatchPairs {
+ protected:
+ /* Length of pairs_. */
+ uint32_t pairCount_;
+
+ /* Raw pointer into an allocated MatchPair buffer. */
+ MatchPair* pairs_;
+
+ protected:
+ /* Not used directly: use VectorMatchPairs. */
+ MatchPairs() : pairCount_(0), pairs_(nullptr) {}
+
+ protected:
+ /* Functions used by friend classes. */
+ friend class RegExpShared;
+ friend class RegExpStatics;
+
+ void forgetArray() { pairs_ = nullptr; }
+
+ public:
+ void checkAgainst(size_t inputLength) {
+#ifdef DEBUG
+ for (size_t i = 0; i < pairCount_; i++) {
+ const MatchPair& p = (*this)[i];
+ MOZ_ASSERT(p.check());
+ if (p.isUndefined()) {
+ continue;
+ }
+ MOZ_ASSERT(size_t(p.limit) <= inputLength);
+ }
+#endif
+ }
+
+ /* Querying functions in the style of RegExpStatics. */
+ bool empty() const { return pairCount_ == 0; }
+ size_t pairCount() const {
+ MOZ_ASSERT(pairCount_ > 0);
+ return pairCount_;
+ }
+
+ // Note: return int32_t instead of size_t to prevent signed => unsigned
+ // conversions in caller functions.
+ static constexpr int32_t offsetOfPairs() {
+ return int32_t(offsetof(MatchPairs, pairs_));
+ }
+ static constexpr int32_t offsetOfPairCount() {
+ return int32_t(offsetof(MatchPairs, pairCount_));
+ }
+
+ int32_t* pairsRaw() { return reinterpret_cast<int32_t*>(pairs_); }
+
+ public:
+ size_t length() const { return pairCount_; }
+
+ const MatchPair& operator[](size_t i) const {
+ MOZ_ASSERT(i < pairCount_);
+ return pairs_[i];
+ }
+ MatchPair& operator[](size_t i) {
+ MOZ_ASSERT(i < pairCount_);
+ return pairs_[i];
+ }
+};
+
+class VectorMatchPairs : public MatchPairs {
+ Vector<MatchPair, 10, SystemAllocPolicy> vec_;
+
+ protected:
+ friend class RegExpShared;
+ friend class RegExpStatics;
+
+ /* MatchPair buffer allocator: set pairs_ and pairCount_. */
+ bool allocOrExpandArray(size_t pairCount);
+
+ bool initArrayFrom(VectorMatchPairs& copyFrom);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return vec_.sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+} /* namespace js */
+
+#endif /* vm_MatchPairs_h */
diff --git a/js/src/vm/MemoryMetrics.cpp b/js/src/vm/MemoryMetrics.cpp
new file mode 100644
index 0000000000..7c7d72394c
--- /dev/null
+++ b/js/src/vm/MemoryMetrics.cpp
@@ -0,0 +1,889 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/MemoryMetrics.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#include "gc/GC.h"
+#include "gc/Memory.h"
+#include "gc/Nursery.h"
+#include "gc/PublicIterators.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Ion.h"
+#include "js/HeapAPI.h"
+#include "util/Text.h"
+#include "vm/BigIntType.h"
+#include "vm/HelperThreadState.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "vm/PropMap.h"
+#include "vm/Realm.h"
+#include "vm/Runtime.h"
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmModule.h"
+
+#include "wasm/WasmInstance-inl.h"
+
+using mozilla::MallocSizeOf;
+using mozilla::PodCopy;
+
+using namespace js;
+
+using JS::ObjectPrivateVisitor;
+using JS::RealmStats;
+using JS::RuntimeStats;
+using JS::ZoneStats;
+
+namespace js {
+
+JS_PUBLIC_API size_t MemoryReportingSundriesThreshold() { return 8 * 1024; }
+
+/* static */
+HashNumber InefficientNonFlatteningStringHashPolicy::hash(const Lookup& l) {
+ if (l->isLinear()) {
+ return HashStringChars(&l->asLinear());
+ }
+
+ // Use rope's non-copying hash function.
+ uint32_t hash = 0;
+ if (!l->asRope().hash(&hash)) {
+ MOZ_CRASH("oom");
+ }
+ return hash;
+}
+
+template <typename Char1, typename Char2>
+static bool EqualStringsPure(JSString* s1, JSString* s2) {
+ if (s1->length() != s2->length()) {
+ return false;
+ }
+
+ const Char1* c1;
+ UniquePtr<Char1[], JS::FreePolicy> ownedChars1;
+ JS::AutoCheckCannotGC nogc;
+ if (s1->isLinear()) {
+ c1 = s1->asLinear().chars<Char1>(nogc);
+ } else {
+ ownedChars1 =
+ s1->asRope().copyChars<Char1>(/* tcx */ nullptr, js::MallocArena);
+ if (!ownedChars1) {
+ MOZ_CRASH("oom");
+ }
+ c1 = ownedChars1.get();
+ }
+
+ const Char2* c2;
+ UniquePtr<Char2[], JS::FreePolicy> ownedChars2;
+ if (s2->isLinear()) {
+ c2 = s2->asLinear().chars<Char2>(nogc);
+ } else {
+ ownedChars2 =
+ s2->asRope().copyChars<Char2>(/* tcx */ nullptr, js::MallocArena);
+ if (!ownedChars2) {
+ MOZ_CRASH("oom");
+ }
+ c2 = ownedChars2.get();
+ }
+
+ return EqualChars(c1, c2, s1->length());
+}
+
+/* static */
+bool InefficientNonFlatteningStringHashPolicy::match(const JSString* const& k,
+ const Lookup& l) {
+ // We can't use js::EqualStrings, because that flattens our strings.
+ JSString* s1 = const_cast<JSString*>(k);
+ if (k->hasLatin1Chars()) {
+ return l->hasLatin1Chars() ? EqualStringsPure<Latin1Char, Latin1Char>(s1, l)
+ : EqualStringsPure<Latin1Char, char16_t>(s1, l);
+ }
+
+ return l->hasLatin1Chars() ? EqualStringsPure<char16_t, Latin1Char>(s1, l)
+ : EqualStringsPure<char16_t, char16_t>(s1, l);
+}
+
+} // namespace js
+
+namespace JS {
+
+template <typename CharT>
+static void StoreStringChars(char* buffer, size_t bufferSize, JSString* str) {
+ const CharT* chars;
+ UniquePtr<CharT[], JS::FreePolicy> ownedChars;
+ JS::AutoCheckCannotGC nogc;
+ if (str->isLinear()) {
+ chars = str->asLinear().chars<CharT>(nogc);
+ } else {
+ ownedChars =
+ str->asRope().copyChars<CharT>(/* tcx */ nullptr, js::MallocArena);
+ if (!ownedChars) {
+ MOZ_CRASH("oom");
+ }
+ chars = ownedChars.get();
+ }
+
+ // We might truncate |str| even if it's much shorter than 1024 chars, if
+ // |str| contains unicode chars. Since this is just for a memory reporter,
+ // we don't care.
+ PutEscapedString(buffer, bufferSize, chars, str->length(), /* quote */ 0);
+}
+
+NotableStringInfo::NotableStringInfo(JSString* str, const StringInfo& info)
+ : StringInfo(info), length(str->length()) {
+ size_t bufferSize = std::min(str->length() + 1, size_t(MAX_SAVED_CHARS));
+ buffer.reset(js_pod_malloc<char>(bufferSize));
+ if (!buffer) {
+ MOZ_CRASH("oom");
+ }
+
+ if (str->hasLatin1Chars()) {
+ StoreStringChars<Latin1Char>(buffer.get(), bufferSize, str);
+ } else {
+ StoreStringChars<char16_t>(buffer.get(), bufferSize, str);
+ }
+}
+
+NotableClassInfo::NotableClassInfo(const char* className, const ClassInfo& info)
+ : ClassInfo(info) {
+ className_ = DuplicateString(className);
+ if (!className_) {
+ MOZ_CRASH("oom");
+ }
+}
+
+NotableScriptSourceInfo::NotableScriptSourceInfo(const char* filename,
+ const ScriptSourceInfo& info)
+ : ScriptSourceInfo(info) {
+ filename_ = DuplicateString(filename);
+ if (!filename_) {
+ MOZ_CRASH("oom");
+ }
+}
+
+} // namespace JS
+
+typedef HashSet<ScriptSource*, DefaultHasher<ScriptSource*>, SystemAllocPolicy>
+ SourceSet;
+
+struct StatsClosure {
+ RuntimeStats* rtStats;
+ ObjectPrivateVisitor* opv;
+ SourceSet seenSources;
+ wasm::Metadata::SeenSet wasmSeenMetadata;
+ wasm::Code::SeenSet wasmSeenCode;
+ wasm::Table::SeenSet wasmSeenTables;
+ bool anonymize;
+
+ StatsClosure(RuntimeStats* rt, ObjectPrivateVisitor* v, bool anon)
+ : rtStats(rt), opv(v), anonymize(anon) {}
+};
+
+static void DecommittedPagesChunkCallback(JSRuntime* rt, void* data,
+ gc::TenuredChunk* chunk,
+ const JS::AutoRequireNoGC& nogc) {
+ size_t n = 0;
+ for (uint32_t word : chunk->decommittedPages.Storage()) {
+ n += mozilla::CountPopulation32(word);
+ }
+
+ *static_cast<size_t*>(data) += n * gc::PageSize;
+}
+
+static void StatsZoneCallback(JSRuntime* rt, void* data, Zone* zone,
+ const JS::AutoRequireNoGC& nogc) {
+  // Append a new ZoneStats to the vector.
+ RuntimeStats* rtStats = static_cast<StatsClosure*>(data)->rtStats;
+
+ // CollectRuntimeStats reserves enough space.
+ MOZ_ALWAYS_TRUE(rtStats->zoneStatsVector.growBy(1));
+ ZoneStats& zStats = rtStats->zoneStatsVector.back();
+ zStats.initStrings();
+ rtStats->initExtraZoneStats(zone, &zStats, nogc);
+ rtStats->currZoneStats = &zStats;
+
+ zone->addSizeOfIncludingThis(
+ rtStats->mallocSizeOf_, &zStats.code, &zStats.regexpZone, &zStats.jitZone,
+ &zStats.baselineStubsOptimized, &zStats.uniqueIdMap,
+ &zStats.initialPropMapTable, &zStats.shapeTables,
+ &rtStats->runtime.atomsMarkBitmaps, &zStats.compartmentObjects,
+ &zStats.crossCompartmentWrappersTables, &zStats.compartmentsPrivateData,
+ &zStats.scriptCountsMap);
+}
+
+static void StatsRealmCallback(JSContext* cx, void* data, Realm* realm,
+ const JS::AutoRequireNoGC& nogc) {
+ // Append a new RealmStats to the vector.
+ RuntimeStats* rtStats = static_cast<StatsClosure*>(data)->rtStats;
+
+ // CollectRuntimeStats reserves enough space.
+ MOZ_ALWAYS_TRUE(rtStats->realmStatsVector.growBy(1));
+ RealmStats& realmStats = rtStats->realmStatsVector.back();
+ realmStats.initClasses();
+ rtStats->initExtraRealmStats(realm, &realmStats, nogc);
+
+ realm->setRealmStats(&realmStats);
+
+ // Measure the realm object itself, and things hanging off it.
+ realm->addSizeOfIncludingThis(
+ rtStats->mallocSizeOf_, &realmStats.realmObject, &realmStats.realmTables,
+ &realmStats.innerViewsTable, &realmStats.objectMetadataTable,
+ &realmStats.savedStacksSet, &realmStats.nonSyntacticLexicalScopesTable,
+ &realmStats.jitRealm);
+}
+
+static void StatsArenaCallback(JSRuntime* rt, void* data, gc::Arena* arena,
+ JS::TraceKind traceKind, size_t thingSize,
+ const JS::AutoRequireNoGC& nogc) {
+ RuntimeStats* rtStats = static_cast<StatsClosure*>(data)->rtStats;
+
+ // The admin space includes (a) the header fields and (b) the padding
+ // between the end of the header fields and the first GC thing.
+ size_t allocationSpace = gc::Arena::thingsSpan(arena->getAllocKind());
+ rtStats->currZoneStats->gcHeapArenaAdmin += gc::ArenaSize - allocationSpace;
+
+ // We don't call the callback on unused things. So we compute the
+ // unused space like this: arenaUnused = maxArenaUnused - arenaUsed.
+ // We do this by setting arenaUnused to maxArenaUnused here, and then
+ // subtracting thingSize for every used cell, in StatsCellCallback().
+ rtStats->currZoneStats->unusedGCThings.addToKind(traceKind, allocationSpace);
+}
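+
+// Worked example (illustrative): if an arena's usable span holds 100 things
+// of size 16, this callback adds 1600 bytes to unusedGCThings for the
+// arena's trace kind; StatsCellCallback() then subtracts 16 for each of the
+// used cells, so 60 used cells leave 640 bytes counted as unused.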
+
+// FineGrained is used for normal memory reporting. CoarseGrained is used by
+// AddSizeOfTab(), which aggregates all the measurements into a handful of
+// high-level numbers, which means that fine-grained reporting would be a waste
+// of effort.
+enum Granularity { FineGrained, CoarseGrained };
+
+static void AddClassInfo(Granularity granularity, RealmStats& realmStats,
+ const char* className, JS::ClassInfo& info) {
+ if (granularity == FineGrained) {
+ if (!className) {
+ className = "<no class name>";
+ }
+ RealmStats::ClassesHashMap::AddPtr p =
+ realmStats.allClasses->lookupForAdd(className);
+ if (!p) {
+ bool ok = realmStats.allClasses->add(p, className, info);
+ // Ignore failure -- we just won't record the
+ // object/shape/base-shape as notable.
+ (void)ok;
+ } else {
+ p->value().add(info);
+ }
+ }
+}
+
+template <Granularity granularity>
+static void CollectScriptSourceStats(StatsClosure* closure, ScriptSource* ss) {
+ RuntimeStats* rtStats = closure->rtStats;
+
+ SourceSet::AddPtr entry = closure->seenSources.lookupForAdd(ss);
+ if (entry) {
+ return;
+ }
+
+ bool ok = closure->seenSources.add(entry, ss);
+ (void)ok; // Not much to be done on failure.
+
+ JS::ScriptSourceInfo info; // This zeroes all the sizes.
+ ss->addSizeOfIncludingThis(rtStats->mallocSizeOf_, &info);
+
+ rtStats->runtime.scriptSourceInfo.add(info);
+
+ if (granularity == FineGrained) {
+ const char* filename = ss->filename();
+ if (!filename) {
+ filename = "<no filename>";
+ }
+
+ JS::RuntimeSizes::ScriptSourcesHashMap::AddPtr p =
+ rtStats->runtime.allScriptSources->lookupForAdd(filename);
+ if (!p) {
+ bool ok = rtStats->runtime.allScriptSources->add(p, filename, info);
+ // Ignore failure -- we just won't record the script source as notable.
+ (void)ok;
+ } else {
+ p->value().add(info);
+ }
+ }
+}
+
+// The various kinds of hashing are expensive, and the results are unused when
+// doing coarse-grained measurements. Skipping them more than doubles the
+// profile speed for complex pages such as gmail.com.
+template <Granularity granularity>
+static void StatsCellCallback(JSRuntime* rt, void* data, JS::GCCellPtr cellptr,
+ size_t thingSize,
+ const JS::AutoRequireNoGC& nogc) {
+ StatsClosure* closure = static_cast<StatsClosure*>(data);
+ RuntimeStats* rtStats = closure->rtStats;
+ ZoneStats* zStats = rtStats->currZoneStats;
+ JS::TraceKind kind = cellptr.kind();
+ switch (kind) {
+ case JS::TraceKind::Object: {
+ JSObject* obj = &cellptr.as<JSObject>();
+ RealmStats& realmStats = obj->maybeCCWRealm()->realmStats();
+ JS::ClassInfo info; // This zeroes all the sizes.
+ info.objectsGCHeap += thingSize;
+
+ if (!obj->isTenured()) {
+ info.objectsGCHeap += Nursery::nurseryCellHeaderSize();
+ }
+
+ obj->addSizeOfExcludingThis(rtStats->mallocSizeOf_, &info,
+ &rtStats->runtime);
+
+ // These classes require special handling due to shared resources which
+ // we must be careful not to report twice.
+ if (obj->is<WasmModuleObject>()) {
+ const wasm::Module& module = obj->as<WasmModuleObject>().module();
+ if (ScriptSource* ss = module.metadata().maybeScriptSource()) {
+ CollectScriptSourceStats<granularity>(closure, ss);
+ }
+ module.addSizeOfMisc(rtStats->mallocSizeOf_, &closure->wasmSeenMetadata,
+ &closure->wasmSeenCode,
+ &info.objectsNonHeapCodeWasm,
+ &info.objectsMallocHeapMisc);
+ } else if (obj->is<WasmInstanceObject>()) {
+ wasm::Instance& instance = obj->as<WasmInstanceObject>().instance();
+ if (ScriptSource* ss = instance.metadata().maybeScriptSource()) {
+ CollectScriptSourceStats<granularity>(closure, ss);
+ }
+ instance.addSizeOfMisc(
+ rtStats->mallocSizeOf_, &closure->wasmSeenMetadata,
+ &closure->wasmSeenCode, &closure->wasmSeenTables,
+ &info.objectsNonHeapCodeWasm, &info.objectsMallocHeapMisc);
+ }
+
+ realmStats.classInfo.add(info);
+
+ const JSClass* clasp = obj->getClass();
+ const char* className = clasp->name;
+ AddClassInfo(granularity, realmStats, className, info);
+
+ if (ObjectPrivateVisitor* opv = closure->opv) {
+ nsISupports* iface;
+ if (opv->getISupports_(obj, &iface) && iface) {
+ realmStats.objectsPrivate += opv->sizeOfIncludingThis(iface);
+ }
+ }
+ break;
+ }
+
+ case JS::TraceKind::Script: {
+ BaseScript* base = &cellptr.as<BaseScript>();
+ RealmStats& realmStats = base->realm()->realmStats();
+ realmStats.scriptsGCHeap += thingSize;
+ realmStats.scriptsMallocHeapData +=
+ base->sizeOfExcludingThis(rtStats->mallocSizeOf_);
+ if (base->hasJitScript()) {
+ JSScript* script = static_cast<JSScript*>(base);
+ script->addSizeOfJitScript(rtStats->mallocSizeOf_,
+ &realmStats.jitScripts,
+ &realmStats.baselineStubsFallback);
+ jit::AddSizeOfBaselineData(script, rtStats->mallocSizeOf_,
+ &realmStats.baselineData);
+ realmStats.ionData +=
+ jit::SizeOfIonData(script, rtStats->mallocSizeOf_);
+ }
+ CollectScriptSourceStats<granularity>(closure, base->scriptSource());
+ break;
+ }
+
+ case JS::TraceKind::String: {
+ JSString* str = &cellptr.as<JSString>();
+ size_t size = thingSize;
+ if (!str->isTenured()) {
+ size += Nursery::nurseryCellHeaderSize();
+ }
+
+ JS::StringInfo info;
+ if (str->hasLatin1Chars()) {
+ info.gcHeapLatin1 = size;
+ info.mallocHeapLatin1 =
+ str->sizeOfExcludingThis(rtStats->mallocSizeOf_);
+ } else {
+ info.gcHeapTwoByte = size;
+ info.mallocHeapTwoByte =
+ str->sizeOfExcludingThis(rtStats->mallocSizeOf_);
+ }
+ info.numCopies = 1;
+
+ zStats->stringInfo.add(info);
+
+ // The primary use case for anonymization is automated crash submission
+ // (to help detect OOM crashes). In that case, we don't want to pay the
+ // memory cost required to do notable string detection.
+ if (granularity == FineGrained && !closure->anonymize) {
+ ZoneStats::StringsHashMap::AddPtr p =
+ zStats->allStrings->lookupForAdd(str);
+ if (!p) {
+ bool ok = zStats->allStrings->add(p, str, info);
+ // Ignore failure -- we just won't record the string as notable.
+ (void)ok;
+ } else {
+ p->value().add(info);
+ }
+ }
+ break;
+ }
+
+ case JS::TraceKind::Symbol:
+ zStats->symbolsGCHeap += thingSize;
+ break;
+
+ case JS::TraceKind::BigInt: {
+ JS::BigInt* bi = &cellptr.as<BigInt>();
+ size_t size = thingSize;
+ if (!bi->isTenured()) {
+ size += Nursery::nurseryCellHeaderSize();
+ }
+ zStats->bigIntsGCHeap += size;
+ zStats->bigIntsMallocHeap +=
+ bi->sizeOfExcludingThis(rtStats->mallocSizeOf_);
+ break;
+ }
+
+ case JS::TraceKind::BaseShape: {
+ JS::ShapeInfo info; // This zeroes all the sizes.
+ info.shapesGCHeapBase += thingSize;
+ // No malloc-heap measurements.
+
+ zStats->shapeInfo.add(info);
+ break;
+ }
+
+ case JS::TraceKind::GetterSetter: {
+ zStats->getterSettersGCHeap += thingSize;
+ break;
+ }
+
+ case JS::TraceKind::PropMap: {
+ PropMap* map = &cellptr.as<PropMap>();
+ if (map->isDictionary()) {
+ zStats->dictPropMapsGCHeap += thingSize;
+ } else if (map->isCompact()) {
+ zStats->compactPropMapsGCHeap += thingSize;
+ } else {
+ MOZ_ASSERT(map->isNormal());
+ zStats->normalPropMapsGCHeap += thingSize;
+ }
+ map->addSizeOfExcludingThis(rtStats->mallocSizeOf_,
+ &zStats->propMapChildren,
+ &zStats->propMapTables);
+ break;
+ }
+
+ case JS::TraceKind::JitCode: {
+ zStats->jitCodesGCHeap += thingSize;
+ // The code for a script is counted in ExecutableAllocator::sizeOfCode().
+ break;
+ }
+
+ case JS::TraceKind::Shape: {
+ Shape* shape = &cellptr.as<Shape>();
+
+ JS::ShapeInfo info; // This zeroes all the sizes.
+ if (shape->isDictionary()) {
+ info.shapesGCHeapDict += thingSize;
+ } else {
+ info.shapesGCHeapShared += thingSize;
+ }
+ shape->addSizeOfExcludingThis(rtStats->mallocSizeOf_, &info);
+ zStats->shapeInfo.add(info);
+ break;
+ }
+
+ case JS::TraceKind::Scope: {
+ Scope* scope = &cellptr.as<Scope>();
+ zStats->scopesGCHeap += thingSize;
+ zStats->scopesMallocHeap +=
+ scope->sizeOfExcludingThis(rtStats->mallocSizeOf_);
+ break;
+ }
+
+ case JS::TraceKind::RegExpShared: {
+ auto regexp = &cellptr.as<RegExpShared>();
+ zStats->regExpSharedsGCHeap += thingSize;
+ zStats->regExpSharedsMallocHeap +=
+ regexp->sizeOfExcludingThis(rtStats->mallocSizeOf_);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("invalid traceKind in StatsCellCallback");
+ }
+
+ // Yes, this is a subtraction: see StatsArenaCallback() for details.
+ zStats->unusedGCThings.addToKind(kind, -thingSize);
+}
+
+void ZoneStats::initStrings() {
+ isTotals = false;
+ allStrings.emplace();
+}
+
+void RealmStats::initClasses() {
+ isTotals = false;
+ allClasses.emplace();
+}
+
+static bool FindNotableStrings(ZoneStats& zStats) {
+ using namespace JS;
+
+ // We should only run FindNotableStrings once per ZoneStats object.
+ MOZ_ASSERT(zStats.notableStrings.empty());
+
+ for (ZoneStats::StringsHashMap::Range r = zStats.allStrings->all();
+ !r.empty(); r.popFront()) {
+ JSString* str = r.front().key();
+ StringInfo& info = r.front().value();
+
+ if (!info.isNotable()) {
+ continue;
+ }
+
+ if (!zStats.notableStrings.emplaceBack(str, info)) {
+ return false;
+ }
+
+ // We're moving this string from a non-notable to a notable bucket, so
+ // subtract it out of the non-notable tallies.
+ zStats.stringInfo.subtract(info);
+ }
+ // Release |allStrings| now, rather than waiting for zStats's destruction, to
+ // reduce peak memory consumption during reporting.
+ zStats.allStrings.reset();
+ return true;
+}
+
+static bool FindNotableClasses(RealmStats& realmStats) {
+ using namespace JS;
+
+  // We should only run FindNotableClasses once per RealmStats object.
+ MOZ_ASSERT(realmStats.notableClasses.empty());
+
+ for (RealmStats::ClassesHashMap::Range r = realmStats.allClasses->all();
+ !r.empty(); r.popFront()) {
+ const char* className = r.front().key();
+ ClassInfo& info = r.front().value();
+
+    // If this class isn't notable, skip it.
+ if (!info.isNotable()) {
+ continue;
+ }
+
+ if (!realmStats.notableClasses.emplaceBack(className, info)) {
+ return false;
+ }
+
+ // We're moving this class from a non-notable to a notable bucket, so
+ // subtract it out of the non-notable tallies.
+ realmStats.classInfo.subtract(info);
+ }
+  // Release |allClasses| now, rather than waiting for realmStats's
+  // destruction, to reduce peak memory consumption during reporting.
+ realmStats.allClasses.reset();
+ return true;
+}
+
+static bool FindNotableScriptSources(JS::RuntimeSizes& runtime) {
+ using namespace JS;
+
+ // We should only run FindNotableScriptSources once per RuntimeSizes.
+ MOZ_ASSERT(runtime.notableScriptSources.empty());
+
+ for (RuntimeSizes::ScriptSourcesHashMap::Range r =
+ runtime.allScriptSources->all();
+ !r.empty(); r.popFront()) {
+ const char* filename = r.front().key();
+ ScriptSourceInfo& info = r.front().value();
+
+ if (!info.isNotable()) {
+ continue;
+ }
+
+ if (!runtime.notableScriptSources.emplaceBack(filename, info)) {
+ return false;
+ }
+
+ // We're moving this script source from a non-notable to a notable
+ // bucket, so subtract its sizes from the non-notable tallies.
+ runtime.scriptSourceInfo.subtract(info);
+ }
+  // Release |allScriptSources| now, rather than waiting for |runtime|'s
+  // destruction, to reduce peak memory consumption during reporting.
+ runtime.allScriptSources.reset();
+ return true;
+}
+
+static bool CollectRuntimeStatsHelper(JSContext* cx, RuntimeStats* rtStats,
+ ObjectPrivateVisitor* opv, bool anonymize,
+ IterateCellCallback statsCellCallback) {
+ // Finish any ongoing incremental GC that may change the data we're gathering
+ // and ensure that we don't do anything that could start another one.
+ gc::FinishGC(cx);
+ JS::AutoAssertNoGC nogc(cx);
+
+ // Wait for any background tasks to finish.
+ WaitForAllHelperThreads();
+
+ JSRuntime* rt = cx->runtime();
+ if (!rtStats->realmStatsVector.reserve(rt->numRealms)) {
+ return false;
+ }
+
+ size_t totalZones = rt->gc.zones().length();
+ if (!rtStats->zoneStatsVector.reserve(totalZones)) {
+ return false;
+ }
+
+ rtStats->gcHeapChunkTotal =
+ size_t(JS_GetGCParameter(cx, JSGC_TOTAL_CHUNKS)) * gc::ChunkSize;
+
+ rtStats->gcHeapUnusedChunks =
+ size_t(JS_GetGCParameter(cx, JSGC_UNUSED_CHUNKS)) * gc::ChunkSize;
+
+ if (js::gc::DecommitEnabled()) {
+ IterateChunks(cx, &rtStats->gcHeapDecommittedPages,
+ DecommittedPagesChunkCallback);
+ }
+
+ // Take the per-compartment measurements.
+ StatsClosure closure(rtStats, opv, anonymize);
+ IterateHeapUnbarriered(cx, &closure, StatsZoneCallback, StatsRealmCallback,
+ StatsArenaCallback, statsCellCallback);
+
+ // Take the "explicit/js/runtime/" measurements.
+ rt->addSizeOfIncludingThis(rtStats->mallocSizeOf_, &rtStats->runtime);
+
+ if (!FindNotableScriptSources(rtStats->runtime)) {
+ return false;
+ }
+
+ JS::ZoneStatsVector& zs = rtStats->zoneStatsVector;
+ ZoneStats& zTotals = rtStats->zTotals;
+
+ // We don't look for notable strings for zTotals. So we first sum all the
+ // zones' measurements to get the totals. Then we find the notable strings
+ // within each zone.
+ for (size_t i = 0; i < zs.length(); i++) {
+ zTotals.addSizes(zs[i]);
+ }
+
+ for (size_t i = 0; i < zs.length(); i++) {
+ if (!FindNotableStrings(zs[i])) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(!zTotals.allStrings);
+
+ JS::RealmStatsVector& realmStats = rtStats->realmStatsVector;
+ RealmStats& realmTotals = rtStats->realmTotals;
+
+  // As with the zones, we sum all realms first, and then find the
+  // notable classes within each realm.
+ for (size_t i = 0; i < realmStats.length(); i++) {
+ realmTotals.addSizes(realmStats[i]);
+ }
+
+ for (size_t i = 0; i < realmStats.length(); i++) {
+ if (!FindNotableClasses(realmStats[i])) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(!realmTotals.allClasses);
+
+ rtStats->gcHeapGCThings = rtStats->zTotals.sizeOfLiveGCThings() +
+ rtStats->realmTotals.sizeOfLiveGCThings();
+
+#ifdef DEBUG
+ // Check that the in-arena measurements look ok.
+ size_t totalArenaSize = rtStats->zTotals.gcHeapArenaAdmin +
+ rtStats->zTotals.unusedGCThings.totalSize() +
+ rtStats->gcHeapGCThings;
+ MOZ_ASSERT(totalArenaSize % gc::ArenaSize == 0);
+#endif
+
+ for (RealmsIter realm(rt); !realm.done(); realm.next()) {
+ realm->nullRealmStats();
+ }
+
+ size_t numDirtyChunks =
+ (rtStats->gcHeapChunkTotal - rtStats->gcHeapUnusedChunks) / gc::ChunkSize;
+ size_t perChunkAdmin =
+ sizeof(gc::TenuredChunk) - (sizeof(gc::Arena) * gc::ArenasPerChunk);
+ rtStats->gcHeapChunkAdmin = numDirtyChunks * perChunkAdmin;
+
+ // |gcHeapUnusedArenas| is the only thing left. Compute it in terms of
+ // all the others. See the comment in RuntimeStats for explanation.
+ rtStats->gcHeapUnusedArenas =
+ rtStats->gcHeapChunkTotal - rtStats->gcHeapDecommittedPages -
+ rtStats->gcHeapUnusedChunks -
+ rtStats->zTotals.unusedGCThings.totalSize() - rtStats->gcHeapChunkAdmin -
+ rtStats->zTotals.gcHeapArenaAdmin - rtStats->gcHeapGCThings;
+ return true;
+}
+
+JS_PUBLIC_API bool JS::CollectGlobalStats(GlobalStats* gStats) {
+ AutoLockHelperThreadState lock;
+
+ // HelperThreadState holds data that is not part of a Runtime. This does
+  // not include data that is currently being processed by a HelperThread.
+ if (IsHelperThreadStateInitialized()) {
+ HelperThreadState().addSizeOfIncludingThis(gStats, lock);
+ }
+
+ return true;
+}
+
+JS_PUBLIC_API bool JS::CollectRuntimeStats(JSContext* cx, RuntimeStats* rtStats,
+ ObjectPrivateVisitor* opv,
+ bool anonymize) {
+ return CollectRuntimeStatsHelper(cx, rtStats, opv, anonymize,
+ StatsCellCallback<FineGrained>);
+}
+
+JS_PUBLIC_API size_t JS::SystemCompartmentCount(JSContext* cx) {
+ size_t n = 0;
+ for (CompartmentsIter comp(cx->runtime()); !comp.done(); comp.next()) {
+ if (IsSystemCompartment(comp)) {
+ ++n;
+ }
+ }
+ return n;
+}
+
+JS_PUBLIC_API size_t JS::UserCompartmentCount(JSContext* cx) {
+ size_t n = 0;
+ for (CompartmentsIter comp(cx->runtime()); !comp.done(); comp.next()) {
+ if (!IsSystemCompartment(comp)) {
+ ++n;
+ }
+ }
+ return n;
+}
+
+JS_PUBLIC_API size_t JS::SystemRealmCount(JSContext* cx) {
+ size_t n = 0;
+ for (RealmsIter realm(cx->runtime()); !realm.done(); realm.next()) {
+ if (realm->isSystem()) {
+ ++n;
+ }
+ }
+ return n;
+}
+
+JS_PUBLIC_API size_t JS::UserRealmCount(JSContext* cx) {
+ size_t n = 0;
+ for (RealmsIter realm(cx->runtime()); !realm.done(); realm.next()) {
+ if (!realm->isSystem()) {
+ ++n;
+ }
+ }
+ return n;
+}
+
+JS_PUBLIC_API size_t JS::PeakSizeOfTemporary(const JSContext* cx) {
+ return cx->tempLifoAlloc().peakSizeOfExcludingThis();
+}
+
+namespace JS {
+
+class SimpleJSRuntimeStats : public JS::RuntimeStats {
+ public:
+ explicit SimpleJSRuntimeStats(MallocSizeOf mallocSizeOf)
+ : JS::RuntimeStats(mallocSizeOf) {}
+
+ virtual void initExtraZoneStats(JS::Zone* zone, JS::ZoneStats* zStats,
+ const JS::AutoRequireNoGC& nogc) override {}
+
+ virtual void initExtraRealmStats(Realm* realm, JS::RealmStats* realmStats,
+ const JS::AutoRequireNoGC& nogc) override {}
+};
+
+JS_PUBLIC_API bool AddSizeOfTab(JSContext* cx, HandleObject obj,
+ MallocSizeOf mallocSizeOf,
+ ObjectPrivateVisitor* opv, TabSizes* sizes) {
+ SimpleJSRuntimeStats rtStats(mallocSizeOf);
+
+ JS::Zone* zone = GetObjectZone(obj);
+
+ size_t numRealms = 0;
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+ numRealms += comp->realms().length();
+ }
+
+ if (!rtStats.realmStatsVector.reserve(numRealms)) {
+ return false;
+ }
+
+ if (!rtStats.zoneStatsVector.reserve(1)) {
+ return false;
+ }
+
+ // Take the per-compartment measurements. No need to anonymize because
+ // these measurements will be aggregated.
+ StatsClosure closure(&rtStats, opv, /* anonymize = */ false);
+ IterateHeapUnbarrieredForZone(cx, zone, &closure, StatsZoneCallback,
+ StatsRealmCallback, StatsArenaCallback,
+ StatsCellCallback<CoarseGrained>);
+
+ MOZ_ASSERT(rtStats.zoneStatsVector.length() == 1);
+ rtStats.zTotals.addSizes(rtStats.zoneStatsVector[0]);
+
+ for (size_t i = 0; i < rtStats.realmStatsVector.length(); i++) {
+ rtStats.realmTotals.addSizes(rtStats.realmStatsVector[i]);
+ }
+
+ for (RealmsInZoneIter realm(zone); !realm.done(); realm.next()) {
+ realm->nullRealmStats();
+ }
+
+ rtStats.zTotals.addToTabSizes(sizes);
+ rtStats.realmTotals.addToTabSizes(sizes);
+
+ return true;
+}
+
+JS_PUBLIC_API bool AddServoSizeOf(JSContext* cx, MallocSizeOf mallocSizeOf,
+ ObjectPrivateVisitor* opv,
+ ServoSizes* sizes) {
+ SimpleJSRuntimeStats rtStats(mallocSizeOf);
+
+ // No need to anonymize because the results will be aggregated.
+  if (!CollectRuntimeStatsHelper(cx, &rtStats, opv, /* anonymize = */ false,
+                                 StatsCellCallback<CoarseGrained>)) {
+    return false;
+  }
+
+#ifdef DEBUG
+ size_t gcHeapTotalOriginal = sizes->gcHeapUsed + sizes->gcHeapUnused +
+ sizes->gcHeapAdmin + sizes->gcHeapDecommitted;
+#endif
+
+ rtStats.addToServoSizes(sizes);
+ rtStats.zTotals.addToServoSizes(sizes);
+ rtStats.realmTotals.addToServoSizes(sizes);
+
+#ifdef DEBUG
+ size_t gcHeapTotal = sizes->gcHeapUsed + sizes->gcHeapUnused +
+ sizes->gcHeapAdmin + sizes->gcHeapDecommitted;
+ MOZ_ASSERT(rtStats.gcHeapChunkTotal == gcHeapTotal - gcHeapTotalOriginal);
+#endif
+
+ return true;
+}
+
+} // namespace JS
diff --git a/js/src/vm/ModuleBuilder.h b/js/src/vm/ModuleBuilder.h
new file mode 100644
index 0000000000..3716a6cdd2
--- /dev/null
+++ b/js/src/vm/ModuleBuilder.h
@@ -0,0 +1,118 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ModuleBuilder_h
+#define vm_ModuleBuilder_h
+
+#include "mozilla/Attributes.h" // MOZ_STACK_CLASS
+
+#include "jstypes.h" // JS_PUBLIC_API
+#include "frontend/EitherParser.h" // js::frontend::EitherParser
+#include "frontend/ParserAtom.h" // js::frontend::TaggedParserAtomIndex
+#include "frontend/Stencil.h" // js::frontend::StencilModuleEntry
+#include "frontend/TaggedParserAtomIndexHasher.h" // frontend::TaggedParserAtomIndexHasher
+#include "js/GCVector.h" // JS::GCVector
+
+struct JS_PUBLIC_API JSContext;
+class JS_PUBLIC_API JSAtom;
+
+namespace js {
+
+namespace frontend {
+
+class BinaryNode;
+class ListNode;
+class ParseNode;
+
+} // namespace frontend
+
+// Process a module's parse tree to collate the import and export data used when
+// creating a ModuleObject.
+class MOZ_STACK_CLASS ModuleBuilder {
+ explicit ModuleBuilder(FrontendContext* fc,
+ const frontend::EitherParser& eitherParser);
+
+ public:
+ template <class Parser>
+ explicit ModuleBuilder(FrontendContext* fc, Parser* parser)
+ : ModuleBuilder(fc, frontend::EitherParser(parser)) {}
+
+ bool processImport(frontend::BinaryNode* importNode);
+ bool processExport(frontend::ParseNode* exportNode);
+ bool processExportFrom(frontend::BinaryNode* exportNode);
+
+ bool hasExportedName(frontend::TaggedParserAtomIndex name) const;
+
+ bool buildTables(frontend::StencilModuleMetadata& metadata);
+
+  // During bytecode emission we note top-level functions, and afterwards we
+  // must call finishFunctionDecls on the list.
+ bool noteFunctionDeclaration(FrontendContext* fc, uint32_t funIndex);
+ void finishFunctionDecls(frontend::StencilModuleMetadata& metadata);
+
+ void noteAsync(frontend::StencilModuleMetadata& metadata);
+
+ private:
+ using MaybeModuleRequestIndex = frontend::MaybeModuleRequestIndex;
+ using ModuleRequestVector = frontend::StencilModuleMetadata::RequestVector;
+ using RequestedModuleVector = frontend::StencilModuleMetadata::EntryVector;
+
+ using AtomSet = HashSet<frontend::TaggedParserAtomIndex,
+ frontend::TaggedParserAtomIndexHasher>;
+ using ExportEntryVector = Vector<frontend::StencilModuleEntry>;
+ using ImportEntryMap =
+ HashMap<frontend::TaggedParserAtomIndex, frontend::StencilModuleEntry,
+ frontend::TaggedParserAtomIndexHasher>;
+
+ FrontendContext* fc_;
+ frontend::EitherParser eitherParser_;
+
+ // These are populated while parsing.
+ ModuleRequestVector moduleRequests_;
+ AtomSet requestedModuleSpecifiers_;
+ RequestedModuleVector requestedModules_;
+ ImportEntryMap importEntries_;
+ ExportEntryVector exportEntries_;
+ AtomSet exportNames_;
+
+ // These are populated while emitting bytecode.
+ FunctionDeclarationVector functionDecls_;
+
+ frontend::StencilModuleEntry* importEntryFor(
+ frontend::TaggedParserAtomIndex localName) const;
+
+ bool processExportBinding(frontend::ParseNode* binding);
+ bool processExportArrayBinding(frontend::ListNode* array);
+ bool processExportObjectBinding(frontend::ListNode* obj);
+
+ MaybeModuleRequestIndex appendModuleRequest(
+ frontend::TaggedParserAtomIndex specifier,
+ frontend::ListNode* assertionList);
+
+ bool appendExportEntry(frontend::TaggedParserAtomIndex exportName,
+ frontend::TaggedParserAtomIndex localName,
+ frontend::ParseNode* node = nullptr);
+
+ bool maybeAppendRequestedModule(MaybeModuleRequestIndex moduleRequest,
+ frontend::ParseNode* node);
+
+ void markUsedByStencil(frontend::TaggedParserAtomIndex name);
+
+ [[nodiscard]] bool processAssertions(frontend::StencilModuleRequest& request,
+ frontend::ListNode* assertionList);
+
+ [[nodiscard]] bool isAssertionSupported(
+ JS::ImportAssertion supportedAssertion,
+ frontend::TaggedParserAtomIndex key);
+};
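+
+// Illustrative usage sketch (not part of the original file); |importNode|,
+// |exportNode|, and |metadata| stand in for values a parser would supply:
+//
+//   ModuleBuilder builder(fc, parser);
+//   // While parsing the module body:
+//   if (!builder.processImport(importNode)) { return false; }
+//   if (!builder.processExport(exportNode)) { return false; }
+//   // After parsing completes:
+//   if (!builder.buildTables(metadata)) { return false; }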
+
+template <typename T>
+ArrayObject* CreateArray(JSContext* cx,
+ const JS::Rooted<JS::GCVector<T>>& vector);
+
+} // namespace js
+
+#endif // vm_ModuleBuilder_h
diff --git a/js/src/vm/Modules.cpp b/js/src/vm/Modules.cpp
new file mode 100644
index 0000000000..fe3a2fb30f
--- /dev/null
+++ b/js/src/vm/Modules.cpp
@@ -0,0 +1,1830 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JavaScript modules (as in, the syntactic construct) implementation. */
+
+#include "vm/Modules.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+
+#include <stdint.h> // uint32_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "builtin/ModuleObject.h" // js::FinishDynamicModuleImport, js::{,Requested}ModuleObject
+#include "ds/Sort.h"
+#include "frontend/BytecodeCompiler.h" // js::frontend::CompileModule
+#include "frontend/FrontendContext.h" // js::AutoReportFrontendContext
+#include "js/Context.h" // js::AssertHeapIsIdle
+#include "js/RootingAPI.h" // JS::MutableHandle
+#include "js/Value.h" // JS::Value
+#include "vm/EnvironmentObject.h" // js::ModuleEnvironmentObject
+#include "vm/JSContext.h" // CHECK_THREAD, JSContext
+#include "vm/JSObject.h" // JSObject
+#include "vm/List.h" // ListObject
+#include "vm/Runtime.h" // JSRuntime
+
+#include "vm/JSAtom-inl.h"
+#include "vm/JSContext-inl.h" // JSContext::{c,releaseC}heck
+
+using namespace js;
+
+using mozilla::Utf8Unit;
+
+////////////////////////////////////////////////////////////////////////////////
+// Public API
+
+JS_PUBLIC_API void JS::SetSupportedImportAssertions(
+ JSRuntime* rt, const ImportAssertionVector& assertions) {
+ AssertHeapIsIdle();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ MOZ_ASSERT(rt->supportedImportAssertions.ref().empty());
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!rt->supportedImportAssertions.ref().appendAll(assertions)) {
+ oomUnsafe.crash("SetSupportedImportAssertions");
+ }
+}
+
+JS_PUBLIC_API JS::ModuleResolveHook JS::GetModuleResolveHook(JSRuntime* rt) {
+ AssertHeapIsIdle();
+
+ return rt->moduleResolveHook;
+}
+
+JS_PUBLIC_API void JS::SetModuleResolveHook(JSRuntime* rt,
+ ModuleResolveHook func) {
+ AssertHeapIsIdle();
+
+ rt->moduleResolveHook = func;
+}
+
+JS_PUBLIC_API JS::ModuleMetadataHook JS::GetModuleMetadataHook(JSRuntime* rt) {
+ AssertHeapIsIdle();
+
+ return rt->moduleMetadataHook;
+}
+
+JS_PUBLIC_API void JS::SetModuleMetadataHook(JSRuntime* rt,
+ ModuleMetadataHook func) {
+ AssertHeapIsIdle();
+
+ rt->moduleMetadataHook = func;
+}
+
+JS_PUBLIC_API JS::ModuleDynamicImportHook JS::GetModuleDynamicImportHook(
+ JSRuntime* rt) {
+ AssertHeapIsIdle();
+
+ return rt->moduleDynamicImportHook;
+}
+
+JS_PUBLIC_API void JS::SetModuleDynamicImportHook(
+ JSRuntime* rt, ModuleDynamicImportHook func) {
+ AssertHeapIsIdle();
+
+ rt->moduleDynamicImportHook = func;
+}
+
+JS_PUBLIC_API bool JS::FinishDynamicModuleImport(
+ JSContext* cx, Handle<JSObject*> evaluationPromise,
+ Handle<Value> referencingPrivate, Handle<JSObject*> moduleRequest,
+ Handle<JSObject*> promise) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(referencingPrivate, promise);
+
+ return js::FinishDynamicModuleImport(
+ cx, evaluationPromise, referencingPrivate, moduleRequest, promise);
+}
+
+template <typename Unit>
+static JSObject* CompileModuleHelper(JSContext* cx,
+ const JS::ReadOnlyCompileOptions& options,
+ JS::SourceText<Unit>& srcBuf) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ JS::Rooted<JSObject*> mod(cx);
+ {
+ AutoReportFrontendContext fc(cx);
+ mod = frontend::CompileModule(cx, &fc, options, srcBuf);
+ }
+ return mod;
+}
+
+JS_PUBLIC_API JSObject* JS::CompileModule(JSContext* cx,
+ const ReadOnlyCompileOptions& options,
+ SourceText<char16_t>& srcBuf) {
+ return CompileModuleHelper(cx, options, srcBuf);
+}
+
+JS_PUBLIC_API JSObject* JS::CompileModule(JSContext* cx,
+ const ReadOnlyCompileOptions& options,
+ SourceText<Utf8Unit>& srcBuf) {
+ return CompileModuleHelper(cx, options, srcBuf);
+}
+
+JS_PUBLIC_API void JS::SetModulePrivate(JSObject* module, const Value& value) {
+ JSRuntime* rt = module->zone()->runtimeFromMainThread();
+ module->as<ModuleObject>().scriptSourceObject()->setPrivate(rt, value);
+}
+
+JS_PUBLIC_API void JS::ClearModulePrivate(JSObject* module) {
+ // |module| may be gray, be careful not to create edges to it.
+ JSRuntime* rt = module->zone()->runtimeFromMainThread();
+ module->as<ModuleObject>().scriptSourceObject()->clearPrivate(rt);
+}
+
+JS_PUBLIC_API JS::Value JS::GetModulePrivate(JSObject* module) {
+ return module->as<ModuleObject>().scriptSourceObject()->getPrivate();
+}
+
+JS_PUBLIC_API bool JS::ModuleLink(JSContext* cx, Handle<JSObject*> moduleArg) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->releaseCheck(moduleArg);
+
+ return js::ModuleLink(cx, moduleArg.as<ModuleObject>());
+}
+
+JS_PUBLIC_API bool JS::ModuleEvaluate(JSContext* cx,
+ Handle<JSObject*> moduleRecord,
+ MutableHandle<JS::Value> rval) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->releaseCheck(moduleRecord);
+
+ return js::ModuleEvaluate(cx, moduleRecord.as<ModuleObject>(), rval);
+}
+
+JS_PUBLIC_API bool JS::ThrowOnModuleEvaluationFailure(
+ JSContext* cx, Handle<JSObject*> evaluationPromise,
+ ModuleErrorBehaviour errorBehaviour) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->releaseCheck(evaluationPromise);
+
+ return OnModuleEvaluationFailure(cx, evaluationPromise, errorBehaviour);
+}
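+
+/*
+ * Illustrative sketch (not part of the original file) of how an embedder
+ * typically chains these entry points, assuming |options| and |srcBuf| are
+ * already prepared:
+ *
+ *   Rooted<JSObject*> module(cx, JS::CompileModule(cx, options, srcBuf));
+ *   if (!module || !JS::ModuleLink(cx, module)) { return false; }
+ *   Rooted<Value> rval(cx);
+ *   if (!JS::ModuleEvaluate(cx, module, &rval)) { return false; }
+ *   // With top-level await, |rval| holds the evaluation promise; report
+ *   // any rejection with the error behaviour of the embedder's choosing:
+ *   Rooted<JSObject*> promise(cx, &rval.toObject());
+ *   if (!JS::ThrowOnModuleEvaluationFailure(cx, promise, errorBehaviour)) {
+ *     return false;
+ *   }
+ */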
+
+JS_PUBLIC_API uint32_t
+JS::GetRequestedModulesCount(JSContext* cx, Handle<JSObject*> moduleRecord) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(moduleRecord);
+
+ return moduleRecord->as<ModuleObject>().requestedModules().Length();
+}
+
+JS_PUBLIC_API JSString* JS::GetRequestedModuleSpecifier(
+ JSContext* cx, Handle<JSObject*> moduleRecord, uint32_t index) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(moduleRecord);
+
+ auto& module = moduleRecord->as<ModuleObject>();
+ return module.requestedModules()[index].moduleRequest()->specifier();
+}
+
+JS_PUBLIC_API void JS::GetRequestedModuleSourcePos(
+ JSContext* cx, Handle<JSObject*> moduleRecord, uint32_t index,
+ uint32_t* lineNumber, uint32_t* columnNumber) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(moduleRecord);
+ MOZ_ASSERT(lineNumber);
+ MOZ_ASSERT(columnNumber);
+
+ auto& module = moduleRecord->as<ModuleObject>();
+ *lineNumber = module.requestedModules()[index].lineNumber();
+ *columnNumber = module.requestedModules()[index].columnNumber();
+}
+
+JS_PUBLIC_API JSScript* JS::GetModuleScript(JS::HandleObject moduleRecord) {
+ AssertHeapIsIdle();
+
+ return moduleRecord->as<ModuleObject>().script();
+}
+
+JS_PUBLIC_API JSObject* JS::GetModuleObject(HandleScript moduleScript) {
+ AssertHeapIsIdle();
+ MOZ_ASSERT(moduleScript->isModule());
+
+ return moduleScript->module();
+}
+
+JS_PUBLIC_API JSObject* JS::GetModuleNamespace(JSContext* cx,
+ HandleObject moduleRecord) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(moduleRecord);
+ MOZ_ASSERT(moduleRecord->is<ModuleObject>());
+
+ return GetOrCreateModuleNamespace(cx, moduleRecord.as<ModuleObject>());
+}
+
+JS_PUBLIC_API JSObject* JS::GetModuleForNamespace(
+ JSContext* cx, HandleObject moduleNamespace) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(moduleNamespace);
+ MOZ_ASSERT(moduleNamespace->is<ModuleNamespaceObject>());
+
+ return &moduleNamespace->as<ModuleNamespaceObject>().module();
+}
+
+JS_PUBLIC_API JSObject* JS::GetModuleEnvironment(JSContext* cx,
+ Handle<JSObject*> moduleObj) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(moduleObj);
+ MOZ_ASSERT(moduleObj->is<ModuleObject>());
+
+ return moduleObj->as<ModuleObject>().environment();
+}
+
+JS_PUBLIC_API JSObject* JS::CreateModuleRequest(
+ JSContext* cx, Handle<JSString*> specifierArg) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ Rooted<JSAtom*> specifierAtom(cx, AtomizeString(cx, specifierArg));
+ if (!specifierAtom) {
+ return nullptr;
+ }
+
+ return ModuleRequestObject::create(cx, specifierAtom, nullptr);
+}
+
+JS_PUBLIC_API JSString* JS::GetModuleRequestSpecifier(
+ JSContext* cx, Handle<JSObject*> moduleRequestArg) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(moduleRequestArg);
+
+ return moduleRequestArg->as<ModuleRequestObject>().specifier();
+}
+
+JS_PUBLIC_API void JS::ClearModuleEnvironment(JSObject* moduleObj) {
+ MOZ_ASSERT(moduleObj);
+ AssertHeapIsIdle();
+
+ js::ModuleEnvironmentObject* env =
+ moduleObj->as<js::ModuleObject>().environment();
+ if (!env) {
+ return;
+ }
+
+ const JSClass* clasp = env->getClass();
+ uint32_t numReserved = JSCLASS_RESERVED_SLOTS(clasp);
+ uint32_t numSlots = env->slotSpan();
+ for (uint32_t i = numReserved; i < numSlots; i++) {
+ env->setSlot(i, UndefinedValue());
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Internal implementation
+
+class ResolveSetEntry {
+ ModuleObject* module_;
+ JSAtom* exportName_;
+
+ public:
+ ResolveSetEntry(ModuleObject* module, JSAtom* exportName)
+ : module_(module), exportName_(exportName) {}
+
+ ModuleObject* module() const { return module_; }
+ JSAtom* exportName() const { return exportName_; }
+
+ void trace(JSTracer* trc) {
+ TraceRoot(trc, &module_, "ResolveSetEntry::module_");
+ TraceRoot(trc, &exportName_, "ResolveSetEntry::exportName_");
+ }
+};
+
+using ResolveSet = GCVector<ResolveSetEntry, 0, SystemAllocPolicy>;
+
+using ModuleSet =
+ GCHashSet<ModuleObject*, DefaultHasher<ModuleObject*>, SystemAllocPolicy>;
+
+static ModuleObject* HostResolveImportedModule(
+ JSContext* cx, Handle<ModuleObject*> module,
+ Handle<ModuleRequestObject*> moduleRequest,
+ ModuleStatus expectedMinimumStatus);
+static bool ModuleResolveExport(JSContext* cx, Handle<ModuleObject*> module,
+ Handle<JSAtom*> exportName,
+ MutableHandle<ResolveSet> resolveSet,
+ MutableHandle<Value> result);
+static ModuleNamespaceObject* ModuleNamespaceCreate(
+ JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<UniquePtr<ExportNameVector>> exports);
+static bool InnerModuleLinking(JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<ModuleVector> stack, size_t index,
+ size_t* indexOut);
+static bool InnerModuleEvaluation(JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<ModuleVector> stack,
+ size_t index, size_t* indexOut);
+static bool ExecuteAsyncModule(JSContext* cx, Handle<ModuleObject*> module);
+static bool GatherAvailableModuleAncestors(
+ JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<ModuleVector> execList);
+
+static const char* ModuleStatusName(ModuleStatus status) {
+ switch (status) {
+ case ModuleStatus::Unlinked:
+ return "Unlinked";
+ case ModuleStatus::Linking:
+ return "Linking";
+ case ModuleStatus::Linked:
+ return "Linked";
+ case ModuleStatus::Evaluating:
+ return "Evaluating";
+ case ModuleStatus::EvaluatingAsync:
+ return "EvaluatingAsync";
+ case ModuleStatus::Evaluated:
+ return "Evaluated";
+ default:
+ MOZ_CRASH("Unexpected ModuleStatus");
+ }
+}
+
+static bool ContainsElement(Handle<ExportNameVector> list, JSAtom* atom) {
+ for (JSAtom* a : list) {
+ if (a == atom) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool ContainsElement(Handle<ModuleVector> stack, ModuleObject* module) {
+ for (ModuleObject* m : stack) {
+ if (m == module) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+#ifdef DEBUG
+static size_t CountElements(Handle<ModuleVector> stack, ModuleObject* module) {
+ size_t count = 0;
+ for (ModuleObject* m : stack) {
+ if (m == module) {
+ count++;
+ }
+ }
+
+ return count;
+}
+#endif
+
+// https://tc39.es/ecma262/#sec-getexportednames
+// ES2023 16.2.1.6.2 GetExportedNames
+static bool ModuleGetExportedNames(
+ JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<ModuleSet> exportStarSet,
+ MutableHandle<ExportNameVector> exportedNames) {
+ // Step 4. Let exportedNames be a new empty List.
+ MOZ_ASSERT(exportedNames.empty());
+
+ // Step 2. If exportStarSet contains module, then:
+ if (exportStarSet.has(module)) {
+ // Step 2.a. We've reached the starting point of an export * circularity.
+ // Step 2.b. Return a new empty List.
+ return true;
+ }
+
+ // Step 3. Append module to exportStarSet.
+ if (!exportStarSet.put(module)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Step 5. For each ExportEntry Record e of module.[[LocalExportEntries]], do:
+ for (const ExportEntry& e : module->localExportEntries()) {
+ // Step 5.a. Assert: module provides the direct binding for this export.
+ // Step 5.b. Append e.[[ExportName]] to exportedNames.
+ if (!exportedNames.append(e.exportName())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ // Step 6. For each ExportEntry Record e of module.[[IndirectExportEntries]],
+ // do:
+ for (const ExportEntry& e : module->indirectExportEntries()) {
+ // Step 6.a. Assert: module imports a specific binding for this export.
+ // Step 6.b. Append e.[[ExportName]] to exportedNames.
+ if (!exportedNames.append(e.exportName())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ // Step 7. For each ExportEntry Record e of module.[[StarExportEntries]], do:
+ Rooted<ModuleRequestObject*> moduleRequest(cx);
+ Rooted<ModuleObject*> requestedModule(cx);
+ Rooted<JSAtom*> name(cx);
+ for (const ExportEntry& e : module->starExportEntries()) {
+ // Step 7.a. Let requestedModule be ? HostResolveImportedModule(module,
+ // e.[[ModuleRequest]]).
+ moduleRequest = e.moduleRequest();
+ requestedModule = HostResolveImportedModule(cx, module, moduleRequest,
+ ModuleStatus::Unlinked);
+ if (!requestedModule) {
+ return false;
+ }
+
+ // Step 7.b. Let starNames be ?
+ // requestedModule.GetExportedNames(exportStarSet).
+ Rooted<ExportNameVector> starNames(cx);
+ if (!ModuleGetExportedNames(cx, requestedModule, exportStarSet,
+ &starNames)) {
+ return false;
+ }
+
+ // Step 7.c. For each element n of starNames, do:
+ for (JSAtom* name : starNames) {
+ // Step 7.c.i. If SameValue(n, "default") is false, then:
+ if (name != cx->names().default_) {
+ // Step 7.c.i.1. If n is not an element of exportedNames, then:
+ if (!ContainsElement(exportedNames, name)) {
+ // Step 7.c.i.1.a. Append n to exportedNames.
+ if (!exportedNames.append(name)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+ }
+ }
+ }
+
+ // Step 8. Return exportedNames.
+ return true;
+}
+
+static void ThrowUnexpectedModuleStatus(JSContext* cx, ModuleStatus status) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_MODULE_STATUS, ModuleStatusName(status));
+}
+
+static ModuleObject* HostResolveImportedModule(
+ JSContext* cx, Handle<ModuleObject*> module,
+ Handle<ModuleRequestObject*> moduleRequest,
+ ModuleStatus expectedMinimumStatus) {
+ MOZ_ASSERT(module);
+ MOZ_ASSERT(moduleRequest);
+
+ Rooted<Value> referencingPrivate(cx, JS::GetModulePrivate(module));
+ Rooted<ModuleObject*> requestedModule(cx);
+ requestedModule =
+ CallModuleResolveHook(cx, referencingPrivate, moduleRequest);
+ if (!requestedModule) {
+ return nullptr;
+ }
+
+ if (requestedModule->status() < expectedMinimumStatus) {
+ ThrowUnexpectedModuleStatus(cx, requestedModule->status());
+ return nullptr;
+ }
+
+ return requestedModule;
+}
+
+// https://tc39.es/ecma262/#sec-resolveexport
+// ES2023 16.2.1.6.3 ResolveExport
+//
+// Returns a value describing the location of the resolved export, or
+// indicating a failure.
+//
+// On success this returns a resolved binding record: { module, bindingName }
+//
+// There are two failure cases:
+//
+// - If no definition was found or the request is found to be circular, *null*
+// is returned.
+//
+// - If the request is found to be ambiguous, the string "ambiguous" is
+//   returned.
+//
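+// A minimal caller sketch (illustrative only; assumes |module| and
+// |exportName| are already rooted):
+//
+//   Rooted<Value> resolution(cx);
+//   if (!ModuleResolveExport(cx, module, exportName, &resolution)) {
+//     return false;  // An error (e.g. OOM) was reported.
+//   }
+//   if (resolution.isNull()) {
+//     // No definition found, or a circular request.
+//   } else if (resolution.isString()) {
+//     // The string "ambiguous".
+//   } else {
+//     auto* binding = &resolution.toObject().as<ResolvedBindingObject>();
+//     // Use binding->module() and binding->bindingName().
+//   }
+//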
+bool js::ModuleResolveExport(JSContext* cx, Handle<ModuleObject*> module,
+ Handle<JSAtom*> exportName,
+ MutableHandle<Value> result) {
+ // Step 1. If resolveSet is not present, set resolveSet to a new empty List.
+ Rooted<ResolveSet> resolveSet(cx);
+
+ return ::ModuleResolveExport(cx, module, exportName, &resolveSet, result);
+}
+
+static bool CreateResolvedBindingObject(JSContext* cx,
+ Handle<ModuleObject*> module,
+ Handle<JSAtom*> bindingName,
+ MutableHandle<Value> result) {
+ Rooted<ResolvedBindingObject*> obj(
+ cx, ResolvedBindingObject::create(cx, module, bindingName));
+ if (!obj) {
+ return false;
+ }
+
+ result.setObject(*obj);
+ return true;
+}
+
+static bool ModuleResolveExport(JSContext* cx, Handle<ModuleObject*> module,
+ Handle<JSAtom*> exportName,
+ MutableHandle<ResolveSet> resolveSet,
+ MutableHandle<Value> result) {
+ // Step 2. For each Record { [[Module]], [[ExportName]] } r of resolveSet, do:
+ for (const auto& entry : resolveSet) {
+ // Step 2.a. If module and r.[[Module]] are the same Module Record and
+ // SameValue(exportName, r.[[ExportName]]) is true, then:
+ if (entry.module() == module && entry.exportName() == exportName) {
+ // Step 2.a.i. Assert: This is a circular import request.
+ // Step 2.a.ii. Return null.
+ result.setNull();
+ return true;
+ }
+ }
+
+ // Step 3. Append the Record { [[Module]]: module, [[ExportName]]: exportName
+ // } to resolveSet.
+ if (!resolveSet.emplaceBack(module, exportName)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Step 4. For each ExportEntry Record e of module.[[LocalExportEntries]], do:
+ for (const ExportEntry& e : module->localExportEntries()) {
+ // Step 4.a. If SameValue(exportName, e.[[ExportName]]) is true, then:
+ if (exportName == e.exportName()) {
+ // Step 4.a.i. Assert: module provides the direct binding for this export.
+ // Step 4.a.ii. Return ResolvedBinding Record { [[Module]]: module,
+ // [[BindingName]]: e.[[LocalName]] }.
+ Rooted<JSAtom*> localName(cx, e.localName());
+ return CreateResolvedBindingObject(cx, module, localName, result);
+ }
+ }
+
+ // Step 5. For each ExportEntry Record e of module.[[IndirectExportEntries]],
+ // do:
+ Rooted<ModuleRequestObject*> moduleRequest(cx);
+ Rooted<ModuleObject*> importedModule(cx);
+ Rooted<JSAtom*> name(cx);
+ for (const ExportEntry& e : module->indirectExportEntries()) {
+ // Step 5.a. If SameValue(exportName, e.[[ExportName]]) is true, then:
+ if (exportName == e.exportName()) {
+ // Step 5.a.i. Let importedModule be ? HostResolveImportedModule(module,
+ // e.[[ModuleRequest]]).
+ moduleRequest = e.moduleRequest();
+ importedModule = HostResolveImportedModule(cx, module, moduleRequest,
+ ModuleStatus::Unlinked);
+ if (!importedModule) {
+ return false;
+ }
+
+ // Step 5.a.ii. If e.[[ImportName]] is all, then:
+ if (!e.importName()) {
+ // Step 5.a.ii.1. Assert: module does not provide the direct binding for
+ // this export.
+ // Step 5.a.ii.2. Return ResolvedBinding Record { [[Module]]:
+ // importedModule, [[BindingName]]: namespace }.
+ name = cx->names().starNamespaceStar;
+ return CreateResolvedBindingObject(cx, importedModule, name, result);
+ } else {
+ // Step 5.a.iii.1. Assert: module imports a specific binding for this
+ // export.
+ // Step 5.a.iii.2. Return ?
+ // importedModule.ResolveExport(e.[[ImportName]],
+ // resolveSet).
+ name = e.importName();
+ return ModuleResolveExport(cx, importedModule, name, resolveSet,
+ result);
+ }
+ }
+ }
+
+ // Step 6. If SameValue(exportName, "default") is true, then:
+ if (exportName == cx->names().default_) {
+ // Step 6.a. Assert: A default export was not explicitly defined by this
+ // module.
+ // Step 6.b. Return null.
+ // Step 6.c. NOTE: A default export cannot be provided by an export * from
+ // "mod" declaration.
+ result.setNull();
+ return true;
+ }
+
+ // Step 7. Let starResolution be null.
+ Rooted<ResolvedBindingObject*> starResolution(cx);
+
+ // Step 8. For each ExportEntry Record e of module.[[StarExportEntries]], do:
+ Rooted<Value> resolution(cx);
+ Rooted<ResolvedBindingObject*> binding(cx);
+ for (const ExportEntry& e : module->starExportEntries()) {
+ // Step 8.a. Let importedModule be ? HostResolveImportedModule(module,
+ // e.[[ModuleRequest]]).
+ moduleRequest = e.moduleRequest();
+ importedModule = HostResolveImportedModule(cx, module, moduleRequest,
+ ModuleStatus::Unlinked);
+ if (!importedModule) {
+ return false;
+ }
+
+ // Step 8.b. Let resolution be ? importedModule.ResolveExport(exportName,
+ // resolveSet).
+ if (!ModuleResolveExport(cx, importedModule, exportName, resolveSet,
+ &resolution)) {
+ return false;
+ }
+
+ // Step 8.c. If resolution is ambiguous, return ambiguous.
+ if (resolution == StringValue(cx->names().ambiguous)) {
+ result.set(resolution);
+ return true;
+ }
+
+ // Step 8.d. If resolution is not null, then:
+ if (!resolution.isNull()) {
+ // Step 8.d.i. Assert: resolution is a ResolvedBinding Record.
+ binding = &resolution.toObject().as<ResolvedBindingObject>();
+
+ // Step 8.d.ii. If starResolution is null, set starResolution to
+ // resolution.
+ if (!starResolution) {
+ starResolution = binding;
+ } else {
+ // Step 8.d.iii. Else:
+ // Step 8.d.iii.1. Assert: There is more than one * import that includes
+ // the requested name.
+ // Step 8.d.iii.2. If resolution.[[Module]] and
+ // starResolution.[[Module]] are not the same Module
+ // Record, return ambiguous.
+ // Step 8.d.iii.3. If resolution.[[BindingName]] is namespace and
+ // starResolution.[[BindingName]] is not namespace, or
+ // if resolution.[[BindingName]] is not namespace and
+ // starResolution.[[BindingName]] is namespace, return
+ // ambiguous.
+ // Step 8.d.iii.4. If resolution.[[BindingName]] is a String,
+ // starResolution.[[BindingName]] is a String, and
+ // SameValue(resolution.[[BindingName]],
+ // starResolution.[[BindingName]]) is false, return
+ // ambiguous.
+ if (binding->module() != starResolution->module() ||
+ binding->bindingName() != starResolution->bindingName()) {
+ result.set(StringValue(cx->names().ambiguous));
+ return true;
+ }
+ }
+ }
+ }
+
+ // Step 9. Return starResolution.
+ result.setObjectOrNull(starResolution);
+ return true;
+}
+
+// https://tc39.es/ecma262/#sec-getmodulenamespace
+// ES2023 16.2.1.10 GetModuleNamespace
+ModuleNamespaceObject* js::GetOrCreateModuleNamespace(
+ JSContext* cx, Handle<ModuleObject*> module) {
+ // Step 1. Assert: If module is a Cyclic Module Record, then module.[[Status]]
+ // is not unlinked.
+ MOZ_ASSERT(module->status() != ModuleStatus::Unlinked);
+
+ // Step 2. Let namespace be module.[[Namespace]].
+ Rooted<ModuleNamespaceObject*> ns(cx, module->namespace_());
+
+ // Step 3. If namespace is empty, then:
+ if (!ns) {
+ // Step 3.a. Let exportedNames be ? module.GetExportedNames().
+ Rooted<ModuleSet> exportStarSet(cx);
+ Rooted<ExportNameVector> exportedNames(cx);
+ if (!ModuleGetExportedNames(cx, module, &exportStarSet, &exportedNames)) {
+ return nullptr;
+ }
+
+ // Step 3.b. Let unambiguousNames be a new empty List.
+ Rooted<UniquePtr<ExportNameVector>> unambiguousNames(
+ cx, cx->make_unique<ExportNameVector>());
+ if (!unambiguousNames) {
+ return nullptr;
+ }
+
+ // Step 3.c. For each element name of exportedNames, do:
+ Rooted<JSAtom*> name(cx);
+ Rooted<Value> resolution(cx);
+ for (JSAtom* atom : exportedNames) {
+ name = atom;
+
+ // Step 3.c.i. Let resolution be ? module.ResolveExport(name).
+ if (!ModuleResolveExport(cx, module, name, &resolution)) {
+ return nullptr;
+ }
+
+ // Step 3.c.ii. If resolution is a ResolvedBinding Record, append name to
+ // unambiguousNames.
+ if (resolution.isObject() && !unambiguousNames->append(name)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ }
+
+ // Step 3.d. Set namespace to ModuleNamespaceCreate(module,
+ // unambiguousNames).
+ ns = ModuleNamespaceCreate(cx, module, &unambiguousNames);
+ }
+
+ // Step 4. Return namespace.
+ return ns;
+}
+
+static bool IsResolvedBinding(JSContext* cx, Handle<Value> resolution) {
+ MOZ_ASSERT(resolution.isObjectOrNull() ||
+ resolution.toString() == cx->names().ambiguous);
+ return resolution.isObject();
+}
+
+static void InitNamespaceBinding(JSContext* cx,
+ Handle<ModuleEnvironmentObject*> env,
+ Handle<JSAtom*> name,
+ Handle<ModuleNamespaceObject*> ns) {
+  // The property already exists in the environment but is not writable, so
+  // set the slot directly.
+ RootedId id(cx, AtomToId(name));
+ mozilla::Maybe<PropertyInfo> prop = env->lookup(cx, id);
+ MOZ_ASSERT(prop.isSome());
+ env->setSlot(prop->slot(), ObjectValue(*ns));
+}
+
+struct AtomComparator {
+ bool operator()(JSAtom* a, JSAtom* b, bool* lessOrEqualp) {
+ int32_t result = CompareStrings(a, b);
+ *lessOrEqualp = (result <= 0);
+ return true;
+ }
+};
+
+// https://tc39.es/ecma262/#sec-modulenamespacecreate
+// ES2023 10.4.6.12 ModuleNamespaceCreate
+static ModuleNamespaceObject* ModuleNamespaceCreate(
+ JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<UniquePtr<ExportNameVector>> exports) {
+ // Step 1. Assert: module.[[Namespace]] is empty.
+ MOZ_ASSERT(!module->namespace_());
+
+ // Step 6. Let sortedExports be a List whose elements are the elements of
+ // exports ordered as if an Array of the same values had been sorted
+ // using %Array.prototype.sort% using undefined as comparefn.
+ ExportNameVector scratch;
+ if (!scratch.resize(exports->length())) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ MOZ_ALWAYS_TRUE(MergeSort(exports->begin(), exports->length(),
+ scratch.begin(), AtomComparator()));
+
+ // Steps 2 - 5.
+ Rooted<ModuleNamespaceObject*> ns(
+ cx, ModuleObject::createNamespace(cx, module, exports));
+ if (!ns) {
+ return nullptr;
+ }
+
+ // Pre-compute all binding mappings now instead of on each access.
+ // See:
+ // https://tc39.es/ecma262/#sec-module-namespace-exotic-objects-get-p-receiver
+ // ES2023 10.4.6.8 Module Namespace Exotic Object [[Get]]
+ Rooted<JSAtom*> name(cx);
+ Rooted<Value> resolution(cx);
+ Rooted<ResolvedBindingObject*> binding(cx);
+ Rooted<ModuleObject*> importedModule(cx);
+ Rooted<ModuleNamespaceObject*> importedNamespace(cx);
+ Rooted<JSAtom*> bindingName(cx);
+ for (JSAtom* atom : ns->exports()) {
+ name = atom;
+
+ if (!ModuleResolveExport(cx, module, name, &resolution)) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(IsResolvedBinding(cx, resolution));
+ binding = &resolution.toObject().as<ResolvedBindingObject>();
+ importedModule = binding->module();
+ bindingName = binding->bindingName();
+
+ if (bindingName == cx->names().starNamespaceStar) {
+ importedNamespace = GetOrCreateModuleNamespace(cx, importedModule);
+ if (!importedNamespace) {
+ return nullptr;
+ }
+
+      // The spec uses an immutable binding here but we have already generated
+      // bytecode for an indirect binding. Instead, use an indirect binding to
+      // the "*namespace*" slot of the target environment.
+ Rooted<ModuleEnvironmentObject*> env(
+ cx, &importedModule->initialEnvironment());
+ InitNamespaceBinding(cx, env, bindingName, importedNamespace);
+ }
+
+ if (!ns->addBinding(cx, name, importedModule, bindingName)) {
+ return nullptr;
+ }
+ }
+
+ // Step 10. Return M.
+ return ns;
+}
+
+static void ThrowResolutionError(JSContext* cx, Handle<ModuleObject*> module,
+ Handle<Value> resolution, bool isDirectImport,
+ Handle<JSAtom*> name, uint32_t line,
+ uint32_t column) {
+ MOZ_ASSERT(line != 0);
+
+ bool isAmbiguous = resolution == StringValue(cx->names().ambiguous);
+
+  // Indexed as ErrorNumbers[isDirectImport][isAmbiguous].
+  static constexpr unsigned ErrorNumbers[2][2] = {
+      {JSMSG_MISSING_INDIRECT_EXPORT, JSMSG_AMBIGUOUS_INDIRECT_EXPORT},
+      {JSMSG_MISSING_IMPORT, JSMSG_AMBIGUOUS_IMPORT}};
+ unsigned errorNumber = ErrorNumbers[isDirectImport][isAmbiguous];
+
+ const JSErrorFormatString* errorString =
+ GetErrorMessage(nullptr, errorNumber);
+ MOZ_ASSERT(errorString);
+
+ MOZ_ASSERT(errorString->argCount == 0);
+ Rooted<JSString*> message(cx, JS_NewStringCopyZ(cx, errorString->format));
+ if (!message) {
+ return;
+ }
+
+ Rooted<JSString*> separator(cx, JS_NewStringCopyZ(cx, ": "));
+ if (!separator) {
+ return;
+ }
+
+ message = ConcatStrings<CanGC>(cx, message, separator);
+ if (!message) {
+ return;
+ }
+
+ message = ConcatStrings<CanGC>(cx, message, name);
+ if (!message) {
+ return;
+ }
+
+ RootedString filename(cx);
+ if (const char* chars = module->script()->filename()) {
+ filename =
+ JS_NewStringCopyUTF8Z(cx, JS::ConstUTF8CharsZ(chars, strlen(chars)));
+ } else {
+ filename = cx->names().empty;
+ }
+ if (!filename) {
+ return;
+ }
+
+ RootedValue error(cx);
+ if (!JS::CreateError(cx, JSEXN_SYNTAXERR, nullptr, filename, line, column,
+ nullptr, message, JS::NothingHandleValue, &error)) {
+ return;
+ }
+
+ cx->setPendingException(error, nullptr);
+}
+
+// https://tc39.es/ecma262/#sec-source-text-module-record-initialize-environment
+// ES2023 16.2.1.6.4 InitializeEnvironment
+bool js::ModuleInitializeEnvironment(JSContext* cx,
+ Handle<ModuleObject*> module) {
+ MOZ_ASSERT(module->status() == ModuleStatus::Linking);
+
+ // Step 1. For each ExportEntry Record e of module.[[IndirectExportEntries]],
+ // do:
+ Rooted<JSAtom*> exportName(cx);
+ Rooted<Value> resolution(cx);
+ for (const ExportEntry& e : module->indirectExportEntries()) {
+ // Step 1.a. Let resolution be ? module.ResolveExport(e.[[ExportName]]).
+ exportName = e.exportName();
+ if (!ModuleResolveExport(cx, module, exportName, &resolution)) {
+ return false;
+ }
+
+ // Step 1.b. If resolution is null or ambiguous, throw a SyntaxError
+ // exception.
+ if (!IsResolvedBinding(cx, resolution)) {
+ ThrowResolutionError(cx, module, resolution, false, exportName,
+ e.lineNumber(), e.columnNumber());
+ return false;
+ }
+ }
+
+ // Step 5. Let env be NewModuleEnvironment(realm.[[GlobalEnv]]).
+ // Step 6. Set module.[[Environment]] to env.
+ // Note that we have already created the environment by this point.
+ Rooted<ModuleEnvironmentObject*> env(cx, &module->initialEnvironment());
+
+ // Step 7. For each ImportEntry Record in of module.[[ImportEntries]], do:
+ Rooted<ModuleRequestObject*> moduleRequest(cx);
+ Rooted<ModuleObject*> importedModule(cx);
+ Rooted<JSAtom*> importName(cx);
+ Rooted<JSAtom*> localName(cx);
+ Rooted<ModuleObject*> sourceModule(cx);
+ Rooted<JSAtom*> bindingName(cx);
+ for (const ImportEntry& in : module->importEntries()) {
+ // Step 7.a. Let importedModule be ! HostResolveImportedModule(module,
+ // in.[[ModuleRequest]]).
+ moduleRequest = in.moduleRequest();
+ importedModule = HostResolveImportedModule(cx, module, moduleRequest,
+ ModuleStatus::Linking);
+ if (!importedModule) {
+ return false;
+ }
+
+ localName = in.localName();
+ importName = in.importName();
+
+ // Step 7.c. If in.[[ImportName]] is namespace-object, then:
+ if (!importName) {
+ // Step 7.c.i. Let namespace be ? GetModuleNamespace(importedModule).
+ Rooted<ModuleNamespaceObject*> ns(
+ cx, GetOrCreateModuleNamespace(cx, importedModule));
+ if (!ns) {
+ return false;
+ }
+
+ // Step 7.c.ii. Perform ! env.CreateImmutableBinding(in.[[LocalName]],
+ // true). This happens when the environment is created.
+
+ // Step 7.c.iii. Perform ! env.InitializeBinding(in.[[LocalName]],
+ // namespace).
+ InitNamespaceBinding(cx, env, localName, ns);
+ } else {
+ // Step 7.d. Else:
+ // Step 7.d.i. Let resolution be ?
+ // importedModule.ResolveExport(in.[[ImportName]]).
+ if (!ModuleResolveExport(cx, importedModule, importName, &resolution)) {
+ return false;
+ }
+
+ // Step 7.d.ii. If resolution is null or ambiguous, throw a SyntaxError
+ // exception.
+ if (!IsResolvedBinding(cx, resolution)) {
+ ThrowResolutionError(cx, module, resolution, true, importName,
+ in.lineNumber(), in.columnNumber());
+ return false;
+ }
+
+ auto* binding = &resolution.toObject().as<ResolvedBindingObject>();
+ sourceModule = binding->module();
+ bindingName = binding->bindingName();
+
+ // Step 7.d.iii. If resolution.[[BindingName]] is namespace, then:
+ if (bindingName == cx->names().starNamespaceStar) {
+ // Step 7.d.iii.1. Let namespace be ?
+ // GetModuleNamespace(resolution.[[Module]]).
+ Rooted<ModuleNamespaceObject*> ns(
+ cx, GetOrCreateModuleNamespace(cx, sourceModule));
+ if (!ns) {
+ return false;
+ }
+
+ // Step 7.d.iii.2. Perform !
+ // env.CreateImmutableBinding(in.[[LocalName]], true).
+ // Step 7.d.iii.3. Perform ! env.InitializeBinding(in.[[LocalName]],
+ // namespace).
+ //
+ // This should be InitNamespaceBinding, but we have already generated
+ // bytecode assuming an indirect binding. Instead, ensure a special
+ // "*namespace*"" binding exists on the target module's environment. We
+ // then generate an indirect binding to this synthetic binding.
+ Rooted<ModuleEnvironmentObject*> sourceEnv(
+ cx, &sourceModule->initialEnvironment());
+ InitNamespaceBinding(cx, sourceEnv, bindingName, ns);
+ if (!env->createImportBinding(cx, localName, sourceModule,
+ bindingName)) {
+ return false;
+ }
+ } else {
+ // Step 7.d.iv. Else:
+        // Step 7.d.iv.1. Perform env.CreateImportBinding(in.[[LocalName]],
+        //                resolution.[[Module]], resolution.[[BindingName]]).
+ if (!env->createImportBinding(cx, localName, sourceModule,
+ bindingName)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ // Steps 8-26.
+ //
+ // Some of these do not need to happen for practical purposes. For steps
+  // 21-23, the bindings that can be handled in a similar way to regular
+  // scripts are done separately. Function Declarations are special due to
+ // hoisting and are handled within this function. See ModuleScope and
+ // ModuleEnvironmentObject for further details.
+
+ // Step 24. For each element d of lexDeclarations, do:
+ // Step 24.a. For each element dn of the BoundNames of d, do:
+ // Step 24.a.iii. If d is a FunctionDeclaration, a GeneratorDeclaration, an
+ // AsyncFunctionDeclaration, or an AsyncGeneratorDeclaration,
+ // then:
+ // Step 24.a.iii.1 Let fo be InstantiateFunctionObject of d with arguments env
+ // and privateEnv.
+ // Step 24.a.iii.2. Perform ! env.InitializeBinding(dn, fo).
+ return ModuleObject::instantiateFunctionDeclarations(cx, module);
+}
+
+// https://tc39.es/ecma262/#sec-moduledeclarationlinking
+// ES2023 16.2.1.5.1 Link
+bool js::ModuleLink(JSContext* cx, Handle<ModuleObject*> module) {
+ // Step 1. Assert: module.[[Status]] is not linking or evaluating.
+ ModuleStatus status = module->status();
+ if (status == ModuleStatus::Linking || status == ModuleStatus::Evaluating) {
+ ThrowUnexpectedModuleStatus(cx, status);
+ return false;
+ }
+
+ // Step 2. Let stack be a new empty List.
+ Rooted<ModuleVector> stack(cx);
+
+ // Step 3. Let result be Completion(InnerModuleLinking(module, stack, 0)).
+ size_t ignored;
+ bool ok = InnerModuleLinking(cx, module, &stack, 0, &ignored);
+
+ // Step 4. If result is an abrupt completion, then:
+ if (!ok) {
+ // Step 4.a. For each Cyclic Module Record m of stack, do:
+ for (ModuleObject* m : stack) {
+ // Step 4.a.i. Assert: m.[[Status]] is linking.
+ MOZ_ASSERT(m->status() == ModuleStatus::Linking);
+ // Step 4.a.ii. Set m.[[Status]] to unlinked.
+ m->setStatus(ModuleStatus::Unlinked);
+ m->clearDfsIndexes();
+ }
+
+ // Step 4.b. Assert: module.[[Status]] is unlinked.
+ MOZ_ASSERT(module->status() == ModuleStatus::Unlinked);
+
+    // Step 4.c. Return ? result.
+ return false;
+ }
+
+ // Step 5. Assert: module.[[Status]] is linked, evaluating-async, or
+ // evaluated.
+ MOZ_ASSERT(module->status() == ModuleStatus::Linked ||
+ module->status() == ModuleStatus::EvaluatingAsync ||
+ module->status() == ModuleStatus::Evaluated);
+
+ // Step 6. Assert: stack is empty.
+ MOZ_ASSERT(stack.empty());
+
+ // Step 7. Return unused.
+ return true;
+}
+
+// https://tc39.es/ecma262/#sec-InnerModuleLinking
+// ES2023 16.2.1.5.1.1 InnerModuleLinking
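+//
+// Implementation note: this is essentially Tarjan's strongly-connected-
+// components algorithm. [[DFSIndex]] and [[DFSAncestorIndex]] correspond to
+// the index and lowlink values, and each completed cycle of modules is popped
+// off |stack| and marked linked as a unit.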
+static bool InnerModuleLinking(JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<ModuleVector> stack, size_t index,
+ size_t* indexOut) {
+ // Step 2. If module.[[Status]] is linking, linked, evaluating-async, or
+ // evaluated, then:
+ if (module->status() == ModuleStatus::Linking ||
+ module->status() == ModuleStatus::Linked ||
+ module->status() == ModuleStatus::EvaluatingAsync ||
+ module->status() == ModuleStatus::Evaluated) {
+ // Step 2.a. Return index.
+ *indexOut = index;
+ return true;
+ }
+
+ // Step 3. Assert: module.[[Status]] is unlinked.
+ if (module->status() != ModuleStatus::Unlinked) {
+ ThrowUnexpectedModuleStatus(cx, module->status());
+ return false;
+ }
+
+ // Step 8. Append module to stack.
+ // Do this before changing the status so that we can recover on failure.
+ if (!stack.append(module)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Step 4. Set module.[[Status]] to linking.
+ module->setStatus(ModuleStatus::Linking);
+
+ // Step 5. Set module.[[DFSIndex]] to index.
+ module->setDfsIndex(index);
+
+ // Step 6. Set module.[[DFSAncestorIndex]] to index.
+ module->setDfsAncestorIndex(index);
+
+ // Step 7. Set index to index + 1.
+ index++;
+
+ // Step 9. For each String required that is an element of
+ // module.[[RequestedModules]], do:
+ Rooted<ModuleRequestObject*> moduleRequest(cx);
+ Rooted<ModuleObject*> requiredModule(cx);
+ for (const RequestedModule& request : module->requestedModules()) {
+ moduleRequest = request.moduleRequest();
+
+ // Step 9.a. Let requiredModule be ? HostResolveImportedModule(module,
+ // required).
+ requiredModule = HostResolveImportedModule(cx, module, moduleRequest,
+ ModuleStatus::Unlinked);
+ if (!requiredModule) {
+ return false;
+ }
+
+ // Step 9.b. Set index to ? InnerModuleLinking(requiredModule, stack,
+ // index).
+ if (!InnerModuleLinking(cx, requiredModule, stack, index, &index)) {
+ return false;
+ }
+
+ // Step 9.c. If requiredModule is a Cyclic Module Record, then:
+ // Step 9.c.i. Assert: requiredModule.[[Status]] is either linking, linked,
+ // evaluating-async, or evaluated.
+ MOZ_ASSERT(requiredModule->status() == ModuleStatus::Linking ||
+ requiredModule->status() == ModuleStatus::Linked ||
+ requiredModule->status() == ModuleStatus::EvaluatingAsync ||
+ requiredModule->status() == ModuleStatus::Evaluated);
+
+ // Step 9.c.ii. Assert: requiredModule.[[Status]] is linking if and only if
+ // requiredModule is in stack.
+ MOZ_ASSERT((requiredModule->status() == ModuleStatus::Linking) ==
+ ContainsElement(stack, requiredModule));
+
+ // Step 9.c.iii. If requiredModule.[[Status]] is linking, then:
+ if (requiredModule->status() == ModuleStatus::Linking) {
+ // Step 9.c.iii.1. Set module.[[DFSAncestorIndex]] to
+ // min(module.[[DFSAncestorIndex]],
+ // requiredModule.[[DFSAncestorIndex]]).
+ module->setDfsAncestorIndex(std::min(module->dfsAncestorIndex(),
+ requiredModule->dfsAncestorIndex()));
+ }
+ }
+
+ // Step 10. Perform ? module.InitializeEnvironment().
+ if (!ModuleInitializeEnvironment(cx, module)) {
+ return false;
+ }
+
+ // Step 11. Assert: module occurs exactly once in stack.
+ MOZ_ASSERT(CountElements(stack, module) == 1);
+
+ // Step 12. Assert: module.[[DFSAncestorIndex]] <= module.[[DFSIndex]].
+ MOZ_ASSERT(module->dfsAncestorIndex() <= module->dfsIndex());
+
+ // Step 13. If module.[[DFSAncestorIndex]] = module.[[DFSIndex]], then
+ if (module->dfsAncestorIndex() == module->dfsIndex()) {
+    // Step 13.a. Let done be false.
+ bool done = false;
+
+ // Step 13.b. Repeat, while done is false:
+ while (!done) {
+ // Step 13.b.i. Let requiredModule be the last element in stack.
+ // Step 13.b.ii. Remove the last element of stack.
+ requiredModule = stack.popCopy();
+
+ // Step 13.b.iv. Set requiredModule.[[Status]] to linked.
+ requiredModule->setStatus(ModuleStatus::Linked);
+
+ // Step 13.b.v. If requiredModule and module are the same Module Record,
+ // set done to true.
+ done = requiredModule == module;
+ }
+ }
+
+ // Step 14. Return index.
+ *indexOut = index;
+ return true;
+}
+
+// https://tc39.es/ecma262/#sec-moduleevaluation
+// ES2023 16.2.1.5.2 Evaluate
+bool js::ModuleEvaluate(JSContext* cx, Handle<ModuleObject*> moduleArg,
+ MutableHandle<Value> result) {
+ Rooted<ModuleObject*> module(cx, moduleArg);
+
+ // Step 2. Assert: module.[[Status]] is linked, evaluating-async, or
+ // evaluated.
+ ModuleStatus status = module->status();
+ if (status != ModuleStatus::Linked &&
+ status != ModuleStatus::EvaluatingAsync &&
+ status != ModuleStatus::Evaluated) {
+ ThrowUnexpectedModuleStatus(cx, status);
+ return false;
+ }
+
+ // Note: we return early in the error case, as the spec assumes we can get the
+ // cycle root of |module| which may not be available.
+ if (module->hadEvaluationError()) {
+ Rooted<PromiseObject*> capability(cx);
+ if (!module->hasTopLevelCapability()) {
+ capability = ModuleObject::createTopLevelCapability(cx, module);
+ if (!capability) {
+ return false;
+ }
+
+ Rooted<Value> error(cx, module->evaluationError());
+ if (!ModuleObject::topLevelCapabilityReject(cx, module, error)) {
+ return false;
+ }
+ }
+
+ capability = module->topLevelCapability();
+ MOZ_ASSERT(JS::GetPromiseState(capability) == JS::PromiseState::Rejected);
+ MOZ_ASSERT(JS::GetPromiseResult(capability) == module->evaluationError());
+ result.set(ObjectValue(*capability));
+ return true;
+ }
+
+ // Step 3. If module.[[Status]] is evaluating-async or evaluated, set module
+ // to module.[[CycleRoot]].
+ if (module->status() == ModuleStatus::EvaluatingAsync ||
+ module->status() == ModuleStatus::Evaluated) {
+ module = module->getCycleRoot();
+ }
+
+ // Step 4. If module.[[TopLevelCapability]] is not empty, then:
+ if (module->hasTopLevelCapability()) {
+ // Step 4.a. Return module.[[TopLevelCapability]].[[Promise]].
+ result.set(ObjectValue(*module->topLevelCapability()));
+ return true;
+ }
+
+ // Step 5. Let stack be a new empty List.
+ Rooted<ModuleVector> stack(cx);
+
+ // Step 6. Let capability be ! NewPromiseCapability(%Promise%).
+ // Step 7. Set module.[[TopLevelCapability]] to capability.
+ Rooted<PromiseObject*> capability(
+ cx, ModuleObject::createTopLevelCapability(cx, module));
+ if (!capability) {
+ return false;
+ }
+
+ // Step 8. Let result be Completion(InnerModuleEvaluation(module, stack, 0)).
+ size_t ignored;
+ bool ok = InnerModuleEvaluation(cx, module, &stack, 0, &ignored);
+
+  // Step 9. If result is an abrupt completion, then:
+ if (!ok) {
+ // Attempt to take any pending exception, but make sure we still handle
+ // uncatchable exceptions.
+ Rooted<Value> error(cx);
+ if (cx->isExceptionPending()) {
+ std::ignore = cx->getPendingException(&error);
+ cx->clearPendingException();
+ }
+
+ // Step 9.a. For each Cyclic Module Record m of stack, do
+ for (ModuleObject* m : stack) {
+ // Step 9.a.i. Assert: m.[[Status]] is evaluating.
+ MOZ_ASSERT(m->status() == ModuleStatus::Evaluating);
+
+ // Step 9.a.ii. Set m.[[Status]] to evaluated.
+ // Step 9.a.iii. Set m.[[EvaluationError]] to result.
+ m->setEvaluationError(error);
+ }
+
+    // Handle OOM when appending to the stack, and over-recursion errors.
+ if (stack.empty() && !module->hadEvaluationError()) {
+ module->setEvaluationError(error);
+ }
+
+ // Step 9.b. Assert: module.[[Status]] is evaluated.
+ MOZ_ASSERT(module->status() == ModuleStatus::Evaluated);
+
+ // Step 9.c. Assert: module.[[EvaluationError]] is result.
+ MOZ_ASSERT(module->evaluationError() == error);
+
+ // Step 9.d. Perform ! Call(capability.[[Reject]], undefined,
+ // result.[[Value]]).
+ if (!ModuleObject::topLevelCapabilityReject(cx, module, error)) {
+ return false;
+ }
+ } else {
+ // Step 10. Else:
+ // Step 10.a. Assert: module.[[Status]] is evaluating-async or evaluated.
+ MOZ_ASSERT(module->status() == ModuleStatus::EvaluatingAsync ||
+ module->status() == ModuleStatus::Evaluated);
+
+ // Step 10.b. Assert: module.[[EvaluationError]] is empty.
+ MOZ_ASSERT(!module->hadEvaluationError());
+
+ // Step 10.c. If module.[[AsyncEvaluation]] is false, then:
+ if (module->status() == ModuleStatus::Evaluated) {
+ // Step 10.c.ii. Perform ! Call(capability.[[Resolve]], undefined,
+ // undefined).
+ if (!ModuleObject::topLevelCapabilityResolve(cx, module)) {
+ return false;
+ }
+ }
+
+ // Step 10.d. Assert: stack is empty.
+ MOZ_ASSERT(stack.empty());
+ }
+
+ // Step 11. Return capability.[[Promise]].
+ result.set(ObjectValue(*capability));
+ return true;
+}
+
+// https://tc39.es/ecma262/#sec-innermoduleevaluation
+// 16.2.1.5.2.1 InnerModuleEvaluation
+static bool InnerModuleEvaluation(JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<ModuleVector> stack,
+ size_t index, size_t* indexOut) {
+ // Step 2. If module.[[Status]] is evaluating-async or evaluated, then:
+ if (module->status() == ModuleStatus::EvaluatingAsync ||
+ module->status() == ModuleStatus::Evaluated) {
+ // Step 2.a. If module.[[EvaluationError]] is empty, return index.
+ if (!module->hadEvaluationError()) {
+ *indexOut = index;
+ return true;
+ }
+
+ // Step 2.b. Otherwise, return ? module.[[EvaluationError]].
+ Rooted<Value> error(cx, module->evaluationError());
+ cx->setPendingException(error, ShouldCaptureStack::Maybe);
+ return false;
+ }
+
+ // Step 3. If module.[[Status]] is evaluating, return index.
+ if (module->status() == ModuleStatus::Evaluating) {
+ *indexOut = index;
+ return true;
+ }
+
+ // Step 4. Assert: module.[[Status]] is linked.
+ MOZ_ASSERT(module->status() == ModuleStatus::Linked);
+
+ // Step 10. Append module to stack.
+ // Do this before changing the status so that we can recover on failure.
+ if (!stack.append(module)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Step 5. Set module.[[Status]] to evaluating.
+ module->setStatus(ModuleStatus::Evaluating);
+
+ // Step 6. Set module.[[DFSIndex]] to index.
+ module->setDfsIndex(index);
+
+ // Step 7. Set module.[[DFSAncestorIndex]] to index.
+ module->setDfsAncestorIndex(index);
+
+ // Step 8. Set module.[[PendingAsyncDependencies]] to 0.
+ module->setPendingAsyncDependencies(0);
+
+ // Step 9. Set index to index + 1.
+ index++;
+
+ // Step 11. For each String required of module.[[RequestedModules]], do:
+ Rooted<ModuleRequestObject*> required(cx);
+ Rooted<ModuleObject*> requiredModule(cx);
+ for (const RequestedModule& request : module->requestedModules()) {
+ required = request.moduleRequest();
+
+ // Step 11.a. Let requiredModule be ! HostResolveImportedModule(module,
+ // required).
+ // Step 11.b. NOTE: Link must be completed successfully prior to invoking
+ // this method, so every requested module is guaranteed to
+ // resolve successfully.
+ requiredModule =
+ HostResolveImportedModule(cx, module, required, ModuleStatus::Linked);
+ if (!requiredModule) {
+ return false;
+ }
+
+ // Step 11.c. Set index to ? InnerModuleEvaluation(requiredModule, stack,
+ // index).
+ if (!InnerModuleEvaluation(cx, requiredModule, stack, index, &index)) {
+ return false;
+ }
+
+ // Step 11.d. If requiredModule is a Cyclic Module Record, then:
+ // Step 11.d.i. Assert: requiredModule.[[Status]] is either evaluating,
+ // evaluating-async, or evaluated.
+ MOZ_ASSERT(requiredModule->status() == ModuleStatus::Evaluating ||
+ requiredModule->status() == ModuleStatus::EvaluatingAsync ||
+ requiredModule->status() == ModuleStatus::Evaluated);
+
+ // Step 11.d.ii. Assert: requiredModule.[[Status]] is evaluating if and only
+ // if requiredModule is in stack.
+ MOZ_ASSERT((requiredModule->status() == ModuleStatus::Evaluating) ==
+ ContainsElement(stack, requiredModule));
+
+ // Step 11.d.iii. If requiredModule.[[Status]] is evaluating, then:
+ if (requiredModule->status() == ModuleStatus::Evaluating) {
+ // Step 11.d.iii.1. Set module.[[DFSAncestorIndex]] to
+ // min(module.[[DFSAncestorIndex]],
+ // requiredModule.[[DFSAncestorIndex]]).
+ module->setDfsAncestorIndex(std::min(module->dfsAncestorIndex(),
+ requiredModule->dfsAncestorIndex()));
+ } else {
+ // Step 11.d.iv. Else:
+ // Step 11.d.iv.1. Set requiredModule to requiredModule.[[CycleRoot]].
+ requiredModule = requiredModule->getCycleRoot();
+
+ // Step 11.d.iv.2. Assert: requiredModule.[[Status]] is evaluating-async
+ // or evaluated.
+      MOZ_ASSERT(requiredModule->status() == ModuleStatus::EvaluatingAsync ||
+                 requiredModule->status() == ModuleStatus::Evaluated);
+
+ // Step 11.d.iv.3. If requiredModule.[[EvaluationError]] is not empty,
+ // return ? requiredModule.[[EvaluationError]].
+ if (requiredModule->hadEvaluationError()) {
+ Rooted<Value> error(cx, requiredModule->evaluationError());
+ cx->setPendingException(error, ShouldCaptureStack::Maybe);
+ return false;
+ }
+ }
+
+ // Step 11.d.v. If requiredModule.[[AsyncEvaluation]] is true, then:
+ if (requiredModule->isAsyncEvaluating() &&
+ requiredModule->status() != ModuleStatus::Evaluated) {
+ // Step 11.d.v.2. Append module to requiredModule.[[AsyncParentModules]].
+ if (!ModuleObject::appendAsyncParentModule(cx, requiredModule, module)) {
+ return false;
+ }
+
+ // Step 11.d.v.1. Set module.[[PendingAsyncDependencies]] to
+ // module.[[PendingAsyncDependencies]] + 1.
+ module->setPendingAsyncDependencies(module->pendingAsyncDependencies() +
+ 1);
+ }
+ }
+
+ // Step 12. If module.[[PendingAsyncDependencies]] > 0 or module.[[HasTLA]] is
+ // true, then:
+ if (module->pendingAsyncDependencies() > 0 || module->hasTopLevelAwait()) {
+ // Step 12.a. Assert: module.[[AsyncEvaluation]] is false and was never
+ // previously set to true.
+ MOZ_ASSERT(!module->isAsyncEvaluating());
+
+ // Step 12.b. Set module.[[AsyncEvaluation]] to true.
+ // Step 12.c. NOTE: The order in which module records have their
+ // [[AsyncEvaluation]] fields transition to true is
+ // significant. (See 16.2.1.5.2.4.)
+ module->setAsyncEvaluating();
+
+ // Step 12.d. If module.[[PendingAsyncDependencies]] is 0, perform
+ // ExecuteAsyncModule(module).
+ if (module->pendingAsyncDependencies() == 0) {
+ if (!ExecuteAsyncModule(cx, module)) {
+ return false;
+ }
+ }
+ } else {
+ // Step 13. Otherwise, perform ? module.ExecuteModule().
+ if (!ModuleObject::execute(cx, module)) {
+ return false;
+ }
+ }
+
+ // Step 14. Assert: module occurs exactly once in stack.
+ MOZ_ASSERT(CountElements(stack, module) == 1);
+
+ // Step 15. Assert: module.[[DFSAncestorIndex]] <= module.[[DFSIndex]].
+ MOZ_ASSERT(module->dfsAncestorIndex() <= module->dfsIndex());
+
+ // Step 16. If module.[[DFSAncestorIndex]] = module.[[DFSIndex]], then:
+ if (module->dfsAncestorIndex() == module->dfsIndex()) {
+ // Step 16.a. Let done be false.
+ bool done = false;
+
+ // Step 16.b. Repeat, while done is false:
+ while (!done) {
+ // Step 16.b.i. Let requiredModule be the last element in stack.
+ // Step 16.b.ii. Remove the last element of stack.
+ requiredModule = stack.popCopy();
+
+ // Step 16.b.iv. If requiredModule.[[AsyncEvaluation]] is false, set
+ // requiredModule.[[Status]] to evaluated.
+ if (!requiredModule->isAsyncEvaluating()) {
+ requiredModule->setStatus(ModuleStatus::Evaluated);
+ } else {
+ // Step 16.b.v. Otherwise, set requiredModule.[[Status]] to
+ // evaluating-async.
+ requiredModule->setStatus(ModuleStatus::EvaluatingAsync);
+ }
+
+ // Step 16.b.vi. If requiredModule and module are the same Module Record,
+ // set done to true.
+ done = requiredModule == module;
+
+ // Step 16.b.vii. Set requiredModule.[[CycleRoot]] to module.
+ requiredModule->setCycleRoot(module);
+ }
+ }
+
+ // Step 17. Return index.
+ *indexOut = index;
+ return true;
+}
+
+// https://tc39.es/ecma262/#sec-execute-async-module
+// ES2023 16.2.1.5.2.2 ExecuteAsyncModule
+static bool ExecuteAsyncModule(JSContext* cx, Handle<ModuleObject*> module) {
+ // Step 1. Assert: module.[[Status]] is evaluating or evaluating-async.
+ MOZ_ASSERT(module->status() == ModuleStatus::Evaluating ||
+ module->status() == ModuleStatus::EvaluatingAsync);
+
+ // Step 2. Assert: module.[[HasTLA]] is true.
+ MOZ_ASSERT(module->hasTopLevelAwait());
+
+ // Steps 3 - 8 are performed by the AsyncAwait opcode.
+
+ // Step 9. Perform ! module.ExecuteModule(capability).
+ // Step 10. Return unused.
+ return ModuleObject::execute(cx, module);
+}
+
+// https://tc39.es/ecma262/#sec-gather-available-ancestors
+// ES2023 16.2.1.5.2.3 GatherAvailableAncestors
+static bool GatherAvailableModuleAncestors(
+ JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<ModuleVector> execList) {
+ MOZ_ASSERT(module->status() == ModuleStatus::EvaluatingAsync);
+
+ // Step 1. For each Cyclic Module Record m of module.[[AsyncParentModules]],
+ // do:
+ Rooted<ListObject*> asyncParentModules(cx, module->asyncParentModules());
+ Rooted<ModuleObject*> m(cx);
+ for (uint32_t i = 0; i != asyncParentModules->length(); i++) {
+ m = &asyncParentModules->getDenseElement(i).toObject().as<ModuleObject>();
+
+ // Step 1.a. If execList does not contain m and
+ // m.[[CycleRoot]].[[EvaluationError]] is empty, then:
+ //
+ // Note: we also check whether m.[[EvaluationError]] is empty since an error
+ // in synchronous execution can prevent the CycleRoot field from being set.
+ if (!m->hadEvaluationError() && !m->getCycleRoot()->hadEvaluationError() &&
+ !ContainsElement(execList, m)) {
+ // Step 1.a.i. Assert: m.[[Status]] is evaluating-async.
+ MOZ_ASSERT(m->status() == ModuleStatus::EvaluatingAsync);
+
+ // Step 1.a.ii. Assert: m.[[EvaluationError]] is empty.
+ MOZ_ASSERT(!m->hadEvaluationError());
+
+ // Step 1.a.iii. Assert: m.[[AsyncEvaluation]] is true.
+ MOZ_ASSERT(m->isAsyncEvaluating());
+
+ // Step 1.a.iv. Assert: m.[[PendingAsyncDependencies]] > 0.
+ MOZ_ASSERT(m->pendingAsyncDependencies() > 0);
+
+ // Step 1.a.v. Set m.[[PendingAsyncDependencies]] to
+ // m.[[PendingAsyncDependencies]] - 1.
+ m->setPendingAsyncDependencies(m->pendingAsyncDependencies() - 1);
+
+ // Step 1.a.vi. If m.[[PendingAsyncDependencies]] = 0, then:
+ if (m->pendingAsyncDependencies() == 0) {
+ // Step 1.a.vi.1. Append m to execList.
+ if (!execList.append(m)) {
+ return false;
+ }
+
+ // Step 1.a.vi.2. If m.[[HasTLA]] is false, perform
+ // GatherAvailableAncestors(m, execList).
+ if (!m->hasTopLevelAwait() &&
+ !GatherAvailableModuleAncestors(cx, m, execList)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ // Step 2. Return unused.
+ return true;
+}
+
+struct EvalOrderComparator {
+ bool operator()(ModuleObject* a, ModuleObject* b, bool* lessOrEqualp) {
+ int32_t result = int32_t(a->getAsyncEvaluatingPostOrder()) -
+ int32_t(b->getAsyncEvaluatingPostOrder());
+ *lessOrEqualp = (result <= 0);
+ return true;
+ }
+};
+
+static void RejectExecutionWithPendingException(JSContext* cx,
+ Handle<ModuleObject*> module) {
+ // If there is no exception pending then we have been interrupted or have
+ // OOM'd and all bets are off. We reject the execution by throwing
+ // undefined. Not much more we can do.
+ RootedValue exception(cx);
+ if (cx->isExceptionPending()) {
+ std::ignore = cx->getPendingException(&exception);
+ }
+ cx->clearPendingException();
+ AsyncModuleExecutionRejected(cx, module, exception);
+}
+
+// https://tc39.es/ecma262/#sec-async-module-execution-fulfilled
+// ES2023 16.2.1.5.2.4 AsyncModuleExecutionFulfilled
+void js::AsyncModuleExecutionFulfilled(JSContext* cx,
+ Handle<ModuleObject*> module) {
+ // Step 1. If module.[[Status]] is evaluated, then:
+ if (module->status() == ModuleStatus::Evaluated) {
+ // Step 1.a. Assert: module.[[EvaluationError]] is not empty.
+ MOZ_ASSERT(module->hadEvaluationError());
+
+ // Step 1.b. Return unused.
+ return;
+ }
+
+ // Step 2. Assert: module.[[Status]] is evaluating-async.
+ MOZ_ASSERT(module->status() == ModuleStatus::EvaluatingAsync);
+
+ // Step 3. Assert: module.[[AsyncEvaluation]] is true.
+ MOZ_ASSERT(module->isAsyncEvaluating());
+
+ // Step 4. Assert: module.[[EvaluationError]] is empty.
+ MOZ_ASSERT(!module->hadEvaluationError());
+
+ // The following steps are performed in a different order from the
+ // spec. Gather available module ancestors before mutating the module object
+ // as this can fail in our implementation.
+
+ // Step 8. Let execList be a new empty List.
+ Rooted<ModuleVector> execList(cx);
+
+ // Step 9. Perform GatherAvailableAncestors(module, execList).
+ if (!GatherAvailableModuleAncestors(cx, module, &execList)) {
+ RejectExecutionWithPendingException(cx, module);
+ return;
+ }
+
+ // Step 10. Let sortedExecList be a List whose elements are the elements of
+ // execList, in the order in which they had their [[AsyncEvaluation]]
+ // fields set to true in InnerModuleEvaluation.
+
+ Rooted<ModuleVector> scratch(cx);
+ if (!scratch.resize(execList.length())) {
+ ReportOutOfMemory(cx);
+ RejectExecutionWithPendingException(cx, module);
+ return;
+ }
+
+ MOZ_ALWAYS_TRUE(MergeSort(execList.begin(), execList.length(),
+ scratch.begin(), EvalOrderComparator()));
+
+ // Step 11. Assert: All elements of sortedExecList have their
+ // [[AsyncEvaluation]] field set to true,
+ // [[PendingAsyncDependencies]] field set to 0, and
+ // [[EvaluationError]] field set to empty.
+#ifdef DEBUG
+ for (ModuleObject* m : execList) {
+ MOZ_ASSERT(m->isAsyncEvaluating());
+ MOZ_ASSERT(m->pendingAsyncDependencies() == 0);
+ MOZ_ASSERT(!m->hadEvaluationError());
+ }
+#endif
+
+ // Return to original order of steps.
+
+ ModuleObject::onTopLevelEvaluationFinished(module);
+
+ // Step 6. Set module.[[Status]] to evaluated.
+ module->setStatus(ModuleStatus::Evaluated);
+ module->clearAsyncEvaluatingPostOrder();
+
+ // Step 7. If module.[[TopLevelCapability]] is not empty, then:
+ if (module->hasTopLevelCapability()) {
+ // Step 7.a. Assert: module.[[CycleRoot]] is module.
+ MOZ_ASSERT(module->getCycleRoot() == module);
+
+ // Step 7.b. Perform ! Call(module.[[TopLevelCapability]].[[Resolve]],
+ // undefined, undefined).
+ if (!ModuleObject::topLevelCapabilityResolve(cx, module)) {
+ // If Resolve fails, there's nothing more we can do here.
+ cx->clearPendingException();
+ }
+ }
+
+ // Step 12. For each Cyclic Module Record m of sortedExecList, do:
+ Rooted<ModuleObject*> m(cx);
+ for (ModuleObject* obj : execList) {
+ m = obj;
+
+ // Step 12.a. If m.[[Status]] is evaluated, then:
+ if (m->status() == ModuleStatus::Evaluated) {
+ // Step 12.a.i. Assert: m.[[EvaluationError]] is not empty.
+ MOZ_ASSERT(m->hadEvaluationError());
+ } else if (m->hasTopLevelAwait()) {
+ // Step 12.b. Else if m.[[HasTLA]] is true, then:
+ // Step 12.b.i. Perform ExecuteAsyncModule(m).
+ MOZ_ALWAYS_TRUE(ExecuteAsyncModule(cx, m));
+ } else {
+ // Step 12.c. Else:
+ // Step 12.c.i. Let result be m.ExecuteModule().
+ bool ok = ModuleObject::execute(cx, m);
+
+ // Step 12.c.ii. If result is an abrupt completion, then:
+ if (!ok) {
+ // Step 12.c.ii.1. Perform AsyncModuleExecutionRejected(m,
+ // result.[[Value]]).
+ RejectExecutionWithPendingException(cx, m);
+ } else {
+ // Step 12.c.iii. Else:
+ // Step 12.c.iii.1. Set m.[[Status]] to evaluated.
+ m->setStatus(ModuleStatus::Evaluated);
+ m->clearAsyncEvaluatingPostOrder();
+
+ // Step 12.c.iii.2. If m.[[TopLevelCapability]] is not empty, then:
+ if (m->hasTopLevelCapability()) {
+ // Step 12.c.iii.2.a. Assert: m.[[CycleRoot]] is m.
+ MOZ_ASSERT(m->getCycleRoot() == m);
+
+ // Step 12.c.iii.2.b. Perform !
+ // Call(m.[[TopLevelCapability]].[[Resolve]],
+ // undefined, undefined).
+ if (!ModuleObject::topLevelCapabilityResolve(cx, m)) {
+ // If Resolve fails, there's nothing more we can do here.
+ cx->clearPendingException();
+ }
+ }
+ }
+ }
+ }
+
+ // Step 13. Return unused.
+}
+
+// https://tc39.es/ecma262/#sec-async-module-execution-rejected
+// ES2023 16.2.1.5.2.5 AsyncModuleExecutionRejected
+void js::AsyncModuleExecutionRejected(JSContext* cx,
+ Handle<ModuleObject*> module,
+ HandleValue error) {
+ // Step 1. If module.[[Status]] is evaluated, then:
+ if (module->status() == ModuleStatus::Evaluated) {
+    // Step 1.a. Assert: module.[[EvaluationError]] is not empty.
+ MOZ_ASSERT(module->hadEvaluationError());
+
+ // Step 1.b. Return unused.
+ return;
+ }
+
+ // Step 2. Assert: module.[[Status]] is evaluating-async.
+ MOZ_ASSERT(module->status() == ModuleStatus::EvaluatingAsync);
+
+ // Step 3. Assert: module.[[AsyncEvaluation]] is true.
+ MOZ_ASSERT(module->isAsyncEvaluating());
+
+  // Step 4. Assert: module.[[EvaluationError]] is empty.
+ MOZ_ASSERT(!module->hadEvaluationError());
+
+ ModuleObject::onTopLevelEvaluationFinished(module);
+
+ // Step 5. Set module.[[EvaluationError]] to ThrowCompletion(error).
+ module->setEvaluationError(error);
+
+  // Step 6. Set module.[[Status]] to evaluated. setEvaluationError above has
+  // already done this, so just assert it here.
+  MOZ_ASSERT(module->status() == ModuleStatus::Evaluated);
+
+ module->clearAsyncEvaluatingPostOrder();
+
+ // Step 7. For each Cyclic Module Record m of module.[[AsyncParentModules]],
+ // do:
+ Rooted<ListObject*> parents(cx, module->asyncParentModules());
+ Rooted<ModuleObject*> parent(cx);
+ for (uint32_t i = 0; i < parents->length(); i++) {
+ parent = &parents->get(i).toObject().as<ModuleObject>();
+
+ // Step 7.a. Perform AsyncModuleExecutionRejected(m, error).
+ AsyncModuleExecutionRejected(cx, parent, error);
+ }
+
+ // Step 8. If module.[[TopLevelCapability]] is not empty, then:
+ if (module->hasTopLevelCapability()) {
+ // Step 8.a. Assert: module.[[CycleRoot]] is module.
+ MOZ_ASSERT(module->getCycleRoot() == module);
+
+ // Step 8.b. Perform ! Call(module.[[TopLevelCapability]].[[Reject]],
+ // undefined, error).
+ if (!ModuleObject::topLevelCapabilityReject(cx, module, error)) {
+ // If Reject fails, there's nothing more we can do here.
+ cx->clearPendingException();
+ }
+ }
+
+ // Step 9. Return unused.
+}
diff --git a/js/src/vm/Modules.h b/js/src/vm/Modules.h
new file mode 100644
index 0000000000..18b97ac3d7
--- /dev/null
+++ b/js/src/vm/Modules.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2
+ * -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Modules_h
+#define vm_Modules_h
+
+#include "NamespaceImports.h"
+
+#include "builtin/ModuleObject.h"
+#include "js/AllocPolicy.h"
+#include "js/GCVector.h"
+#include "js/RootingAPI.h"
+
+struct JSContext;
+
+namespace js {
+
+using ModuleVector = GCVector<ModuleObject*, 0, SystemAllocPolicy>;
+
+bool ModuleResolveExport(JSContext* cx, Handle<ModuleObject*> module,
+ Handle<JSAtom*> exportName,
+ MutableHandle<Value> result);
+
+ModuleNamespaceObject* GetOrCreateModuleNamespace(JSContext* cx,
+ Handle<ModuleObject*> module);
+
+bool ModuleInitializeEnvironment(JSContext* cx, Handle<ModuleObject*> module);
+
+bool ModuleLink(JSContext* cx, Handle<ModuleObject*> module);
+
+// Start evaluating the module. If top-level await (TLA) is enabled, the
+// result will be a promise.
+bool ModuleEvaluate(JSContext* cx, Handle<ModuleObject*> module,
+ MutableHandle<Value> result);
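+
+// A typical embedding drives linking and evaluation roughly as follows
+// (a sketch, not a prescribed sequence):
+//
+//   if (!ModuleLink(cx, module)) {
+//     return false;
+//   }
+//   Rooted<Value> rval(cx);
+//   if (!ModuleEvaluate(cx, module, &rval)) {
+//     return false;
+//   }
+//   // With top-level await enabled, rval holds the evaluation promise.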
+
+void AsyncModuleExecutionFulfilled(JSContext* cx, Handle<ModuleObject*> module);
+
+void AsyncModuleExecutionRejected(JSContext* cx, Handle<ModuleObject*> module,
+ HandleValue error);
+
+} // namespace js
+
+#endif // vm_Modules_h
diff --git a/js/src/vm/Monitor.h b/js/src/vm/Monitor.h
new file mode 100644
index 0000000000..6c0fbff0d9
--- /dev/null
+++ b/js/src/vm/Monitor.h
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Monitor_h
+#define vm_Monitor_h
+
+#include "threading/ConditionVariable.h"
+#include "threading/Mutex.h"
+
+namespace js {
+
+// A base class for types that are shared between threads. Combines a lock
+// and a condition variable. Acquire the lock or signal the condition
+// variable using the |AutoLockMonitor| type.
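+//
+// A minimal usage sketch (illustrative only; |conditionHolds| and
+// |makeConditionHold| are hypothetical, and |mutexid::TestMutex| comes from
+// vm/MutexIDs.h):
+//
+//   Monitor monitor(mutexid::TestMutex);
+//
+//   // Waiting thread:
+//   {
+//     AutoLockMonitor lock(monitor);
+//     while (!conditionHolds()) {
+//       lock.wait();  // Releases the lock while blocked.
+//     }
+//   }
+//
+//   // Signalling thread:
+//   {
+//     AutoLockMonitor lock(monitor);
+//     makeConditionHold();
+//     lock.notify();
+//   }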
+
+class Monitor {
+ protected:
+ friend class AutoLockMonitor;
+ friend class AutoUnlockMonitor;
+
+ Mutex lock_ MOZ_UNANNOTATED;
+ ConditionVariable condVar_;
+
+ public:
+ explicit Monitor(const MutexId& id) : lock_(id) {}
+};
+
+class AutoLockMonitor : public LockGuard<Mutex> {
+ private:
+ using Base = LockGuard<Mutex>;
+ Monitor& monitor;
+
+ public:
+ explicit AutoLockMonitor(Monitor& monitor)
+ : Base(monitor.lock_), monitor(monitor) {}
+
+ bool isFor(Monitor& other) const { return &monitor.lock_ == &other.lock_; }
+
+ void wait(ConditionVariable& condVar) { condVar.wait(*this); }
+
+ void wait() { wait(monitor.condVar_); }
+
+ void notify(ConditionVariable& condVar) { condVar.notify_one(); }
+
+ void notify() { notify(monitor.condVar_); }
+
+ void notifyAll(ConditionVariable& condVar) { condVar.notify_all(); }
+
+ void notifyAll() { notifyAll(monitor.condVar_); }
+};
+
+class AutoUnlockMonitor {
+ private:
+ Monitor& monitor;
+
+ public:
+ explicit AutoUnlockMonitor(Monitor& monitor) : monitor(monitor) {
+ monitor.lock_.unlock();
+ }
+
+ ~AutoUnlockMonitor() { monitor.lock_.lock(); }
+
+ bool isFor(Monitor& other) const { return &monitor.lock_ == &other.lock_; }
+};
+
+} // namespace js
+
+#endif /* vm_Monitor_h */
diff --git a/js/src/vm/MutexIDs.h b/js/src/vm/MutexIDs.h
new file mode 100644
index 0000000000..0e84a48369
--- /dev/null
+++ b/js/src/vm/MutexIDs.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_MutexIDs_h
+#define vm_MutexIDs_h
+
+#include "threading/Mutex.h"
+
+// Central definition point for mutex ordering.
+//
+// Mutexes can only be acquired in increasing order. This prevents the
+// possibility of deadlock. Mutexes with the same order cannot be held
+// at the same time.
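+//
+// For example, a thread holding GCLock (order 300) may acquire
+// GlobalHelperThreadState (order 400), but not vice versa, and two mutexes
+// with order 500 may never be held at once.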
+
+#define FOR_EACH_MUTEX(_) \
+ _(TestMutex, 100) \
+ _(ShellContextWatchdog, 100) \
+ _(ShellWorkerThreads, 100) \
+ _(ShellObjectMailbox, 100) \
+ _(WellKnownParserAtomsInit, 100) \
+ \
+ _(WasmInitBuiltinThunks, 250) \
+ _(WasmLazyStubsTier1, 250) \
+ _(WasmLazyStubsTier2, 251) \
+ \
+ _(StoreBuffer, 275) \
+ \
+ _(GCLock, 300) \
+ \
+ _(GlobalHelperThreadState, 400) \
+ \
+ _(StringsCache, 500) \
+ _(FutexThread, 500) \
+ _(GeckoProfilerStrings, 500) \
+ _(ProtectedRegionTree, 500) \
+ _(ShellOffThreadState, 500) \
+ _(ShellStreamCacheEntryState, 500) \
+ _(SimulatorCacheLock, 500) \
+ _(Arm64SimulatorLock, 500) \
+ _(IonSpewer, 500) \
+ _(PerfSpewer, 500) \
+ _(CacheIRSpewer, 500) \
+ _(DateTimeInfoMutex, 500) \
+ _(ProcessExecutableRegion, 500) \
+ _(BufferStreamState, 500) \
+ _(SharedArrayGrow, 500) \
+ _(SharedImmutableScriptData, 500) \
+ _(WasmTypeIdSet, 500) \
+ _(WasmCodeProfilingLabels, 500) \
+ _(WasmCodeBytesEnd, 500) \
+ _(WasmStreamEnd, 500) \
+ _(WasmStreamStatus, 500) \
+ _(WasmRuntimeInstances, 500) \
+ _(WasmSignalInstallState, 500) \
+ _(WasmHugeMemoryEnabled, 500) \
+ _(MemoryTracker, 500) \
+ _(StencilCache, 500) \
+ _(SourceCompression, 500) \
+ _(GCDelayedMarkingLock, 500) \
+ \
+ _(SharedImmutableStringsCache, 600) \
+ _(IrregexpLazyStatic, 600) \
+ _(ThreadId, 600) \
+ _(WasmCodeSegmentMap, 600) \
+ _(VTuneLock, 600) \
+ _(ShellTelemetry, 600)
+
+namespace js {
+namespace mutexid {
+
+#define DEFINE_MUTEX_ID(name, order) static const MutexId name{#name, order};
+FOR_EACH_MUTEX(DEFINE_MUTEX_ID)
+#undef DEFINE_MUTEX_ID
+
+} // namespace mutexid
+} // namespace js
+
+#endif // vm_MutexIDs_h
diff --git a/js/src/vm/NativeObject-inl.h b/js/src/vm/NativeObject-inl.h
new file mode 100644
index 0000000000..de43d2dc15
--- /dev/null
+++ b/js/src/vm/NativeObject-inl.h
@@ -0,0 +1,908 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_NativeObject_inl_h
+#define vm_NativeObject_inl_h
+
+#include "vm/NativeObject.h"
+
+#include "mozilla/Maybe.h"
+
+#include <type_traits>
+
+#include "gc/Allocator.h"
+#include "gc/GCProbes.h"
+#include "gc/MaybeRooted.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "vm/Compartment.h"
+#include "vm/Iteration.h"
+#include "vm/JSContext.h"
+#include "vm/PlainObject.h"
+#include "vm/PropertyResult.h"
+#include "vm/TypedArrayObject.h"
+
+#include "gc/Heap-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/ObjectKind-inl.h"
+#include "vm/Compartment-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/Realm-inl.h"
+#include "vm/Shape-inl.h"
+
+#ifdef ENABLE_RECORD_TUPLE
+// Defined in vm/RecordTupleShared.{h,cpp}. We cannot include that file
+// because it causes circular dependencies.
+extern bool js::IsExtendedPrimitive(const JSObject& obj);
+#endif
+
+namespace js {
+
+constexpr ObjectSlots::ObjectSlots(uint32_t capacity,
+ uint32_t dictionarySlotSpan,
+ uint64_t maybeUniqueId)
+ : capacity_(capacity),
+ dictionarySlotSpan_(dictionarySlotSpan),
+ maybeUniqueId_(maybeUniqueId) {
+ MOZ_ASSERT(this->capacity() == capacity);
+ MOZ_ASSERT(this->dictionarySlotSpan() == dictionarySlotSpan);
+}
+
+inline uint32_t NativeObject::numFixedSlotsMaybeForwarded() const {
+ return gc::MaybeForwarded(JSObject::shape())->asNative().numFixedSlots();
+}
+
+inline uint8_t* NativeObject::fixedData(size_t nslots) const {
+ MOZ_ASSERT(ClassCanHaveFixedData(gc::MaybeForwardedObjectClass(this)));
+ MOZ_ASSERT(nslots == numFixedSlotsMaybeForwarded());
+ return reinterpret_cast<uint8_t*>(&fixedSlots()[nslots]);
+}
+
+inline void NativeObject::initDenseElementHole(uint32_t index) {
+ markDenseElementsNotPacked();
+ initDenseElementUnchecked(index, MagicValue(JS_ELEMENTS_HOLE));
+}
+
+inline void NativeObject::setDenseElementHole(uint32_t index) {
+ markDenseElementsNotPacked();
+ setDenseElementUnchecked(index, MagicValue(JS_ELEMENTS_HOLE));
+}
+
+inline void NativeObject::removeDenseElementForSparseIndex(uint32_t index) {
+ MOZ_ASSERT(containsPure(PropertyKey::Int(index)));
+ if (containsDenseElement(index)) {
+ setDenseElementHole(index);
+ }
+}
+
+inline void NativeObject::markDenseElementsNotPacked() {
+ MOZ_ASSERT(is<NativeObject>());
+ getElementsHeader()->markNonPacked();
+}
+
+inline void NativeObject::elementsRangePostWriteBarrier(uint32_t start,
+ uint32_t count) {
+ if (!isTenured()) {
+ return;
+ }
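+ // Scan for the first element in the range that refers to a nursery-allocated
+ // GC thing; a single store-buffer entry covering all remaining slots
+ // suffices, so we can return as soon as one entry has been recorded.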
+ for (size_t i = 0; i < count; i++) {
+ const Value& v = elements_[start + i];
+ if (v.isGCThing()) {
+ if (gc::StoreBuffer* sb = v.toGCThing()->storeBuffer()) {
+ sb->putSlot(this, HeapSlot::Element, unshiftedIndex(start + i),
+ count - i);
+ return;
+ }
+ }
+ }
+}
+
+inline void NativeObject::copyDenseElements(uint32_t dstStart, const Value* src,
+ uint32_t count) {
+ MOZ_ASSERT(dstStart + count <= getDenseCapacity());
+ MOZ_ASSERT(isExtensible());
+ MOZ_ASSERT_IF(count > 0, src != nullptr);
+#ifdef DEBUG
+ for (uint32_t i = 0; i < count; ++i) {
+ checkStoredValue(src[i]);
+ }
+#endif
+ if (count == 0) {
+ return;
+ }
+ if (zone()->needsIncrementalBarrier()) {
+ uint32_t numShifted = getElementsHeader()->numShiftedElements();
+ for (uint32_t i = 0; i < count; ++i) {
+ elements_[dstStart + i].set(this, HeapSlot::Element,
+ dstStart + i + numShifted, src[i]);
+ }
+ } else {
+ memcpy(reinterpret_cast<Value*>(&elements_[dstStart]), src,
+ count * sizeof(Value));
+ elementsRangePostWriteBarrier(dstStart, count);
+ }
+}
+
+inline void NativeObject::initDenseElements(NativeObject* src,
+ uint32_t srcStart, uint32_t count) {
+ MOZ_ASSERT(src->getDenseInitializedLength() >= srcStart + count);
+
+ const Value* vp = src->getDenseElements() + srcStart;
+
+ if (!src->denseElementsArePacked()) {
+ // Mark non-packed if we're copying holes or if there are too many elements
+ // to check this efficiently.
+ static constexpr uint32_t MaxCountForPackedCheck = 30;
+ if (count > MaxCountForPackedCheck) {
+ markDenseElementsNotPacked();
+ } else {
+ for (uint32_t i = 0; i < count; i++) {
+ if (vp[i].isMagic(JS_ELEMENTS_HOLE)) {
+ markDenseElementsNotPacked();
+ break;
+ }
+ }
+ }
+ }
+
+ initDenseElements(vp, count);
+}
+
+inline void NativeObject::initDenseElements(const Value* src, uint32_t count) {
+ MOZ_ASSERT(getDenseInitializedLength() == 0);
+ MOZ_ASSERT(count <= getDenseCapacity());
+ MOZ_ASSERT(src);
+ MOZ_ASSERT(isExtensible());
+
+ setDenseInitializedLength(count);
+
+#ifdef DEBUG
+ for (uint32_t i = 0; i < count; ++i) {
+ checkStoredValue(src[i]);
+ }
+#endif
+
+ memcpy(reinterpret_cast<Value*>(elements_), src, count * sizeof(Value));
+ elementsRangePostWriteBarrier(0, count);
+}
+
+inline void NativeObject::initDenseElementRange(uint32_t destStart,
+ NativeObject* src,
+ uint32_t count) {
+ MOZ_ASSERT(count <= src->getDenseInitializedLength());
+
+ // The initialized length must already be set to the correct value.
+ MOZ_ASSERT(destStart + count == getDenseInitializedLength());
+
+ if (!src->denseElementsArePacked()) {
+ markDenseElementsNotPacked();
+ }
+
+ const Value* vp = src->getDenseElements();
+#ifdef DEBUG
+ for (uint32_t i = 0; i < count; ++i) {
+ checkStoredValue(vp[i]);
+ }
+#endif
+ memcpy(reinterpret_cast<Value*>(elements_) + destStart, vp,
+ count * sizeof(Value));
+ elementsRangePostWriteBarrier(destStart, count);
+}
+
+template <typename Iter>
+inline bool NativeObject::initDenseElementsFromRange(JSContext* cx, Iter begin,
+ Iter end) {
+ // This method populates the elements of a particular Array that's an
+ // internal implementation detail of GeneratorObject. Failing any of the
+ // following means the Array has escaped and/or been mistreated.
+ MOZ_ASSERT(isExtensible());
+ MOZ_ASSERT(!isIndexed());
+ MOZ_ASSERT(is<ArrayObject>());
+ MOZ_ASSERT(as<ArrayObject>().lengthIsWritable());
+ MOZ_ASSERT(!denseElementsAreFrozen());
+ MOZ_ASSERT(getElementsHeader()->numShiftedElements() == 0);
+
+ MOZ_ASSERT(getDenseInitializedLength() == 0);
+
+ auto size = end - begin;
+ uint32_t count = uint32_t(size);
+ MOZ_ASSERT(count <= uint32_t(INT32_MAX));
+ if (count > getDenseCapacity()) {
+ if (!growElements(cx, count)) {
+ return false;
+ }
+ }
+
+ HeapSlot* sp = elements_;
+ size_t slot = 0;
+ for (; begin != end; sp++, begin++) {
+ Value v = *begin;
+#ifdef DEBUG
+ checkStoredValue(v);
+#endif
+ sp->init(this, HeapSlot::Element, slot++, v);
+ }
+ MOZ_ASSERT(slot == count);
+
+ getElementsHeader()->initializedLength = count;
+ as<ArrayObject>().setLength(count);
+ return true;
+}
+
+inline bool NativeObject::tryShiftDenseElements(uint32_t count) {
+ MOZ_ASSERT(isExtensible());
+
+ ObjectElements* header = getElementsHeader();
+ if (header->initializedLength == count ||
+ count > ObjectElements::MaxShiftedElements ||
+ header->hasNonwritableArrayLength()) {
+ return false;
+ }
+
+ shiftDenseElementsUnchecked(count);
+ return true;
+}
+
+inline void NativeObject::shiftDenseElementsUnchecked(uint32_t count) {
+ MOZ_ASSERT(isExtensible());
+
+ ObjectElements* header = getElementsHeader();
+ MOZ_ASSERT(count > 0);
+ MOZ_ASSERT(count < header->initializedLength);
+
+ if (MOZ_UNLIKELY(header->numShiftedElements() + count >
+ ObjectElements::MaxShiftedElements)) {
+ moveShiftedElements();
+ header = getElementsHeader();
+ }
+
+ prepareElementRangeForOverwrite(0, count);
+ header->addShiftedElements(count);
+
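+ // Advance elements_ past the shifted slots and slide the header along with
+ // it, so it stays immediately before the first live element. The vacated
+ // slots are accounted for by the shifted-elements count rather than copied.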
+ elements_ += count;
+ ObjectElements* newHeader = getElementsHeader();
+ memmove(newHeader, header, sizeof(ObjectElements));
+}
+
+inline void NativeObject::moveDenseElements(uint32_t dstStart,
+ uint32_t srcStart, uint32_t count) {
+ MOZ_ASSERT(dstStart + count <= getDenseCapacity());
+ MOZ_ASSERT(srcStart + count <= getDenseInitializedLength());
+ MOZ_ASSERT(isExtensible());
+
+ /*
+ * Using memmove here would skip write barriers. Also, we need to consider
+ * an array containing [A, B, C], in the following situation:
+ *
+ * 1. Incremental GC marks slot 0 of array (i.e., A), then returns to JS code.
+ * 2. JS code moves slots 1..2 into slots 0..1, so it contains [B, C, C].
+ * 3. Incremental GC finishes by marking slots 1 and 2 (i.e., C).
+ *
+ * Since normal marking never happens on B, it is very important that the
+ * write barrier is invoked here on B, despite the fact that it exists in
+ * the array before and after the move.
+ */
+ if (zone()->needsIncrementalBarrier()) {
+ uint32_t numShifted = getElementsHeader()->numShiftedElements();
+ if (dstStart < srcStart) {
+ HeapSlot* dst = elements_ + dstStart;
+ HeapSlot* src = elements_ + srcStart;
+ for (uint32_t i = 0; i < count; i++, dst++, src++) {
+ dst->set(this, HeapSlot::Element, dst - elements_ + numShifted, *src);
+ }
+ } else {
+ HeapSlot* dst = elements_ + dstStart + count - 1;
+ HeapSlot* src = elements_ + srcStart + count - 1;
+ for (uint32_t i = 0; i < count; i++, dst--, src--) {
+ dst->set(this, HeapSlot::Element, dst - elements_ + numShifted, *src);
+ }
+ }
+ } else {
+ memmove(elements_ + dstStart, elements_ + srcStart,
+ count * sizeof(HeapSlot));
+ elementsRangePostWriteBarrier(dstStart, count);
+ }
+}
+
+inline void NativeObject::reverseDenseElementsNoPreBarrier(uint32_t length) {
+ MOZ_ASSERT(!zone()->needsIncrementalBarrier());
+
+ MOZ_ASSERT(isExtensible());
+
+ MOZ_ASSERT(length > 1);
+ MOZ_ASSERT(length <= getDenseInitializedLength());
+
+ Value* valLo = reinterpret_cast<Value*>(elements_);
+ Value* valHi = valLo + (length - 1);
+ MOZ_ASSERT(valLo < valHi);
+
+ do {
+ Value origLo = *valLo;
+ *valLo = *valHi;
+ *valHi = origLo;
+ ++valLo;
+ --valHi;
+ } while (valLo < valHi);
+
+ elementsRangePostWriteBarrier(0, length);
+}
+
+inline void NativeObject::ensureDenseInitializedLength(uint32_t index,
+ uint32_t extra) {
+ // Ensure that the array's contents have been initialized up to index, and
+ // mark the elements through 'index + extra' as initialized in preparation
+ // for a write.
+
+ MOZ_ASSERT(!denseElementsAreFrozen());
+ MOZ_ASSERT(isExtensible() || (containsDenseElement(index) && extra == 1));
+ MOZ_ASSERT(index + extra <= getDenseCapacity());
+
+ uint32_t initlen = getDenseInitializedLength();
+ if (index + extra <= initlen) {
+ return;
+ }
+
+ MOZ_ASSERT(isExtensible());
+
+ if (index > initlen) {
+ markDenseElementsNotPacked();
+ }
+
+ uint32_t numShifted = getElementsHeader()->numShiftedElements();
+ size_t offset = initlen;
+ for (HeapSlot* sp = elements_ + initlen; sp != elements_ + (index + extra);
+ sp++, offset++) {
+ sp->init(this, HeapSlot::Element, offset + numShifted,
+ MagicValue(JS_ELEMENTS_HOLE));
+ }
+
+ getElementsHeader()->initializedLength = index + extra;
+}
+
+DenseElementResult NativeObject::extendDenseElements(JSContext* cx,
+ uint32_t requiredCapacity,
+ uint32_t extra) {
+ MOZ_ASSERT(isExtensible());
+
+ /*
+ * Don't grow elements for objects which already have sparse indexes.
+ * This avoids needing to count non-hole elements in willBeSparseElements
+ * every time a new index is added.
+ */
+ if (isIndexed()) {
+ return DenseElementResult::Incomplete;
+ }
+
+ /*
+ * We also use the extra argument as a hint about the number of non-hole
+ * elements to be inserted.
+ */
+ if (requiredCapacity > MIN_SPARSE_INDEX &&
+ willBeSparseElements(requiredCapacity, extra)) {
+ return DenseElementResult::Incomplete;
+ }
+
+ if (!growElements(cx, requiredCapacity)) {
+ return DenseElementResult::Failure;
+ }
+
+ return DenseElementResult::Success;
+}
+
+inline DenseElementResult NativeObject::ensureDenseElements(JSContext* cx,
+ uint32_t index,
+ uint32_t extra) {
+ MOZ_ASSERT(is<NativeObject>());
+ MOZ_ASSERT(isExtensible() || (containsDenseElement(index) && extra == 1));
+
+ uint32_t requiredCapacity;
+ if (extra == 1) {
+ /* Optimize for the common case. */
+ if (index < getDenseCapacity()) {
+ ensureDenseInitializedLength(index, 1);
+ return DenseElementResult::Success;
+ }
+ requiredCapacity = index + 1;
+ if (requiredCapacity == 0) {
+ /* Overflow. */
+ return DenseElementResult::Incomplete;
+ }
+ } else {
+ requiredCapacity = index + extra;
+ if (requiredCapacity < index) {
+ /* Overflow. */
+ return DenseElementResult::Incomplete;
+ }
+ if (requiredCapacity <= getDenseCapacity()) {
+ ensureDenseInitializedLength(index, extra);
+ return DenseElementResult::Success;
+ }
+ }
+
+ DenseElementResult result = extendDenseElements(cx, requiredCapacity, extra);
+ if (result != DenseElementResult::Success) {
+ return result;
+ }
+
+ ensureDenseInitializedLength(index, extra);
+ return DenseElementResult::Success;
+}
+
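+// Store |count| values starting at dense index |start|, growing the dense
+// elements and bumping an array's length as needed. Returns Incomplete when
+// the caller must fall back to a slower, possibly sparse, path.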
+inline DenseElementResult NativeObject::setOrExtendDenseElements(
+ JSContext* cx, uint32_t start, const Value* vp, uint32_t count) {
+ if (!isExtensible()) {
+ return DenseElementResult::Incomplete;
+ }
+
+ if (is<ArrayObject>() && !as<ArrayObject>().lengthIsWritable() &&
+ start + count >= as<ArrayObject>().length()) {
+ return DenseElementResult::Incomplete;
+ }
+
+ DenseElementResult result = ensureDenseElements(cx, start, count);
+ if (result != DenseElementResult::Success) {
+ return result;
+ }
+
+ if (is<ArrayObject>() && start + count >= as<ArrayObject>().length()) {
+ as<ArrayObject>().setLength(start + count);
+ }
+
+ copyDenseElements(start, vp, count);
+ return DenseElementResult::Success;
+}
+
+inline bool NativeObject::isInWholeCellBuffer() const {
+ const gc::TenuredCell* cell = &asTenured();
+ gc::ArenaCellSet* cells = cell->arena()->bufferedCells();
+ return cells && cells->hasCell(cell);
+}
+
+/* static */
+inline NativeObject* NativeObject::create(
+ JSContext* cx, js::gc::AllocKind kind, js::gc::Heap heap,
+ js::Handle<SharedShape*> shape, js::gc::AllocSite* site /* = nullptr */) {
+ debugCheckNewObject(shape, kind, heap);
+
+ const JSClass* clasp = shape->getObjectClass();
+ MOZ_ASSERT(clasp->isNativeObject());
+ MOZ_ASSERT(!clasp->isJSFunction(), "should use JSFunction::create");
+ MOZ_ASSERT(clasp != &ArrayObject::class_, "should use ArrayObject::create");
+
+ const uint32_t nfixed = shape->numFixedSlots();
+ const uint32_t slotSpan = shape->slotSpan();
+ const size_t nDynamicSlots = calculateDynamicSlots(nfixed, slotSpan, clasp);
+
+ NativeObject* nobj = cx->newCell<NativeObject>(kind, heap, clasp, site);
+ if (!nobj) {
+ return nullptr;
+ }
+
+ nobj->initShape(shape);
+ nobj->setEmptyElements();
+
+ if (!nDynamicSlots) {
+ nobj->initEmptyDynamicSlots();
+ } else if (!nobj->allocateInitialSlots(cx, nDynamicSlots)) {
+ return nullptr;
+ }
+
+ if (slotSpan > 0) {
+ nobj->initSlots(nfixed, slotSpan);
+ }
+
+ if (MOZ_UNLIKELY(cx->realm()->hasAllocationMetadataBuilder())) {
+ if (clasp->shouldDelayMetadataBuilder()) {
+ cx->realm()->setObjectPendingMetadata(nobj);
+ } else {
+ nobj = SetNewObjectMetadata(cx, nobj);
+ }
+ }
+
+ js::gc::gcprobes::CreateObject(nobj);
+
+ return nobj;
+}
+
+MOZ_ALWAYS_INLINE void NativeObject::initEmptyDynamicSlots() {
+ setEmptyDynamicSlots(0);
+}
+
+MOZ_ALWAYS_INLINE void NativeObject::setDictionaryModeSlotSpan(uint32_t span) {
+ MOZ_ASSERT(inDictionaryMode());
+
+ if (!hasDynamicSlots()) {
+ setEmptyDynamicSlots(span);
+ return;
+ }
+
+ getSlotsHeader()->setDictionarySlotSpan(span);
+}
+
+MOZ_ALWAYS_INLINE void NativeObject::setEmptyDynamicSlots(
+ uint32_t dictionarySlotSpan) {
+ MOZ_ASSERT_IF(!inDictionaryMode(), dictionarySlotSpan == 0);
+ MOZ_ASSERT(dictionarySlotSpan <= MAX_FIXED_SLOTS);
+
+ slots_ = emptyObjectSlotsForDictionaryObject[dictionarySlotSpan];
+
+ MOZ_ASSERT(getSlotsHeader()->capacity() == 0);
+ MOZ_ASSERT(getSlotsHeader()->dictionarySlotSpan() == dictionarySlotSpan);
+ MOZ_ASSERT(!hasDynamicSlots());
+ MOZ_ASSERT(!hasUniqueId());
+}
+
+MOZ_ALWAYS_INLINE bool NativeObject::setShapeAndAddNewSlots(
+ JSContext* cx, SharedShape* newShape, uint32_t oldSpan, uint32_t newSpan) {
+ MOZ_ASSERT(!inDictionaryMode());
+ MOZ_ASSERT(newShape->isShared());
+ MOZ_ASSERT(newShape->zone() == zone());
+ MOZ_ASSERT(newShape->numFixedSlots() == numFixedSlots());
+ MOZ_ASSERT(newShape->getObjectClass() == getClass());
+
+ MOZ_ASSERT(oldSpan < newSpan);
+ MOZ_ASSERT(sharedShape()->slotSpan() == oldSpan);
+ MOZ_ASSERT(newShape->slotSpan() == newSpan);
+
+ uint32_t numFixed = newShape->numFixedSlots();
+ if (newSpan > numFixed) {
+ uint32_t oldCapacity = numDynamicSlots();
+ uint32_t newCapacity =
+ calculateDynamicSlots(numFixed, newSpan, newShape->getObjectClass());
+ MOZ_ASSERT(oldCapacity <= newCapacity);
+
+ if (oldCapacity < newCapacity) {
+ if (MOZ_UNLIKELY(!growSlots(cx, oldCapacity, newCapacity))) {
+ return false;
+ }
+ }
+ }
+
+ // Initialize slots [oldSpan, newSpan). Use the *Unchecked version because
+ // the shape's slot span does not reflect the allocated slots at this
+ // point.
+ auto initRange = [](HeapSlot* start, HeapSlot* end) {
+ for (HeapSlot* slot = start; slot < end; slot++) {
+ slot->initAsUndefined();
+ }
+ };
+ forEachSlotRangeUnchecked(oldSpan, newSpan, initRange);
+
+ setShape(newShape);
+ return true;
+}
+
+MOZ_ALWAYS_INLINE bool NativeObject::setShapeAndAddNewSlot(
+ JSContext* cx, SharedShape* newShape, uint32_t slot) {
+ MOZ_ASSERT(!inDictionaryMode());
+ MOZ_ASSERT(newShape->isShared());
+ MOZ_ASSERT(newShape->zone() == zone());
+ MOZ_ASSERT(newShape->numFixedSlots() == numFixedSlots());
+
+ MOZ_ASSERT(newShape->base() == shape()->base());
+ MOZ_ASSERT(newShape->slotSpan() == sharedShape()->slotSpan() + 1);
+ MOZ_ASSERT(newShape->slotSpan() == slot + 1);
+
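+ // The new slot lands either in the object's inline (fixed) storage or in
+ // the dynamic slots array; the latter is grown only when the slot falls
+ // past the current dynamic capacity.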
+ uint32_t numFixed = newShape->numFixedSlots();
+ if (slot < numFixed) {
+ initFixedSlot(slot, UndefinedValue());
+ } else {
+ uint32_t dynamicSlotIndex = slot - numFixed;
+ if (dynamicSlotIndex >= numDynamicSlots()) {
+ if (MOZ_UNLIKELY(!growSlotsForNewSlot(cx, numFixed, slot))) {
+ return false;
+ }
+ }
+ initDynamicSlot(numFixed, slot, UndefinedValue());
+ }
+
+ setShape(newShape);
+ return true;
+}
+
+inline js::gc::AllocKind NativeObject::allocKindForTenure() const {
+ using namespace js::gc;
+ AllocKind kind = GetGCObjectFixedSlotsKind(numFixedSlots());
+ MOZ_ASSERT(!IsBackgroundFinalized(kind));
+ if (!CanChangeToBackgroundAllocKind(kind, getClass())) {
+ return kind;
+ }
+ return ForegroundToBackgroundAllocKind(kind);
+}
+
+inline js::GlobalObject& NativeObject::global() const { return nonCCWGlobal(); }
+
+inline bool NativeObject::denseElementsHaveMaybeInIterationFlag() {
+ if (!getElementsHeader()->maybeInIteration()) {
+ AssertDenseElementsNotIterated(this);
+ return false;
+ }
+ return true;
+}
+
+inline bool NativeObject::denseElementsMaybeInIteration() {
+ if (!denseElementsHaveMaybeInIterationFlag()) {
+ return false;
+ }
+ return compartment()->objectMaybeInIteration(this);
+}
+
+/*
+ * Call obj's resolve hook.
+ *
+ * cx and id are the parameters initially passed to the ongoing lookup;
+ * propp and recursedp are its out parameters.
+ *
+ * There are four possible outcomes:
+ *
+ * - On failure, report an error or exception and return false.
+ *
+ * - If we are already resolving a property of obj, call setRecursiveResolve on
+ * propp and return true.
+ *
+ * - If the resolve hook finds or defines the sought property, set propp
+ * appropriately, and return true.
+ *
+ * - Otherwise no property was resolved. Set propp to NotFound and return true.
+ */
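+// (Resolve hooks let a class define properties lazily; global objects, for
+// example, use one to create standard built-ins on first lookup.)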
+static MOZ_ALWAYS_INLINE bool CallResolveOp(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id,
+ PropertyResult* propp) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ // Avoid recursion on (obj, id) already being resolved on cx.
+ AutoResolving resolving(cx, obj, id);
+ if (resolving.alreadyStarted()) {
+ // Already resolving id in obj, suppress recursion.
+ propp->setRecursiveResolve();
+ return true;
+ }
+
+ bool resolved = false;
+ AutoRealm ar(cx, obj);
+ if (!obj->getClass()->getResolve()(cx, obj, id, &resolved)) {
+ return false;
+ }
+
+ if (!resolved) {
+ propp->setNotFound();
+ return true;
+ }
+
+ // Assert that the mayResolve hook, if there is one, returns true for this
+ // property.
+ MOZ_ASSERT_IF(obj->getClass()->getMayResolve(),
+ obj->getClass()->getMayResolve()(cx->names(), id, obj));
+
+ if (id.isInt()) {
+ uint32_t index = id.toInt();
+ if (obj->containsDenseElement(index)) {
+ propp->setDenseElement(index);
+ return true;
+ }
+ }
+
+ MOZ_ASSERT(!obj->is<TypedArrayObject>());
+
+ mozilla::Maybe<PropertyInfo> prop = obj->lookup(cx, id);
+ if (prop.isSome()) {
+ propp->setNativeProperty(*prop);
+ } else {
+ propp->setNotFound();
+ }
+
+ return true;
+}
+
+enum class LookupResolveMode {
+ IgnoreResolve,
+ CheckResolve,
+ CheckMayResolve,
+};
+
+template <AllowGC allowGC,
+ LookupResolveMode resolveMode = LookupResolveMode::CheckResolve>
+static MOZ_ALWAYS_INLINE bool NativeLookupOwnPropertyInline(
+ JSContext* cx, typename MaybeRooted<NativeObject*, allowGC>::HandleType obj,
+ typename MaybeRooted<jsid, allowGC>::HandleType id, PropertyResult* propp) {
+ // Native objects should avoid `lookupProperty` hooks, and those that use
+ // them should avoid recursively triggering lookup; the one class that still
+ // violates this guidance is ModuleEnvironmentObject.
+ MOZ_ASSERT_IF(obj->getOpsLookupProperty(),
+ obj->template is<ModuleEnvironmentObject>());
+#ifdef ENABLE_RECORD_TUPLE
+ MOZ_ASSERT(!js::IsExtendedPrimitive(*obj));
+#endif
+
+ // Check for a native dense element.
+ if (id.isInt()) {
+ uint32_t index = id.toInt();
+ if (obj->containsDenseElement(index)) {
+ propp->setDenseElement(index);
+ return true;
+ }
+ }
+
+ // Check for a typed array element. Integer lookups always finish here
+ // so that integer properties on the prototype are ignored even for out
+ // of bounds accesses.
+ if (obj->template is<TypedArrayObject>()) {
+ if (mozilla::Maybe<uint64_t> index = ToTypedArrayIndex(id)) {
+ uint64_t idx = index.value();
+ if (idx < obj->template as<TypedArrayObject>().length()) {
+ propp->setTypedArrayElement(idx);
+ } else {
+ propp->setTypedArrayOutOfRange();
+ }
+ return true;
+ }
+ }
+
+ MOZ_ASSERT(cx->compartment() == obj->compartment());
+
+ // Check for a native property. Call Shape::lookup directly (instead of
+ // NativeObject::lookup) because it's inlined.
+ uint32_t index;
+ if (PropMap* map = obj->shape()->lookup(cx, id, &index)) {
+ propp->setNativeProperty(map->getPropertyInfo(index));
+ return true;
+ }
+
+ // Some callers explicitly want us to ignore the resolve hook entirely. In
+ // that case, we report the property as NotFound.
+ if constexpr (resolveMode == LookupResolveMode::IgnoreResolve) {
+ propp->setNotFound();
+ return true;
+ }
+
+ // JITs in particular use the `mayResolve` hook to determine that a JSClass
+ // can never resolve this property name (for all instances of the class).
+ if constexpr (resolveMode == LookupResolveMode::CheckMayResolve) {
+ static_assert(allowGC == false,
+ "CheckMayResolve can only be used with NoGC");
+
+ MOZ_ASSERT(propp->isNotFound());
+ return !ClassMayResolveId(cx->names(), obj->getClass(), id, obj);
+ }
+
+ MOZ_ASSERT(resolveMode == LookupResolveMode::CheckResolve);
+
+ // If there is no resolve hook, the property definitely does not exist.
+ if (obj->getClass()->getResolve()) {
+ if constexpr (!allowGC) {
+ return false;
+ } else {
+ return CallResolveOp(cx, obj, id, propp);
+ }
+ }
+
+ propp->setNotFound();
+ return true;
+}
+
+/*
+ * Simplified version of NativeLookupOwnPropertyInline that doesn't call
+ * resolve hooks.
+ */
+[[nodiscard]] static inline bool NativeLookupOwnPropertyNoResolve(
+ JSContext* cx, NativeObject* obj, jsid id, PropertyResult* result) {
+ return NativeLookupOwnPropertyInline<NoGC, LookupResolveMode::IgnoreResolve>(
+ cx, obj, id, result);
+}
+
+template <AllowGC allowGC,
+ LookupResolveMode resolveMode = LookupResolveMode::CheckResolve>
+static MOZ_ALWAYS_INLINE bool NativeLookupPropertyInline(
+ JSContext* cx, typename MaybeRooted<NativeObject*, allowGC>::HandleType obj,
+ typename MaybeRooted<jsid, allowGC>::HandleType id,
+ typename MaybeRooted<
+ std::conditional_t<allowGC == AllowGC::CanGC, JSObject*, NativeObject*>,
+ allowGC>::MutableHandleType objp,
+ PropertyResult* propp) {
+ /* Search scopes starting with obj and following the prototype link. */
+ typename MaybeRooted<NativeObject*, allowGC>::RootType current(cx, obj);
+
+ while (true) {
+ if (!NativeLookupOwnPropertyInline<allowGC, resolveMode>(cx, current, id,
+ propp)) {
+ return false;
+ }
+
+ if (propp->isFound()) {
+ objp.set(current);
+ return true;
+ }
+
+ if (propp->shouldIgnoreProtoChain()) {
+ break;
+ }
+
+ JSObject* proto = current->staticPrototype();
+ if (!proto) {
+ break;
+ }
+
+ // If a `lookupProperty` hook exists, recurse into LookupProperty;
+ // otherwise we can simply loop within this call frame.
+ if (proto->getOpsLookupProperty()) {
+ if constexpr (allowGC) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ RootedObject protoRoot(cx, proto);
+ return LookupProperty(cx, protoRoot, id, objp, propp);
+ } else {
+ return false;
+ }
+ }
+
+ current = &proto->as<NativeObject>();
+ }
+
+ MOZ_ASSERT(propp->isNotFound());
+ objp.set(nullptr);
+ return true;
+}
+
+inline bool ThrowIfNotConstructing(JSContext* cx, const CallArgs& args,
+ const char* builtinName) {
+ if (args.isConstructing()) {
+ return true;
+ }
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BUILTIN_CTOR_NO_NEW, builtinName);
+ return false;
+}
+
+inline bool IsPackedArray(JSObject* obj) {
+ if (!obj->is<ArrayObject>()) {
+ return false;
+ }
+
+ ArrayObject* arr = &obj->as<ArrayObject>();
+ if (arr->getDenseInitializedLength() != arr->length()) {
+ return false;
+ }
+
+ if (!arr->denseElementsArePacked()) {
+ return false;
+ }
+
+#ifdef DEBUG
+ // Assert correctness of the NON_PACKED flag by checking that the first few
+ // elements don't contain holes.
+ uint32_t numToCheck = std::min<uint32_t>(5, arr->getDenseInitializedLength());
+ for (uint32_t i = 0; i < numToCheck; i++) {
+ MOZ_ASSERT(!arr->getDenseElement(i).isMagic(JS_ELEMENTS_HOLE));
+ }
+#endif
+
+ return true;
+}
+
+// Like AddDataProperty but optimized for plain objects. Plain objects don't
+// have an addProperty hook.
+MOZ_ALWAYS_INLINE bool AddDataPropertyToPlainObject(
+ JSContext* cx, Handle<PlainObject*> obj, HandleId id, HandleValue v,
+ uint32_t* resultSlot = nullptr) {
+ MOZ_ASSERT(!id.isInt());
+
+ uint32_t slot;
+ if (!resultSlot) {
+ resultSlot = &slot;
+ }
+ if (!NativeObject::addProperty(
+ cx, obj, id, PropertyFlags::defaultDataPropFlags, resultSlot)) {
+ return false;
+ }
+
+ obj->initSlot(*resultSlot, v);
+
+ MOZ_ASSERT(!obj->getClass()->getAddProperty());
+ return true;
+}
+
+} // namespace js
+
+#endif /* vm_NativeObject_inl_h */
diff --git a/js/src/vm/NativeObject.cpp b/js/src/vm/NativeObject.cpp
new file mode 100644
index 0000000000..a9fd8916ec
--- /dev/null
+++ b/js/src/vm/NativeObject.cpp
@@ -0,0 +1,2854 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/NativeObject-inl.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Maybe.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "gc/MaybeRooted.h"
+#include "gc/StableCellHasher.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/Value.h"
+#include "vm/EqualityOperations.h" // js::SameValue
+#include "vm/GetterSetter.h" // js::GetterSetter
+#include "vm/Interpreter.h" // js::CallGetter, js::CallSetter
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/TypedArrayObject.h"
+
+#ifdef ENABLE_RECORD_TUPLE
+# include "builtin/RecordObject.h"
+# include "builtin/TupleObject.h"
+# include "vm/RecordTupleShared.h"
+#endif
+
+#include "gc/Nursery-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/Shape-inl.h"
+
+using namespace js;
+
+using JS::AutoCheckCannotGC;
+using mozilla::CheckedInt;
+using mozilla::PodCopy;
+using mozilla::RoundUpPow2;
+
+struct EmptyObjectElements {
+ const ObjectElements emptyElementsHeader;
+
+ // Add an extra (unused) Value to make sure an out-of-bounds index when
+ // masked (resulting in index 0) accesses valid memory.
+ const Value val;
+
+ public:
+ constexpr EmptyObjectElements()
+ : emptyElementsHeader(0, 0), val(UndefinedValue()) {}
+ explicit constexpr EmptyObjectElements(ObjectElements::SharedMemory shmem)
+ : emptyElementsHeader(0, 0, shmem), val(UndefinedValue()) {}
+};
+
+static constexpr EmptyObjectElements emptyElementsHeader;
+
+/* Objects with no elements share one empty set of elements. */
+HeapSlot* const js::emptyObjectElements = reinterpret_cast<HeapSlot*>(
+ uintptr_t(&emptyElementsHeader) + sizeof(ObjectElements));
+
+static constexpr EmptyObjectElements emptyElementsHeaderShared(
+ ObjectElements::SharedMemory::IsShared);
+
+/* Objects with no elements share one empty set of elements. */
+HeapSlot* const js::emptyObjectElementsShared = reinterpret_cast<HeapSlot*>(
+ uintptr_t(&emptyElementsHeaderShared) + sizeof(ObjectElements));
+
+struct EmptyObjectSlots : public ObjectSlots {
+ explicit constexpr EmptyObjectSlots(size_t dictionarySlotSpan)
+ : ObjectSlots(0, dictionarySlotSpan, NoUniqueIdInSharedEmptySlots) {}
+};
+
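+// One shared empty-slots header exists for each possible dictionary slot
+// span (0..MAX_FIXED_SLOTS), so a dictionary object whose slots all fit in
+// fixed storage can record its span without allocating dynamic slots.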
+static constexpr EmptyObjectSlots emptyObjectSlotsHeaders[17] = {
+ EmptyObjectSlots(0), EmptyObjectSlots(1), EmptyObjectSlots(2),
+ EmptyObjectSlots(3), EmptyObjectSlots(4), EmptyObjectSlots(5),
+ EmptyObjectSlots(6), EmptyObjectSlots(7), EmptyObjectSlots(8),
+ EmptyObjectSlots(9), EmptyObjectSlots(10), EmptyObjectSlots(11),
+ EmptyObjectSlots(12), EmptyObjectSlots(13), EmptyObjectSlots(14),
+ EmptyObjectSlots(15), EmptyObjectSlots(16)};
+
+static_assert(std::size(emptyObjectSlotsHeaders) ==
+ NativeObject::MAX_FIXED_SLOTS + 1);
+
+HeapSlot* const js::emptyObjectSlotsForDictionaryObject[17] = {
+ emptyObjectSlotsHeaders[0].slots(), emptyObjectSlotsHeaders[1].slots(),
+ emptyObjectSlotsHeaders[2].slots(), emptyObjectSlotsHeaders[3].slots(),
+ emptyObjectSlotsHeaders[4].slots(), emptyObjectSlotsHeaders[5].slots(),
+ emptyObjectSlotsHeaders[6].slots(), emptyObjectSlotsHeaders[7].slots(),
+ emptyObjectSlotsHeaders[8].slots(), emptyObjectSlotsHeaders[9].slots(),
+ emptyObjectSlotsHeaders[10].slots(), emptyObjectSlotsHeaders[11].slots(),
+ emptyObjectSlotsHeaders[12].slots(), emptyObjectSlotsHeaders[13].slots(),
+ emptyObjectSlotsHeaders[14].slots(), emptyObjectSlotsHeaders[15].slots(),
+ emptyObjectSlotsHeaders[16].slots()};
+
+static_assert(std::size(emptyObjectSlotsForDictionaryObject) ==
+ NativeObject::MAX_FIXED_SLOTS + 1);
+
+HeapSlot* const js::emptyObjectSlots = emptyObjectSlotsForDictionaryObject[0];
+
+#ifdef DEBUG
+
+bool NativeObject::canHaveNonEmptyElements() {
+ return !this->is<TypedArrayObject>();
+}
+
+#endif // DEBUG
+
+/* static */
+void ObjectElements::PrepareForPreventExtensions(JSContext* cx,
+ NativeObject* obj) {
+ if (!obj->hasEmptyElements()) {
+ obj->shrinkCapacityToInitializedLength(cx);
+ }
+
+ // shrinkCapacityToInitializedLength ensures there are no shifted elements.
+ MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
+}
+
+/* static */
+void ObjectElements::PreventExtensions(NativeObject* obj) {
+ MOZ_ASSERT(!obj->isExtensible());
+ MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
+ MOZ_ASSERT(obj->getDenseInitializedLength() == obj->getDenseCapacity());
+
+ if (!obj->hasEmptyElements()) {
+ obj->getElementsHeader()->setNotExtensible();
+ }
+}
+
+/* static */
+bool ObjectElements::FreezeOrSeal(JSContext* cx, Handle<NativeObject*> obj,
+ IntegrityLevel level) {
+ MOZ_ASSERT_IF(level == IntegrityLevel::Frozen && obj->is<ArrayObject>(),
+ !obj->as<ArrayObject>().lengthIsWritable());
+ MOZ_ASSERT(!obj->isExtensible());
+ MOZ_ASSERT(obj->getElementsHeader()->numShiftedElements() == 0);
+
+ if (obj->hasEmptyElements() || obj->denseElementsAreFrozen()) {
+ return true;
+ }
+
+ if (level == IntegrityLevel::Frozen) {
+ if (!JSObject::setFlag(cx, obj, ObjectFlag::FrozenElements)) {
+ return false;
+ }
+ }
+
+ if (!obj->denseElementsAreSealed()) {
+ obj->getElementsHeader()->seal();
+ }
+
+ if (level == IntegrityLevel::Frozen) {
+ obj->getElementsHeader()->freeze();
+ }
+
+ return true;
+}
+
+#ifdef DEBUG
+static mozilla::Atomic<bool, mozilla::Relaxed> gShapeConsistencyChecksEnabled(
+ false);
+
+/* static */
+void js::NativeObject::enableShapeConsistencyChecks() {
+ gShapeConsistencyChecksEnabled = true;
+}
+
+void js::NativeObject::checkShapeConsistency() {
+ if (!gShapeConsistencyChecksEnabled) {
+ return;
+ }
+
+ MOZ_ASSERT(is<NativeObject>());
+
+ if (PropMap* map = shape()->propMap()) {
+ map->checkConsistency(this);
+ } else {
+ MOZ_ASSERT(shape()->propMapLength() == 0);
+ }
+}
+#endif
+
+#ifdef DEBUG
+
+bool js::NativeObject::slotInRange(uint32_t slot,
+ SentinelAllowed sentinel) const {
+ MOZ_ASSERT(!gc::IsForwarded(shape()));
+ uint32_t capacity = numFixedSlots() + numDynamicSlots();
+ if (sentinel == SENTINEL_ALLOWED) {
+ return slot <= capacity;
+ }
+ return slot < capacity;
+}
+
+bool js::NativeObject::slotIsFixed(uint32_t slot) const {
+ // We call numFixedSlotsMaybeForwarded() to allow reading slots of
+ // associated objects in trace hooks that may be called during a moving GC.
+ return slot < numFixedSlotsMaybeForwarded();
+}
+
+bool js::NativeObject::isNumFixedSlots(uint32_t nfixed) const {
+ // We call numFixedSlotsMaybeForwarded() to allow reading slots of
+ // associated objects in trace hooks that may be called during a moving GC.
+ return nfixed == numFixedSlotsMaybeForwarded();
+}
+
+uint32_t js::NativeObject::outOfLineNumDynamicSlots() const {
+ return numDynamicSlots();
+}
+#endif /* DEBUG */
+
+mozilla::Maybe<PropertyInfo> js::NativeObject::lookup(JSContext* cx, jsid id) {
+ MOZ_ASSERT(is<NativeObject>());
+ uint32_t index;
+ if (PropMap* map = shape()->lookup(cx, id, &index)) {
+ return mozilla::Some(map->getPropertyInfo(index));
+ }
+ return mozilla::Nothing();
+}
+
+mozilla::Maybe<PropertyInfo> js::NativeObject::lookupPure(jsid id) {
+ MOZ_ASSERT(is<NativeObject>());
+ uint32_t index;
+ if (PropMap* map = shape()->lookupPure(id, &index)) {
+ return mozilla::Some(map->getPropertyInfo(index));
+ }
+ return mozilla::Nothing();
+}
+
+bool NativeObject::setUniqueId(JSContext* cx, uint64_t uid) {
+ MOZ_ASSERT(!hasUniqueId());
+ MOZ_ASSERT(!gc::HasUniqueId(this));
+
+ return setOrUpdateUniqueId(cx, uid);
+}
+
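+// The unique ID is stored in the ObjectSlots header, so an object that has
+// no dynamic slots must first allocate a (possibly zero-capacity) slots
+// buffer to hold one.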
+bool NativeObject::setOrUpdateUniqueId(JSContext* cx, uint64_t uid) {
+ if (!hasDynamicSlots() && !allocateSlots(cx, 0)) {
+ return false;
+ }
+
+ getSlotsHeader()->setUniqueId(uid);
+
+ return true;
+}
+
+bool NativeObject::growSlots(JSContext* cx, uint32_t oldCapacity,
+ uint32_t newCapacity) {
+ MOZ_ASSERT(newCapacity > oldCapacity);
+
+ /*
+ * Slot capacities are determined by the span of allocated objects. Due to
+ * the limited number of bits to store shape slots, object growth is
+ * throttled well before the slot capacity can overflow.
+ */
+ NativeObject::slotsSizeMustNotOverflow();
+ MOZ_ASSERT(newCapacity <= MAX_SLOTS_COUNT);
+
+ if (!hasDynamicSlots()) {
+ return allocateSlots(cx, newCapacity);
+ }
+
+ uint64_t uid = maybeUniqueId();
+
+ uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);
+
+ uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();
+
+ uint32_t oldAllocated = ObjectSlots::allocCount(oldCapacity);
+
+ ObjectSlots* oldHeaderSlots = ObjectSlots::fromSlots(slots_);
+ MOZ_ASSERT(oldHeaderSlots->capacity() == oldCapacity);
+
+ HeapSlot* allocation = ReallocateObjectBuffer<HeapSlot>(
+ cx, this, reinterpret_cast<HeapSlot*>(oldHeaderSlots), oldAllocated,
+ newAllocated);
+ if (!allocation) {
+ return false; /* Leave slots at its old size. */
+ }
+
+ auto* newHeaderSlots =
+ new (allocation) ObjectSlots(newCapacity, dictionarySpan, uid);
+ slots_ = newHeaderSlots->slots();
+
+ Debug_SetSlotRangeToCrashOnTouch(slots_ + oldCapacity,
+ newCapacity - oldCapacity);
+
+ RemoveCellMemory(this, ObjectSlots::allocSize(oldCapacity),
+ MemoryUse::ObjectSlots);
+ AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
+ MemoryUse::ObjectSlots);
+
+ MOZ_ASSERT(hasDynamicSlots());
+ return true;
+}
+
+bool NativeObject::growSlotsForNewSlot(JSContext* cx, uint32_t numFixed,
+ uint32_t slot) {
+ MOZ_ASSERT(slotSpan() == slot);
+ MOZ_ASSERT(shape()->numFixedSlots() == numFixed);
+ MOZ_ASSERT(slot >= numFixed);
+
+ uint32_t newCapacity = calculateDynamicSlots(numFixed, slot + 1, getClass());
+
+ uint32_t oldCapacity = numDynamicSlots();
+ MOZ_ASSERT(oldCapacity < newCapacity);
+
+ return growSlots(cx, oldCapacity, newCapacity);
+}
+
+bool NativeObject::allocateInitialSlots(JSContext* cx, uint32_t capacity) {
+ uint32_t count = ObjectSlots::allocCount(capacity);
+ HeapSlot* allocation = AllocateObjectBuffer<HeapSlot>(cx, this, count);
+ if (!allocation) {
+ // The new object will be unreachable, but we still have to make it safe
+ // for finalization. Also we must check for it during GC compartment
+ // checks (see IsPartiallyInitializedObject).
+ initEmptyDynamicSlots();
+ return false;
+ }
+
+ auto* headerSlots = new (allocation)
+ ObjectSlots(capacity, 0, ObjectSlots::NoUniqueIdInDynamicSlots);
+ slots_ = headerSlots->slots();
+
+ Debug_SetSlotRangeToCrashOnTouch(slots_, capacity);
+
+ if (!IsInsideNursery(this)) {
+ AddCellMemory(this, ObjectSlots::allocSize(capacity),
+ MemoryUse::ObjectSlots);
+ }
+
+ MOZ_ASSERT(hasDynamicSlots());
+ return true;
+}
+
+bool NativeObject::allocateSlots(JSContext* cx, uint32_t newCapacity) {
+ MOZ_ASSERT(!hasUniqueId());
+ MOZ_ASSERT(!hasDynamicSlots());
+
+ uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);
+
+ uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();
+
+ HeapSlot* allocation = AllocateObjectBuffer<HeapSlot>(cx, this, newAllocated);
+ if (!allocation) {
+ return false;
+ }
+
+ auto* newHeaderSlots = new (allocation) ObjectSlots(
+ newCapacity, dictionarySpan, ObjectSlots::NoUniqueIdInDynamicSlots);
+ slots_ = newHeaderSlots->slots();
+
+ Debug_SetSlotRangeToCrashOnTouch(slots_, newCapacity);
+
+ AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
+ MemoryUse::ObjectSlots);
+
+ MOZ_ASSERT(hasDynamicSlots());
+ return true;
+}
+
+/* static */
+bool NativeObject::growSlotsPure(JSContext* cx, NativeObject* obj,
+ uint32_t newCapacity) {
+ // IC code calls this directly.
+ AutoUnsafeCallWithABI unsafe;
+
+ if (!obj->growSlots(cx, obj->numDynamicSlots(), newCapacity)) {
+ cx->recoverFromOutOfMemory();
+ return false;
+ }
+
+ return true;
+}
+
+/* static */
+bool NativeObject::addDenseElementPure(JSContext* cx, NativeObject* obj) {
+ // IC code calls this directly.
+ AutoUnsafeCallWithABI unsafe;
+
+ MOZ_ASSERT(obj->getDenseInitializedLength() == obj->getDenseCapacity());
+ MOZ_ASSERT(obj->isExtensible());
+ MOZ_ASSERT(!obj->isIndexed());
+ MOZ_ASSERT(!obj->is<TypedArrayObject>());
+ MOZ_ASSERT_IF(obj->is<ArrayObject>(),
+ obj->as<ArrayObject>().lengthIsWritable());
+
+ // growElements will also report OOM if the number of dense elements would
+ // exceed MAX_DENSE_ELEMENTS_COUNT. See goodElementsAllocationAmount.
+ uint32_t oldCapacity = obj->getDenseCapacity();
+ if (MOZ_UNLIKELY(!obj->growElements(cx, oldCapacity + 1))) {
+ cx->recoverFromOutOfMemory();
+ return false;
+ }
+
+ MOZ_ASSERT(obj->getDenseCapacity() > oldCapacity);
+ MOZ_ASSERT(obj->getDenseCapacity() <= MAX_DENSE_ELEMENTS_COUNT);
+ return true;
+}
+
+static inline void FreeSlots(JSContext* cx, NativeObject* obj,
+ ObjectSlots* slots, size_t nbytes) {
+ // Note: this is called when shrinking slots, not from the finalizer.
+ MOZ_ASSERT(cx->isMainThreadContext());
+
+ if (obj->isTenured()) {
+ MOZ_ASSERT(!cx->nursery().isInside(slots));
+ js_free(slots);
+ } else {
+ cx->nursery().freeBuffer(slots, nbytes);
+ }
+}
+
+void NativeObject::shrinkSlots(JSContext* cx, uint32_t oldCapacity,
+ uint32_t newCapacity) {
+ MOZ_ASSERT(hasDynamicSlots());
+ MOZ_ASSERT(newCapacity < oldCapacity);
+ MOZ_ASSERT(oldCapacity == getSlotsHeader()->capacity());
+
+ ObjectSlots* oldHeaderSlots = ObjectSlots::fromSlots(slots_);
+ MOZ_ASSERT(oldHeaderSlots->capacity() == oldCapacity);
+
+ uint64_t uid = maybeUniqueId();
+
+ uint32_t oldAllocated = ObjectSlots::allocCount(oldCapacity);
+
+ if (newCapacity == 0 && uid == 0) {
+ size_t nbytes = ObjectSlots::allocSize(oldCapacity);
+ RemoveCellMemory(this, nbytes, MemoryUse::ObjectSlots);
+ FreeSlots(cx, this, oldHeaderSlots, nbytes);
+ // dictionarySlotSpan is initialized to the correct value by the callers.
+ setEmptyDynamicSlots(0);
+ return;
+ }
+
+ MOZ_ASSERT_IF(!is<ArrayObject>() && !hasUniqueId(),
+ newCapacity >= SLOT_CAPACITY_MIN);
+
+ uint32_t dictionarySpan = getSlotsHeader()->dictionarySlotSpan();
+
+ uint32_t newAllocated = ObjectSlots::allocCount(newCapacity);
+
+ HeapSlot* allocation = ReallocateObjectBuffer<HeapSlot>(
+ cx, this, reinterpret_cast<HeapSlot*>(oldHeaderSlots), oldAllocated,
+ newAllocated);
+ if (!allocation) {
+ // It's possible for realloc to fail when shrinking an allocation. In this
+ // case we continue using the original allocation but still update the
+ // capacity to the new requested capacity, which is smaller than the actual
+ // capacity.
+ cx->recoverFromOutOfMemory();
+ allocation = reinterpret_cast<HeapSlot*>(getSlotsHeader());
+ }
+
+ RemoveCellMemory(this, ObjectSlots::allocSize(oldCapacity),
+ MemoryUse::ObjectSlots);
+ AddCellMemory(this, ObjectSlots::allocSize(newCapacity),
+ MemoryUse::ObjectSlots);
+
+ auto* newHeaderSlots =
+ new (allocation) ObjectSlots(newCapacity, dictionarySpan, uid);
+ slots_ = newHeaderSlots->slots();
+}
+
+void NativeObject::initFixedElements(gc::AllocKind kind, uint32_t length) {
+ uint32_t capacity =
+ gc::GetGCKindSlots(kind) - ObjectElements::VALUES_PER_HEADER;
+
+ setFixedElements();
+ new (getElementsHeader()) ObjectElements(capacity, length);
+ getElementsHeader()->flags |= ObjectElements::FIXED;
+
+ MOZ_ASSERT(hasFixedElements());
+}
+
+bool NativeObject::willBeSparseElements(uint32_t requiredCapacity,
+ uint32_t newElementsHint) {
+ MOZ_ASSERT(is<NativeObject>());
+ MOZ_ASSERT(requiredCapacity > MIN_SPARSE_INDEX);
+
+ uint32_t cap = getDenseCapacity();
+ MOZ_ASSERT(requiredCapacity >= cap);
+
+ if (requiredCapacity > MAX_DENSE_ELEMENTS_COUNT) {
+ return true;
+ }
+
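+ // Stay dense only if at least 1/SPARSE_DENSITY_RATIO of the required
+ // capacity would be in use, counting both the new-elements hint and the
+ // non-hole elements already present.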
+ uint32_t minimalDenseCount = requiredCapacity / SPARSE_DENSITY_RATIO;
+ if (newElementsHint >= minimalDenseCount) {
+ return false;
+ }
+ minimalDenseCount -= newElementsHint;
+
+ if (minimalDenseCount > cap) {
+ return true;
+ }
+
+ uint32_t len = getDenseInitializedLength();
+ const Value* elems = getDenseElements();
+ for (uint32_t i = 0; i < len; i++) {
+ if (!elems[i].isMagic(JS_ELEMENTS_HOLE) && !--minimalDenseCount) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/* static */
+DenseElementResult NativeObject::maybeDensifySparseElements(
+ JSContext* cx, Handle<NativeObject*> obj) {
+ /*
+ * Wait until after the object goes into dictionary mode, which must happen
+ * when sparsely packing any array with more than MIN_SPARSE_INDEX elements
+ * (see PropertyTree::MAX_HEIGHT).
+ */
+ if (!obj->inDictionaryMode()) {
+ return DenseElementResult::Incomplete;
+ }
+
+ /*
+ * Only measure the number of indexed properties when the slot span is a
+ * power of two, i.e. O(log n) times while the object is being populated.
+ */
+ uint32_t slotSpan = obj->slotSpan();
+ if (slotSpan != RoundUpPow2(slotSpan)) {
+ return DenseElementResult::Incomplete;
+ }
+
+ /* Watch for conditions under which an object's elements cannot be dense. */
+ if (!obj->isExtensible()) {
+ return DenseElementResult::Incomplete;
+ }
+
+ /*
+ * The indexes in the object need to be sufficiently dense before they can
+ * be converted to dense mode.
+ */
+ uint32_t numDenseElements = 0;
+ uint32_t newInitializedLength = 0;
+
+ for (ShapePropertyIter<NoGC> iter(obj->shape()); !iter.done(); iter++) {
+ uint32_t index;
+ if (!IdIsIndex(iter->key(), &index)) {
+ continue;
+ }
+ if (iter->flags() != PropertyFlags::defaultDataPropFlags) {
+ // For simplicity, only densify the object if all indexed properties can
+ // be converted to dense elements.
+ return DenseElementResult::Incomplete;
+ }
+ MOZ_ASSERT(iter->isDataProperty());
+ numDenseElements++;
+ newInitializedLength = std::max(newInitializedLength, index + 1);
+ }
+
+ if (numDenseElements * SPARSE_DENSITY_RATIO < newInitializedLength) {
+ return DenseElementResult::Incomplete;
+ }
+
+ if (newInitializedLength > MAX_DENSE_ELEMENTS_COUNT) {
+ return DenseElementResult::Incomplete;
+ }
+
+ /*
+ * This object meets all necessary restrictions, convert all indexed
+ * properties into dense elements.
+ */
+
+ if (newInitializedLength > obj->getDenseCapacity()) {
+ if (!obj->growElements(cx, newInitializedLength)) {
+ return DenseElementResult::Failure;
+ }
+ }
+
+ obj->ensureDenseInitializedLength(newInitializedLength, 0);
+
+ if (obj->compartment()->objectMaybeInIteration(obj)) {
+ // Mark the densified elements as maybe-in-iteration. See also the comment
+ // in GetIterator.
+ obj->markDenseElementsMaybeInIteration();
+ }
+
+ if (!NativeObject::densifySparseElements(cx, obj)) {
+ return DenseElementResult::Failure;
+ }
+
+ return DenseElementResult::Success;
+}
+
+void NativeObject::moveShiftedElements() {
+ MOZ_ASSERT(isExtensible());
+
+ ObjectElements* header = getElementsHeader();
+ uint32_t numShifted = header->numShiftedElements();
+ MOZ_ASSERT(numShifted > 0);
+
+ uint32_t initLength = header->initializedLength;
+
+ ObjectElements* newHeader =
+ static_cast<ObjectElements*>(getUnshiftedElementsHeader());
+ memmove(newHeader, header, sizeof(ObjectElements));
+
+ newHeader->clearShiftedElements();
+ newHeader->capacity += numShifted;
+ elements_ = newHeader->elements();
+
+ // To move the elements, temporarily update initializedLength to include
+ // the shifted elements.
+ newHeader->initializedLength += numShifted;
+
+ // Move the elements. Initialize to |undefined| to ensure pre-barriers
+ // don't see garbage.
+ for (size_t i = 0; i < numShifted; i++) {
+ initDenseElement(i, UndefinedValue());
+ }
+ moveDenseElements(0, numShifted, initLength);
+
+ // Restore the initialized length. We use setDenseInitializedLength to
+ // make sure prepareElementRangeForOverwrite is called on the shifted
+ // elements.
+ setDenseInitializedLength(initLength);
+}
+
+void NativeObject::maybeMoveShiftedElements() {
+ MOZ_ASSERT(isExtensible());
+
+ ObjectElements* header = getElementsHeader();
+ MOZ_ASSERT(header->numShiftedElements() > 0);
+
+ // Move the elements if less than a third of the allocated space is in use.
+ if (header->capacity < header->numAllocatedElements() / 3) {
+ moveShiftedElements();
+ }
+}
+
+bool NativeObject::tryUnshiftDenseElements(uint32_t count) {
+ MOZ_ASSERT(isExtensible());
+ MOZ_ASSERT(count > 0);
+
+ ObjectElements* header = getElementsHeader();
+ uint32_t numShifted = header->numShiftedElements();
+
+ if (count > numShifted) {
+ // We need more elements than are easily available. Try to make space
+ // for more elements than we need (and shift the remaining ones) so
+ // that unshifting more elements later will be fast.
+
+ // Don't bother reserving elements if the number of elements is small.
+ // Note that there's no technical reason for using this particular
+ // limit.
+ if (header->initializedLength <= 10 ||
+ header->hasNonwritableArrayLength() ||
+ MOZ_UNLIKELY(count > ObjectElements::MaxShiftedElements)) {
+ return false;
+ }
+
+ MOZ_ASSERT(header->capacity >= header->initializedLength);
+ uint32_t unusedCapacity = header->capacity - header->initializedLength;
+
+ // Determine toShift, the number of extra elements we want to make
+ // available.
+ uint32_t toShift = count - numShifted;
+ MOZ_ASSERT(toShift <= ObjectElements::MaxShiftedElements,
+ "count <= MaxShiftedElements so toShift <= MaxShiftedElements");
+
+ // Give up if we need to allocate more elements.
+ if (toShift > unusedCapacity) {
+ return false;
+ }
+
+ // Move more elements than we need, so that other unshift calls will be
+ // fast. We just have to make sure we don't exceed unusedCapacity.
+ toShift = std::min(toShift + unusedCapacity / 2, unusedCapacity);
+
+ // Ensure |numShifted + toShift| does not exceed MaxShiftedElements.
+ if (numShifted + toShift > ObjectElements::MaxShiftedElements) {
+ toShift = ObjectElements::MaxShiftedElements - numShifted;
+ }
+
+ MOZ_ASSERT(count <= numShifted + toShift);
+ MOZ_ASSERT(numShifted + toShift <= ObjectElements::MaxShiftedElements);
+ MOZ_ASSERT(toShift <= unusedCapacity);
+
+ // Now move/unshift the elements.
+ uint32_t initLen = header->initializedLength;
+ setDenseInitializedLength(initLen + toShift);
+ for (uint32_t i = 0; i < toShift; i++) {
+ initDenseElement(initLen + i, UndefinedValue());
+ }
+ moveDenseElements(toShift, 0, initLen);
+
+ // Shift the elements we just prepended.
+ shiftDenseElementsUnchecked(toShift);
+
+ // We can now fall-through to the fast path below.
+ header = getElementsHeader();
+ MOZ_ASSERT(header->numShiftedElements() == numShifted + toShift);
+
+ numShifted = header->numShiftedElements();
+ MOZ_ASSERT(count <= numShifted);
+ }
+
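+ // Fast path: "unshift" by moving elements_ back over |count| shifted slots
+ // and sliding the header with it; no element copying is required.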
+ elements_ -= count;
+ ObjectElements* newHeader = getElementsHeader();
+ memmove(newHeader, header, sizeof(ObjectElements));
+
+ newHeader->unshiftShiftedElements(count);
+
+ // Initialize to |undefined| to ensure pre-barriers don't see garbage.
+ for (uint32_t i = 0; i < count; i++) {
+ initDenseElement(i, UndefinedValue());
+ }
+
+ return true;
+}
+
+// Given a requested capacity (in elements) and (potentially) the length of an
+// array for which elements are being allocated, compute an actual allocation
+// amount (in elements). (Allocation amounts include space for an
+// ObjectElements instance, so a return value of |N| implies
+// |N - ObjectElements::VALUES_PER_HEADER| usable elements.)
+//
+// The requested/actual allocation distinction is meant to:
+//
+// * preserve amortized O(N) time to add N elements;
+// * minimize the number of unused elements beyond an array's length; and
+// * provide at least ELEMENT_CAPACITY_MIN elements no matter what (so adding
+// the first several elements to small arrays only needs one allocation).
+//
+// Note: the structure and behavior of this method follow along with
+// UnboxedArrayObject::chooseCapacityIndex. Changes to the allocation strategy
+// in one should generally be matched by the other.
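+//
+// Worked example, assuming ObjectElements::VALUES_PER_HEADER is 2: a request
+// for 10 elements needs 12 allocated values, which rounds up to the next
+// power of two, 16, leaving 14 usable elements. If that would be 2/3 or more
+// of the array's length, the allocation is instead clamped to length + 2 so
+// the capacity exactly matches the length.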
+/* static */
+bool NativeObject::goodElementsAllocationAmount(JSContext* cx,
+ uint32_t reqCapacity,
+ uint32_t length,
+ uint32_t* goodAmount) {
+ if (reqCapacity > MAX_DENSE_ELEMENTS_COUNT) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ uint32_t reqAllocated = reqCapacity + ObjectElements::VALUES_PER_HEADER;
+
+ // Handle "small" requests primarily by doubling.
+ const uint32_t Mebi = 1 << 20;
+ if (reqAllocated < Mebi) {
+ uint32_t amount =
+ mozilla::AssertedCast<uint32_t>(RoundUpPow2(reqAllocated));
+
+ // If |amount| would be 2/3 or more of the array's length, adjust
+ // it (up or down) to be equal to the array's length. This avoids
+ // allocating excess elements that aren't likely to be needed, either
+ // in this resizing or a subsequent one. The 2/3 factor is chosen so
+ // that exceptional resizings will at most triple the capacity, as
+ // opposed to the usual doubling.
+ uint32_t goodCapacity = amount - ObjectElements::VALUES_PER_HEADER;
+ if (length >= reqCapacity && goodCapacity > (length / 3) * 2) {
+ amount = length + ObjectElements::VALUES_PER_HEADER;
+ }
+
+ if (amount < ELEMENT_CAPACITY_MIN) {
+ amount = ELEMENT_CAPACITY_MIN;
+ }
+
+ *goodAmount = amount;
+
+ return true;
+ }
+
+ // The almost-doubling above wastes a lot of space for larger bucket sizes.
+ // For large amounts, switch to bucket sizes that obey this formula:
+ //
+ // count(n+1) = Math.ceil(count(n) * 1.125)
+ //
+ // where |count(n)| is the size of the nth bucket, measured in 2**20 slots.
+ // These bucket sizes still preserve amortized O(N) time to add N elements,
+ // just with a larger constant factor.
+ //
+ // The bucket size table below was generated with this JavaScript (and
+ // manual reformatting):
+ //
+ // for (let n = 1, i = 0; i < 34; i++) {
+ // print('0x' + (n * (1 << 20)).toString(16) + ', ');
+ // n = Math.ceil(n * 1.125);
+ // }
+ static constexpr uint32_t BigBuckets[] = {
+ 0x100000, 0x200000, 0x300000, 0x400000, 0x500000, 0x600000,
+ 0x700000, 0x800000, 0x900000, 0xb00000, 0xd00000, 0xf00000,
+ 0x1100000, 0x1400000, 0x1700000, 0x1a00000, 0x1e00000, 0x2200000,
+ 0x2700000, 0x2c00000, 0x3200000, 0x3900000, 0x4100000, 0x4a00000,
+ 0x5400000, 0x5f00000, 0x6b00000, 0x7900000, 0x8900000, 0x9b00000,
+ 0xaf00000, 0xc500000, 0xde00000, 0xfa00000};
+ static_assert(BigBuckets[std::size(BigBuckets) - 1] <=
+ MAX_DENSE_ELEMENTS_ALLOCATION);
+
+ // Pick the first bucket that'll fit |reqAllocated|.
+ for (uint32_t b : BigBuckets) {
+ if (b >= reqAllocated) {
+ *goodAmount = b;
+ return true;
+ }
+ }
+
+ // Otherwise, return the maximum bucket size.
+ *goodAmount = MAX_DENSE_ELEMENTS_ALLOCATION;
+ return true;
+}
+
+bool NativeObject::growElements(JSContext* cx, uint32_t reqCapacity) {
+ MOZ_ASSERT(isExtensible());
+ MOZ_ASSERT(canHaveNonEmptyElements());
+
+ // If there are shifted elements, consider moving them first. If we don't
+ // move them here, the code below will include the shifted elements in the
+ // resize.
+ uint32_t numShifted = getElementsHeader()->numShiftedElements();
+ if (numShifted > 0) {
+ // If the number of elements is small, it's cheaper to just move them as
+ // it may avoid a malloc/realloc. Note that there's no technical reason
+ // for using this particular value, but it works well in real-world use
+ // cases.
+ static const size_t MaxElementsToMoveEagerly = 20;
+
+ if (getElementsHeader()->initializedLength <= MaxElementsToMoveEagerly) {
+ moveShiftedElements();
+ } else {
+ maybeMoveShiftedElements();
+ }
+ if (getDenseCapacity() >= reqCapacity) {
+ return true;
+ }
+ // moveShiftedElements() may have changed the number of shifted elements;
+ // update `numShifted` accordingly.
+ numShifted = getElementsHeader()->numShiftedElements();
+
+ // If |reqCapacity + numShifted| overflows, we just move all shifted
+ // elements to avoid the problem.
+ CheckedInt<uint32_t> checkedReqCapacity(reqCapacity);
+ checkedReqCapacity += numShifted;
+ if (MOZ_UNLIKELY(!checkedReqCapacity.isValid())) {
+ moveShiftedElements();
+ numShifted = 0;
+ }
+ }
+
+ uint32_t oldCapacity = getDenseCapacity();
+ MOZ_ASSERT(oldCapacity < reqCapacity);
+
+ uint32_t newAllocated = 0;
+ if (is<ArrayObject>() && !as<ArrayObject>().lengthIsWritable()) {
+ // Preserve the |capacity <= length| invariant for arrays with
+ // non-writable length. See also js::ArraySetLength which initially
+ // enforces this requirement.
+ MOZ_ASSERT(reqCapacity <= as<ArrayObject>().length());
+ // Adding to reqCapacity must not overflow uint32_t.
+ MOZ_ASSERT(reqCapacity <= MAX_DENSE_ELEMENTS_COUNT);
+
+ // Then, add the header and shifted elements sizes to the new capacity
+ // to get the overall amount to allocate.
+ newAllocated = reqCapacity + numShifted + ObjectElements::VALUES_PER_HEADER;
+ } else {
+ // For arrays with writable length, and all non-Array objects, call
+ // `NativeObject::goodElementsAllocationAmount()` to determine the
+ // amount to allocate from the requested capacity and existing length.
+ if (!goodElementsAllocationAmount(cx, reqCapacity + numShifted,
+ getElementsHeader()->length,
+ &newAllocated)) {
+ return false;
+ }
+ }
+
+ // newAllocated now contains the size of the buffer we need to allocate;
+ // subtract off the header and shifted elements size to get the new capacity
+ uint32_t newCapacity =
+ newAllocated - ObjectElements::VALUES_PER_HEADER - numShifted;
+ // If the new capacity isn't strictly greater than the old capacity, then this
+ // method shouldn't have been called; if the new capacity doesn't satisfy
+ // what was requested, then one of the calculations above must have been
+ // wrong.
+ MOZ_ASSERT(newCapacity > oldCapacity && newCapacity >= reqCapacity);
+
+ // If newCapacity exceeds MAX_DENSE_ELEMENTS_COUNT, the array should become
+ // sparse.
+ MOZ_ASSERT(newCapacity <= MAX_DENSE_ELEMENTS_COUNT);
+
+ uint32_t initlen = getDenseInitializedLength();
+
+ HeapSlot* oldHeaderSlots =
+ reinterpret_cast<HeapSlot*>(getUnshiftedElementsHeader());
+ HeapSlot* newHeaderSlots;
+ uint32_t oldAllocated = 0;
+ if (hasDynamicElements()) {
+ // If the object has dynamic elements, then we might be able to resize the
+ // buffer in-place.
+
+ // First, check that adding to oldCapacity won't overflow uint32_t
+ MOZ_ASSERT(oldCapacity <= MAX_DENSE_ELEMENTS_COUNT);
+ // Then, add the header and shifted elements sizes to get the overall size
+ // of the existing buffer
+ oldAllocated = oldCapacity + ObjectElements::VALUES_PER_HEADER + numShifted;
+
+ // Finally, try to resize the buffer.
+ newHeaderSlots = ReallocateObjectBuffer<HeapSlot>(
+ cx, this, oldHeaderSlots, oldAllocated, newAllocated);
+ if (!newHeaderSlots) {
+ return false; // If the resizing failed, then we leave elements at its
+ // old size.
+ }
+ } else {
+ // If the object has fixed elements, then we always need to allocate a new
+ // buffer, because if we've reached this code, then the requested capacity
+ // is greater than the existing inline space available within the object
+ newHeaderSlots = AllocateObjectBuffer<HeapSlot>(cx, this, newAllocated);
+ if (!newHeaderSlots) {
+ return false; // Leave elements at its old size.
+ }
+
+ // Copy the initialized elements into the new buffer.
+ PodCopy(newHeaderSlots, oldHeaderSlots,
+ ObjectElements::VALUES_PER_HEADER + initlen + numShifted);
+ }
+
+ // If the object already had dynamic elements, then we have to account
+ // for freeing the old elements buffer.
+ if (oldAllocated) {
+ RemoveCellMemory(this, oldAllocated * sizeof(HeapSlot),
+ MemoryUse::ObjectElements);
+ }
+
+ ObjectElements* newheader = reinterpret_cast<ObjectElements*>(newHeaderSlots);
+ // Update the elements pointer to point to the new elements buffer.
+ elements_ = newheader->elements() + numShifted;
+
+ // Clear the "fixed elements" flag, because if this code has been reached,
+ // this object now has dynamic elements.
+ getElementsHeader()->flags &= ~ObjectElements::FIXED;
+ getElementsHeader()->capacity = newCapacity;
+
+ // Poison the uninitialized portion of the new elements buffer.
+ Debug_SetSlotRangeToCrashOnTouch(elements_ + initlen, newCapacity - initlen);
+
+ // Account for allocating the new elements buffer.
+ AddCellMemory(this, newAllocated * sizeof(HeapSlot),
+ MemoryUse::ObjectElements);
+
+ return true;
+}
+
+void NativeObject::shrinkElements(JSContext* cx, uint32_t reqCapacity) {
+ MOZ_ASSERT(canHaveNonEmptyElements());
+ MOZ_ASSERT(reqCapacity >= getDenseInitializedLength());
+
+ if (!hasDynamicElements()) {
+ return;
+ }
+
+ // If we have shifted elements, consider moving them.
+ uint32_t numShifted = getElementsHeader()->numShiftedElements();
+ if (numShifted > 0) {
+ maybeMoveShiftedElements();
+ numShifted = getElementsHeader()->numShiftedElements();
+ }
+
+ uint32_t oldCapacity = getDenseCapacity();
+ MOZ_ASSERT(reqCapacity < oldCapacity);
+
+ uint32_t newAllocated = 0;
+ MOZ_ALWAYS_TRUE(goodElementsAllocationAmount(cx, reqCapacity + numShifted, 0,
+ &newAllocated));
+ MOZ_ASSERT(oldCapacity <= MAX_DENSE_ELEMENTS_COUNT);
+
+ uint32_t oldAllocated =
+ oldCapacity + ObjectElements::VALUES_PER_HEADER + numShifted;
+ if (newAllocated == oldAllocated) {
+ return; // Leave elements at its old size.
+ }
+
+ MOZ_ASSERT(newAllocated > ObjectElements::VALUES_PER_HEADER);
+ uint32_t newCapacity =
+ newAllocated - ObjectElements::VALUES_PER_HEADER - numShifted;
+ MOZ_ASSERT(newCapacity <= MAX_DENSE_ELEMENTS_COUNT);
+
+ HeapSlot* oldHeaderSlots =
+ reinterpret_cast<HeapSlot*>(getUnshiftedElementsHeader());
+ HeapSlot* newHeaderSlots = ReallocateObjectBuffer<HeapSlot>(
+ cx, this, oldHeaderSlots, oldAllocated, newAllocated);
+ if (!newHeaderSlots) {
+ cx->recoverFromOutOfMemory();
+ return; // Leave elements at its old size.
+ }
+
+ RemoveCellMemory(this, oldAllocated * sizeof(HeapSlot),
+ MemoryUse::ObjectElements);
+
+ ObjectElements* newheader = reinterpret_cast<ObjectElements*>(newHeaderSlots);
+ elements_ = newheader->elements() + numShifted;
+ getElementsHeader()->capacity = newCapacity;
+
+ AddCellMemory(this, newAllocated * sizeof(HeapSlot),
+ MemoryUse::ObjectElements);
+}
+
+void NativeObject::shrinkCapacityToInitializedLength(JSContext* cx) {
+ // When an array's length becomes non-writable, writes to indexes greater
+ // than or equal to the length don't change the array. We handle this
+ // with a check for non-writable length in most places. But in JIT code every
+ // check counts -- so we piggyback the check on the already-required range
+ // check for |index < capacity| by making capacity of arrays with non-writable
+ // length never exceed the length. This mechanism is also used when an object
+ // becomes non-extensible.
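+ //
+ // For example (illustrative): after |Object.freeze(arr)| the array's length
+ // becomes non-writable, so the dense capacity is clamped to the initialized
+ // length and the JIT's |index < capacity| check also rules out writes past
+ // the length.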
+
+ if (getElementsHeader()->numShiftedElements() > 0) {
+ moveShiftedElements();
+ }
+
+ ObjectElements* header = getElementsHeader();
+ uint32_t len = header->initializedLength;
+ MOZ_ASSERT(header->capacity >= len);
+ if (header->capacity == len) {
+ return;
+ }
+
+ shrinkElements(cx, len);
+
+ header = getElementsHeader();
+ uint32_t oldAllocated = header->numAllocatedElements();
+ header->capacity = len;
+
+ // The size of the memory allocation hasn't changed but we lose the actual
+ // capacity information. Make the associated size match the updated capacity.
+ if (!hasFixedElements()) {
+ uint32_t newAllocated = header->numAllocatedElements();
+ RemoveCellMemory(this, oldAllocated * sizeof(HeapSlot),
+ MemoryUse::ObjectElements);
+ AddCellMemory(this, newAllocated * sizeof(HeapSlot),
+ MemoryUse::ObjectElements);
+ }
+}
+
+/* static */
+bool NativeObject::allocDictionarySlot(JSContext* cx, Handle<NativeObject*> obj,
+ uint32_t* slotp) {
+ MOZ_ASSERT(obj->inDictionaryMode());
+
+ uint32_t slotSpan = obj->slotSpan();
+ MOZ_ASSERT(slotSpan >= JSSLOT_FREE(obj->getClass()));
+
+ // Try to pull a free slot from the slot-number free list.
+ DictionaryPropMap* map = obj->dictionaryShape()->propMap();
+ uint32_t last = map->freeList();
+ if (last != SHAPE_INVALID_SLOT) {
+#ifdef DEBUG
+ MOZ_ASSERT(last < slotSpan);
+ uint32_t next = obj->getSlot(last).toPrivateUint32();
+ MOZ_ASSERT_IF(next != SHAPE_INVALID_SLOT, next < slotSpan);
+#endif
+ *slotp = last;
+ const Value& vref = obj->getSlot(last);
+ map->setFreeList(vref.toPrivateUint32());
+ obj->setSlot(last, UndefinedValue());
+ return true;
+ }
+
+ if (MOZ_UNLIKELY(slotSpan >= SHAPE_MAXIMUM_SLOT)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ *slotp = slotSpan;
+
+ uint32_t numFixed = obj->numFixedSlots();
+ if (slotSpan < numFixed) {
+ obj->initFixedSlot(slotSpan, UndefinedValue());
+ obj->setDictionaryModeSlotSpan(slotSpan + 1);
+ return true;
+ }
+
+ uint32_t dynamicSlotIndex = slotSpan - numFixed;
+ if (dynamicSlotIndex >= obj->numDynamicSlots()) {
+ if (MOZ_UNLIKELY(!obj->growSlotsForNewSlot(cx, numFixed, slotSpan))) {
+ return false;
+ }
+ }
+ obj->initDynamicSlot(numFixed, slotSpan, UndefinedValue());
+ obj->setDictionaryModeSlotSpan(slotSpan + 1);
+ return true;
+}
+
+void NativeObject::freeDictionarySlot(uint32_t slot) {
+ MOZ_ASSERT(inDictionaryMode());
+ MOZ_ASSERT(slot < slotSpan());
+
+ DictionaryPropMap* map = dictionaryShape()->propMap();
+ uint32_t last = map->freeList();
+
+ // Can't afford to check the whole free list, but let's check the head.
+ MOZ_ASSERT_IF(last != SHAPE_INVALID_SLOT, last < slotSpan() && last != slot);
+
+ // Place all freed slots other than reserved slots (bug 595230) on the
+ // dictionary's free list.
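+ //
+ // The free list is threaded through the slots themselves: each freed slot
+ // stores the previous list head as a PrivateUint32Value, and the map's
+ // freeList() field points at the most recently freed slot.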
+ if (JSSLOT_FREE(getClass()) <= slot) {
+ MOZ_ASSERT_IF(last != SHAPE_INVALID_SLOT, last < slotSpan());
+ setSlot(slot, PrivateUint32Value(last));
+ map->setFreeList(slot);
+ } else {
+ setSlot(slot, UndefinedValue());
+ }
+}
+
+template <AllowGC allowGC>
+bool js::NativeLookupOwnProperty(
+ JSContext* cx, typename MaybeRooted<NativeObject*, allowGC>::HandleType obj,
+ typename MaybeRooted<jsid, allowGC>::HandleType id, PropertyResult* propp) {
+ return NativeLookupOwnPropertyInline<allowGC>(cx, obj, id, propp);
+}
+
+template bool js::NativeLookupOwnProperty<CanGC>(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id,
+ PropertyResult* propp);
+
+template bool js::NativeLookupOwnProperty<NoGC>(JSContext* cx,
+ NativeObject* const& obj,
+ const jsid& id,
+ PropertyResult* propp);
+
+/*** [[DefineOwnProperty]] **************************************************/
+
+static bool CallJSAddPropertyOp(JSContext* cx, JSAddPropertyOp op,
+ HandleObject obj, HandleId id, HandleValue v) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+
+ cx->check(obj, id, v);
+ return op(cx, obj, id, v);
+}
+
+static MOZ_ALWAYS_INLINE bool CallAddPropertyHook(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id,
+ HandleValue value) {
+ JSAddPropertyOp addProperty = obj->getClass()->getAddProperty();
+ if (MOZ_UNLIKELY(addProperty)) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ if (!CallJSAddPropertyOp(cx, addProperty, obj, id, value)) {
+ NativeObject::removeProperty(cx, obj, id);
+ return false;
+ }
+ }
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool CallAddPropertyHookDense(
+ JSContext* cx, Handle<NativeObject*> obj, uint32_t index,
+ HandleValue value) {
+ // Inline addProperty for array objects.
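+ // Per Array [[DefineOwnProperty]] (9.4.2.1), adding an element at
+ // index >= length bumps the length to index + 1; e.g. (illustrative)
+ // defining arr[5] on a length-2 array makes arr.length 6.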
+ if (obj->is<ArrayObject>()) {
+ ArrayObject* arr = &obj->as<ArrayObject>();
+ uint32_t length = arr->length();
+ if (index >= length) {
+ arr->setLength(index + 1);
+ }
+ return true;
+ }
+
+ JSAddPropertyOp addProperty = obj->getClass()->getAddProperty();
+ if (MOZ_UNLIKELY(addProperty)) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ RootedId id(cx, PropertyKey::Int(index));
+ if (!CallJSAddPropertyOp(cx, addProperty, obj, id, value)) {
+ obj->setDenseElementHole(index);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * Determines whether a write to the given element on |arr| should fail
+ * because |arr| has a non-writable length, and writing that element would
+ * increase the length of the array.
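+ *
+ * For example (illustrative):
+ *
+ *   var arr = [1, 2, 3];
+ *   Object.defineProperty(arr, "length", { writable: false });
+ *   arr[3] = 4;  // would define index 3 past the non-writable length 3,
+ *                // so the write must fail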
+ */
+static bool WouldDefinePastNonwritableLength(ArrayObject* arr, uint32_t index) {
+ return !arr->lengthIsWritable() && index >= arr->length();
+}
+
+static bool ChangeProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleObject getter,
+ HandleObject setter, PropertyFlags flags,
+ PropertyResult* existing, uint32_t* slotOut) {
+ MOZ_ASSERT(existing);
+
+ Rooted<GetterSetter*> gs(cx);
+
+ // If we're redefining a getter/setter property but the getter and setter
+ // objects are still the same, use the existing GetterSetter.
+ if (existing->isNativeProperty()) {
+ PropertyInfo prop = existing->propertyInfo();
+ if (prop.isAccessorProperty()) {
+ GetterSetter* current = obj->getGetterSetter(prop);
+ if (current->getter() == getter && current->setter() == setter) {
+ gs = current;
+ }
+ }
+ }
+
+ if (!gs) {
+ gs = GetterSetter::create(cx, getter, setter);
+ if (!gs) {
+ return false;
+ }
+ }
+
+ if (existing->isNativeProperty()) {
+ if (!NativeObject::changeProperty(cx, obj, id, flags, slotOut)) {
+ return false;
+ }
+ } else {
+ if (!NativeObject::addProperty(cx, obj, id, flags, slotOut)) {
+ return false;
+ }
+ }
+
+ obj->setSlot(*slotOut, PrivateGCThingValue(gs));
+ return true;
+}
+
+static PropertyFlags ComputePropertyFlags(const PropertyDescriptor& desc) {
+ desc.assertComplete();
+
+ PropertyFlags flags;
+ flags.setFlag(PropertyFlag::Configurable, desc.configurable());
+ flags.setFlag(PropertyFlag::Enumerable, desc.enumerable());
+
+ if (desc.isDataDescriptor()) {
+ flags.setFlag(PropertyFlag::Writable, desc.writable());
+ } else {
+ MOZ_ASSERT(desc.isAccessorDescriptor());
+ flags.setFlag(PropertyFlag::AccessorProperty);
+ }
+
+ return flags;
+}
+
+// Whether we're adding a new property or changing an existing property (this
+// can be either a property stored in the shape tree or a dense element).
+enum class IsAddOrChange { Add, Change };
+
+template <IsAddOrChange AddOrChange>
+static MOZ_ALWAYS_INLINE bool AddOrChangeProperty(
+ JSContext* cx, Handle<NativeObject*> obj, HandleId id,
+ Handle<PropertyDescriptor> desc, PropertyResult* existing = nullptr) {
+ desc.assertComplete();
+
+#ifdef DEBUG
+ if constexpr (AddOrChange == IsAddOrChange::Add) {
+ MOZ_ASSERT(existing == nullptr);
+ MOZ_ASSERT(!obj->containsPure(id));
+ } else {
+ static_assert(AddOrChange == IsAddOrChange::Change);
+ MOZ_ASSERT(existing);
+ MOZ_ASSERT(existing->isNativeProperty() || existing->isDenseElement());
+ }
+#endif
+
+ // Use dense storage for indexed properties where possible: when we have an
+ // integer key with default property attributes and are either adding a new
+ // property or changing a dense element.
+ PropertyFlags flags = ComputePropertyFlags(desc);
+ if (id.isInt() && flags == PropertyFlags::defaultDataPropFlags &&
+ (AddOrChange == IsAddOrChange::Add || existing->isDenseElement())) {
+ MOZ_ASSERT(!desc.isAccessorDescriptor());
+ MOZ_ASSERT(!obj->is<TypedArrayObject>());
+ uint32_t index = id.toInt();
+ DenseElementResult edResult = obj->ensureDenseElements(cx, index, 1);
+ if (edResult == DenseElementResult::Failure) {
+ return false;
+ }
+ if (edResult == DenseElementResult::Success) {
+ obj->setDenseElement(index, desc.value());
+ if (!CallAddPropertyHookDense(cx, obj, index, desc.value())) {
+ return false;
+ }
+ return true;
+ }
+ }
+
+ uint32_t slot;
+ if constexpr (AddOrChange == IsAddOrChange::Add) {
+ if (desc.isAccessorDescriptor()) {
+ Rooted<GetterSetter*> gs(
+ cx, GetterSetter::create(cx, desc.getter(), desc.setter()));
+ if (!gs) {
+ return false;
+ }
+ if (!NativeObject::addProperty(cx, obj, id, flags, &slot)) {
+ return false;
+ }
+ obj->initSlot(slot, PrivateGCThingValue(gs));
+ } else {
+ if (!NativeObject::addProperty(cx, obj, id, flags, &slot)) {
+ return false;
+ }
+ obj->initSlot(slot, desc.value());
+ }
+ } else {
+ if (desc.isAccessorDescriptor()) {
+ if (!ChangeProperty(cx, obj, id, desc.getter(), desc.setter(), flags,
+ existing, &slot)) {
+ return false;
+ }
+ } else {
+ if (existing->isNativeProperty()) {
+ if (!NativeObject::changeProperty(cx, obj, id, flags, &slot)) {
+ return false;
+ }
+ } else {
+ if (!NativeObject::addProperty(cx, obj, id, flags, &slot)) {
+ return false;
+ }
+ }
+ obj->setSlot(slot, desc.value());
+ }
+ }
+
+ MOZ_ASSERT(slot < obj->slotSpan());
+
+ // Clear any existing dense index after adding a sparse indexed property,
+ // and investigate converting the object to dense indexes.
+ if (id.isInt()) {
+ uint32_t index = id.toInt();
+ if constexpr (AddOrChange == IsAddOrChange::Add) {
+ MOZ_ASSERT(!obj->containsDenseElement(index));
+ } else {
+ obj->removeDenseElementForSparseIndex(index);
+ }
+ // Only try to densify sparse elements if the property we just added/changed
+ // is in the last slot. This avoids a perf cliff in pathological cases: in
+ // maybeDensifySparseElements we densify if the slot span is a power-of-two,
+ // but if we get slots from the free list, the slot span will stay the same
+ // until the free list is empty. This means we'd get quadratic behavior by
+ // trying to densify for each sparse element we add. See bug 1782487.
+ if (slot == obj->slotSpan() - 1) {
+ DenseElementResult edResult =
+ NativeObject::maybeDensifySparseElements(cx, obj);
+ if (edResult == DenseElementResult::Failure) {
+ return false;
+ }
+ if (edResult == DenseElementResult::Success) {
+ MOZ_ASSERT(!desc.isAccessorDescriptor());
+ return CallAddPropertyHookDense(cx, obj, index, desc.value());
+ }
+ }
+ }
+
+ if (desc.isDataDescriptor()) {
+ return CallAddPropertyHook(cx, obj, id, desc.value());
+ }
+
+ return CallAddPropertyHook(cx, obj, id, UndefinedHandleValue);
+}
+
+// Version of AddOrChangeProperty optimized for adding a plain data property.
+// This function doesn't handle integer ids as we may have to store them in
+// dense elements.
+static MOZ_ALWAYS_INLINE bool AddDataProperty(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id, HandleValue v) {
+ MOZ_ASSERT(!id.isInt());
+
+ uint32_t slot;
+ if (!NativeObject::addProperty(cx, obj, id,
+ PropertyFlags::defaultDataPropFlags, &slot)) {
+ return false;
+ }
+
+ obj->initSlot(slot, v);
+
+ return CallAddPropertyHook(cx, obj, id, v);
+}
+
+bool js::AddSlotAndCallAddPropHook(JSContext* cx, Handle<NativeObject*> obj,
+ HandleValue v, Handle<Shape*> newShape) {
+ MOZ_ASSERT(obj->getClass()->getAddProperty());
+ MOZ_ASSERT(newShape->asShared().lastProperty().isDataProperty());
+
+ RootedId id(cx, newShape->asShared().lastProperty().key());
+ MOZ_ASSERT(!id.isInt());
+
+ uint32_t slot = newShape->asShared().lastProperty().slot();
+ if (!obj->setShapeAndAddNewSlot(cx, &newShape->asShared(), slot)) {
+ return false;
+ }
+ obj->initSlot(slot, v);
+
+ return CallAddPropertyHook(cx, obj, id, v);
+}
+
+static bool IsAccessorDescriptor(const PropertyResult& prop) {
+ if (prop.isNativeProperty()) {
+ return prop.propertyInfo().isAccessorProperty();
+ }
+
+ MOZ_ASSERT(prop.isDenseElement() || prop.isTypedArrayElement());
+ return false;
+}
+
+static bool IsDataDescriptor(const PropertyResult& prop) {
+ return !IsAccessorDescriptor(prop);
+}
+
+static bool GetCustomDataProperty(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleValue vp);
+
+static bool GetExistingDataProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, const PropertyResult& prop,
+ MutableHandleValue vp) {
+ if (prop.isDenseElement()) {
+ vp.set(obj->getDenseElement(prop.denseElementIndex()));
+ return true;
+ }
+ if (prop.isTypedArrayElement()) {
+ size_t idx = prop.typedArrayElementIndex();
+ return obj->as<TypedArrayObject>().getElement<CanGC>(cx, idx, vp);
+ }
+
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (propInfo.isDataProperty()) {
+ vp.set(obj->getSlot(propInfo.slot()));
+ return true;
+ }
+
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ MOZ_RELEASE_ASSERT(propInfo.isCustomDataProperty());
+ return GetCustomDataProperty(cx, obj, id, vp);
+}
+
+/*
+ * If desc is redundant with an existing own property obj[id], then set
+ * |*redundant = true| and return true.
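+ *
+ * For example (illustrative), redefining a non-configurable property with
+ * its current attributes and value is redundant and must succeed as a no-op:
+ *
+ *   Object.defineProperty(obj, "x", {value: 1, writable: false,
+ *                                    enumerable: false,
+ *                                    configurable: false});
+ *   Object.defineProperty(obj, "x", {value: 1});  // redundant: succeeds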
+ */
+static bool DefinePropertyIsRedundant(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, const PropertyResult& prop,
+ JS::PropertyAttributes attrs,
+ Handle<PropertyDescriptor> desc,
+ bool* redundant) {
+ *redundant = false;
+
+ if (desc.hasConfigurable() && desc.configurable() != attrs.configurable()) {
+ return true;
+ }
+ if (desc.hasEnumerable() && desc.enumerable() != attrs.enumerable()) {
+ return true;
+ }
+ if (desc.isDataDescriptor()) {
+ if (IsAccessorDescriptor(prop)) {
+ return true;
+ }
+ if (desc.hasWritable() && desc.writable() != attrs.writable()) {
+ return true;
+ }
+ if (desc.hasValue()) {
+ // Get the current value of the existing property.
+ RootedValue currentValue(cx);
+ if (!GetExistingDataProperty(cx, obj, id, prop, &currentValue)) {
+ return false;
+ }
+
+ // Don't call SameValue here to ensure we properly update distinct
+ // NaN values.
+ if (desc.value() != currentValue) {
+ return true;
+ }
+ }
+
+ // Check for custom data properties for ArrayObject/ArgumentsObject.
+ // PropertyDescriptor can't represent these properties so they're never
+ // redundant.
+ if (prop.isNativeProperty() && prop.propertyInfo().isCustomDataProperty()) {
+ return true;
+ }
+ } else if (desc.isAccessorDescriptor()) {
+ if (!prop.isNativeProperty()) {
+ return true;
+ }
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (desc.hasGetter() && (!propInfo.isAccessorProperty() ||
+ desc.getter() != obj->getGetter(propInfo))) {
+ return true;
+ }
+ if (desc.hasSetter() && (!propInfo.isAccessorProperty() ||
+ desc.setter() != obj->getSetter(propInfo))) {
+ return true;
+ }
+ }
+
+ *redundant = true;
+ return true;
+}
+
+bool js::NativeDefineProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, Handle<PropertyDescriptor> desc_,
+ ObjectOpResult& result) {
+ desc_.assertValid();
+
+ // Section numbers and step numbers below refer to ES2018, draft rev
+ // 540b827fccf6122a984be99ab9af7be20e3b5562.
+ //
+ // This function aims to implement 9.1.6 [[DefineOwnProperty]] as well as
+ // the [[DefineOwnProperty]] methods described in 9.4.2.1 (arrays), 9.4.4.2
+ // (arguments), and 9.4.5.3 (typed array views).
+
+ // Dispense with custom behavior of exotic native objects first.
+ if (obj->is<ArrayObject>()) {
+ // 9.4.2.1 step 2. Redefining an array's length is very special.
+ Rooted<ArrayObject*> arr(cx, &obj->as<ArrayObject>());
+ if (id == NameToId(cx->names().length)) {
+ // 9.1.6.3 ValidateAndApplyPropertyDescriptor, step 7.a.
+ if (desc_.isAccessorDescriptor()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ return ArraySetLength(cx, arr, id, desc_, result);
+ }
+
+ // 9.4.2.1 step 3. Don't extend a fixed-length array.
+ uint32_t index;
+ if (IdIsIndex(id, &index)) {
+ if (WouldDefinePastNonwritableLength(arr, index)) {
+ return result.fail(JSMSG_CANT_DEFINE_PAST_ARRAY_LENGTH);
+ }
+ }
+ } else if (obj->is<TypedArrayObject>()) {
+ // 9.4.5.3 step 3. Indexed properties of typed arrays are special.
+ if (mozilla::Maybe<uint64_t> index = ToTypedArrayIndex(id)) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ Rooted<TypedArrayObject*> tobj(cx, &obj->as<TypedArrayObject>());
+ return DefineTypedArrayElement(cx, tobj, index.value(), desc_, result);
+ }
+ } else if (obj->is<ArgumentsObject>()) {
+ Rooted<ArgumentsObject*> argsobj(cx, &obj->as<ArgumentsObject>());
+ if (id.isAtom(cx->names().length)) {
+ // Either we are resolving the .length property on this object,
+ // or redefining it. In the latter case only, we must reify the
+ // property.
+ if (!desc_.resolving()) {
+ if (!ArgumentsObject::reifyLength(cx, argsobj)) {
+ return false;
+ }
+ }
+ } else if (id.isAtom(cx->names().callee) &&
+ argsobj->is<MappedArgumentsObject>()) {
+ // Do same thing as .length for .callee on MappedArgumentsObject.
+ if (!desc_.resolving()) {
+ Rooted<MappedArgumentsObject*> mapped(
+ cx, &argsobj->as<MappedArgumentsObject>());
+ if (!MappedArgumentsObject::reifyCallee(cx, mapped)) {
+ return false;
+ }
+ }
+ } else if (id.isWellKnownSymbol(JS::SymbolCode::iterator)) {
+ // Do same thing as .length for [@@iterator].
+ if (!desc_.resolving()) {
+ if (!ArgumentsObject::reifyIterator(cx, argsobj)) {
+ return false;
+ }
+ }
+ } else if (id.isInt()) {
+ if (!desc_.resolving()) {
+ argsobj->markElementOverridden();
+ }
+ }
+ }
+
+ // 9.1.6.1 OrdinaryDefineOwnProperty step 1.
+ PropertyResult prop;
+ if (desc_.resolving()) {
+ // We are being called from a resolve or enumerate hook to reify a
+ // lazily-resolved property. To avoid reentering the resolve hook and
+ // recursing forever, skip the resolve hook when doing this lookup.
+ if (!NativeLookupOwnPropertyNoResolve(cx, obj, id, &prop)) {
+ return false;
+ }
+ } else {
+ if (!NativeLookupOwnProperty<CanGC>(cx, obj, id, &prop)) {
+ return false;
+ }
+ }
+
+ // From this point, the step numbers refer to
+ // 9.1.6.3, ValidateAndApplyPropertyDescriptor.
+ // Step 1 is a redundant assertion.
+
+ // Filling in desc: Here we make a copy of the desc_ argument. We will turn
+ // it into a complete descriptor before updating obj. The spec algorithm
+ // does not explicitly do this, but the end result is the same. Search for
+ // "fill in" below for places where the filling-in actually occurs.
+ Rooted<PropertyDescriptor> desc(cx, desc_);
+
+ // Step 2.
+ if (prop.isNotFound()) {
+ // Note: We are sharing the property definition machinery with private
+ // fields. Private fields may be added to non-extensible objects.
+ if (!obj->isExtensible() && !id.isPrivateName() &&
+ // R&T wrappers are non-extensible, but we still want to be able to
+ // lazily resolve their properties. We can special-case them to
+ // allow doing so.
+ IF_RECORD_TUPLE(
+ !(IsExtendedPrimitiveWrapper(*obj) && desc_.resolving()), true)) {
+ return result.fail(JSMSG_CANT_DEFINE_PROP_OBJECT_NOT_EXTENSIBLE);
+ }
+
+ // Fill in missing desc fields with defaults.
+ CompletePropertyDescriptor(&desc);
+
+ if (!AddOrChangeProperty<IsAddOrChange::Add>(cx, obj, id, desc)) {
+ return false;
+ }
+ return result.succeed();
+ }
+
+ // Steps 3, 7.a.i.3, 8.a.iii, and 10 (partially). Prop might not actually
+ // have a real shape (e.g. in the case of typed array elements);
+ // GetPropertyAttributes is used to paper over that difference.
+ JS::PropertyAttributes attrs = GetPropertyAttributes(obj, prop);
+ bool redundant;
+ if (!DefinePropertyIsRedundant(cx, obj, id, prop, attrs, desc, &redundant)) {
+ return false;
+ }
+ if (redundant) {
+ return result.succeed();
+ }
+
+ // Step 4.
+ if (!attrs.configurable()) {
+ if (desc.hasConfigurable() && desc.configurable()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+ if (desc.hasEnumerable() && desc.enumerable() != attrs.enumerable()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+ }
+
+ // Fill in desc.[[Configurable]] and desc.[[Enumerable]] if missing.
+ if (!desc.hasConfigurable()) {
+ desc.setConfigurable(attrs.configurable());
+ }
+ if (!desc.hasEnumerable()) {
+ desc.setEnumerable(attrs.enumerable());
+ }
+
+ // Steps 5-8.
+ if (desc.isGenericDescriptor()) {
+ // Step 5. No further validation is required.
+
+ // Fill in desc. A generic descriptor has none of these fields, so copy
+ // everything from shape.
+ MOZ_ASSERT(!desc.hasValue());
+ MOZ_ASSERT(!desc.hasWritable());
+ MOZ_ASSERT(!desc.hasGetter());
+ MOZ_ASSERT(!desc.hasSetter());
+ if (IsDataDescriptor(prop)) {
+ RootedValue currentValue(cx);
+ if (!GetExistingDataProperty(cx, obj, id, prop, &currentValue)) {
+ return false;
+ }
+ desc.setValue(currentValue);
+ desc.setWritable(attrs.writable());
+ } else {
+ PropertyInfo propInfo = prop.propertyInfo();
+ desc.setGetter(obj->getGetter(propInfo));
+ desc.setSetter(obj->getSetter(propInfo));
+ }
+ } else if (desc.isDataDescriptor() != IsDataDescriptor(prop)) {
+ // Step 6.
+ if (!attrs.configurable()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+
+ // Fill in desc fields with default values (steps 6.b.i and 6.c.i).
+ CompletePropertyDescriptor(&desc);
+ } else if (desc.isDataDescriptor()) {
+ // Step 7.
+ bool frozen = !attrs.configurable() && !attrs.writable();
+
+ // Step 7.a.i.1.
+ if (frozen && desc.hasWritable() && desc.writable()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+
+ if (frozen || !desc.hasValue()) {
+ RootedValue currentValue(cx);
+ if (!GetExistingDataProperty(cx, obj, id, prop, &currentValue)) {
+ return false;
+ }
+
+ if (!desc.hasValue()) {
+ // Fill in desc.[[Value]].
+ desc.setValue(currentValue);
+ } else {
+ // Step 7.a.i.2.
+ bool same;
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ if (!SameValue(cx, desc.value(), currentValue, &same)) {
+ return false;
+ }
+ if (!same) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+ }
+ }
+
+ // Step 7.a.i.3.
+ if (frozen) {
+ return result.succeed();
+ }
+
+ // Fill in desc.[[Writable]].
+ if (!desc.hasWritable()) {
+ desc.setWritable(attrs.writable());
+ }
+ } else {
+ // Step 8.
+ PropertyInfo propInfo = prop.propertyInfo();
+ MOZ_ASSERT(propInfo.isAccessorProperty());
+ MOZ_ASSERT(desc.isAccessorDescriptor());
+
+ // The spec says to use SameValue, but since the values in
+ // question are objects, we can just compare pointers.
+ if (desc.hasSetter()) {
+ // Step 8.a.i.
+ if (!attrs.configurable() && desc.setter() != obj->getSetter(propInfo)) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+ } else {
+ // Fill in desc.[[Set]] from shape.
+ desc.setSetter(obj->getSetter(propInfo));
+ }
+ if (desc.hasGetter()) {
+ // Step 8.a.ii.
+ if (!attrs.configurable() && desc.getter() != obj->getGetter(propInfo)) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+ } else {
+ // Fill in desc.[[Get]] from shape.
+ desc.setGetter(obj->getGetter(propInfo));
+ }
+
+ // Step 8.a.iii (Omitted).
+ }
+
+ // Step 9.
+ if (!AddOrChangeProperty<IsAddOrChange::Change>(cx, obj, id, desc, &prop)) {
+ return false;
+ }
+
+ // Step 10.
+ return result.succeed();
+}
+
+bool js::NativeDefineDataProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleValue value,
+ unsigned attrs, ObjectOpResult& result) {
+ Rooted<PropertyDescriptor> desc(cx, PropertyDescriptor::Data(value, attrs));
+ return NativeDefineProperty(cx, obj, id, desc, result);
+}
+
+bool js::NativeDefineAccessorProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleObject getter,
+ HandleObject setter, unsigned attrs) {
+ Rooted<PropertyDescriptor> desc(
+ cx, PropertyDescriptor::Accessor(
+ getter ? mozilla::Some(getter) : mozilla::Nothing(),
+ setter ? mozilla::Some(setter) : mozilla::Nothing(), attrs));
+
+ ObjectOpResult result;
+ if (!NativeDefineProperty(cx, obj, id, desc, result)) {
+ return false;
+ }
+
+ if (!result) {
+ // Off-thread callers should not get here: they must call this
+ // function only with known-valid arguments. Populating a new
+ // PlainObject with configurable properties is fine.
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ result.reportError(cx, obj, id);
+ return false;
+ }
+
+ return true;
+}
+
+bool js::NativeDefineDataProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleValue value,
+ unsigned attrs) {
+ ObjectOpResult result;
+ if (!NativeDefineDataProperty(cx, obj, id, value, attrs, result)) {
+ return false;
+ }
+ if (!result) {
+ // Off-thread callers should not get here: they must call this
+ // function only with known-valid arguments. Populating a new
+ // PlainObject with configurable properties is fine.
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ result.reportError(cx, obj, id);
+ return false;
+ }
+ return true;
+}
+
+bool js::NativeDefineDataProperty(JSContext* cx, Handle<NativeObject*> obj,
+ PropertyName* name, HandleValue value,
+ unsigned attrs) {
+ RootedId id(cx, NameToId(name));
+ return NativeDefineDataProperty(cx, obj, id, value, attrs);
+}
+
+static bool DefineNonexistentProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleValue v,
+ ObjectOpResult& result) {
+ // Optimized NativeDefineProperty() version for known absent properties.
+
+ // Dispense with custom behavior of exotic native objects first.
+ if (obj->is<ArrayObject>()) {
+ // Array's length property is non-configurable, so we shouldn't
+ // encounter it in this function.
+ MOZ_ASSERT(id != NameToId(cx->names().length));
+
+ // 9.4.2.1 step 3. Don't extend a fixed-length array.
+ uint32_t index;
+ if (IdIsIndex(id, &index)) {
+ if (WouldDefinePastNonwritableLength(&obj->as<ArrayObject>(), index)) {
+ return result.fail(JSMSG_CANT_DEFINE_PAST_ARRAY_LENGTH);
+ }
+ }
+ } else if (obj->is<TypedArrayObject>()) {
+ // 9.4.5.5 step 2. Indexed properties of typed arrays are special.
+ if (mozilla::Maybe<uint64_t> index = ToTypedArrayIndex(id)) {
+ // This method is only called for non-existent properties, which
+ // means any absent indexed property must be out of range.
+ MOZ_ASSERT(index.value() >= obj->as<TypedArrayObject>().length());
+
+ // The following steps refer to 9.4.5.11 IntegerIndexedElementSet.
+
+ // Step 1 is enforced by the caller.
+
+ // Steps 2-3.
+ // We still need to call ToNumber or ToBigInt, because of its
+ // possible side effects.
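+ // E.g. (illustrative) |ta[outOfRangeIndex] = {valueOf() { /* effects */ }}|
+ // must still run valueOf even though the element store itself is dropped.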
+ if (!obj->as<TypedArrayObject>().convertForSideEffect(cx, v)) {
+ return false;
+ }
+
+ // Step 4 (nothing to do, the index is out of range).
+
+ // Step 5.
+ return result.succeed();
+ }
+ } else if (obj->is<ArgumentsObject>()) {
+ // If this method is called with either |length| or |@@iterator|, the
+ // property was previously deleted and hence should already be marked
+ // as overridden.
+ MOZ_ASSERT_IF(id.isAtom(cx->names().length),
+ obj->as<ArgumentsObject>().hasOverriddenLength());
+ MOZ_ASSERT_IF(id.isWellKnownSymbol(JS::SymbolCode::iterator),
+ obj->as<ArgumentsObject>().hasOverriddenIterator());
+
+ // We still need to mark any element properties as overridden.
+ if (id.isInt()) {
+ obj->as<ArgumentsObject>().markElementOverridden();
+ }
+ }
+
+#ifdef DEBUG
+ PropertyResult prop;
+ if (!NativeLookupOwnPropertyNoResolve(cx, obj, id, &prop)) {
+ return false;
+ }
+ MOZ_ASSERT(prop.isNotFound(), "didn't expect to find an existing property");
+#endif
+
+ // 9.1.6.3, ValidateAndApplyPropertyDescriptor.
+ // Step 1 is a redundant assertion, step 3 and later don't apply here.
+
+ // Step 2.
+ if (!obj->isExtensible()) {
+ return result.fail(JSMSG_CANT_DEFINE_PROP_OBJECT_NOT_EXTENSIBLE);
+ }
+
+ if (id.isInt()) {
+ // This might be a dense element. Use AddOrChangeProperty as it knows
+ // how to deal with that.
+ Rooted<PropertyDescriptor> desc(
+ cx, PropertyDescriptor::Data(v, {JS::PropertyAttribute::Configurable,
+ JS::PropertyAttribute::Enumerable,
+ JS::PropertyAttribute::Writable}));
+ if (!AddOrChangeProperty<IsAddOrChange::Add>(cx, obj, id, desc)) {
+ return false;
+ }
+ } else {
+ if (!AddDataProperty(cx, obj, id, v)) {
+ return false;
+ }
+ }
+
+ return result.succeed();
+}
+
+bool js::AddOrUpdateSparseElementHelper(JSContext* cx,
+ Handle<NativeObject*> obj,
+ int32_t int_id, HandleValue v,
+ bool strict) {
+ MOZ_ASSERT(obj->is<ArrayObject>() || obj->is<PlainObject>());
+
+ // This helper doesn't handle the case where the index is a dense element.
+ MOZ_ASSERT(int_id >= 0);
+ MOZ_ASSERT(!obj->containsDenseElement(int_id));
+
+ MOZ_ASSERT(PropertyKey::fitsInInt(int_id));
+ RootedId id(cx, PropertyKey::Int(int_id));
+
+ // First decide if this is an add or an update. Because the IC guards have
+ // already ensured this index lies outside the dense element range, and the
+ // prototype checks have ensured there are no indexes on the prototype, we
+ // can use the shape lineage to find the element if it exists:
+ uint32_t index;
+ PropMap* map = obj->shape()->lookup(cx, id, &index);
+
+ // If we didn't find the property, we're on the add path: delegate to
+ // AddOrChangeProperty. This will add either a sparse element or a dense
+ // element.
+ if (map == nullptr) {
+ Rooted<PropertyDescriptor> desc(
+ cx, PropertyDescriptor::Data(v, {JS::PropertyAttribute::Configurable,
+ JS::PropertyAttribute::Enumerable,
+ JS::PropertyAttribute::Writable}));
+ return AddOrChangeProperty<IsAddOrChange::Add>(cx, obj, id, desc);
+ }
+
+ // At this point we're updating a property: See SetExistingProperty.
+ PropertyInfo prop = map->getPropertyInfo(index);
+ if (prop.isDataProperty() && prop.writable()) {
+ obj->setSlot(prop.slot(), v);
+ return true;
+ }
+
+ // We don't know exactly what this object looks like, hit the slowpath.
+ RootedValue receiver(cx, ObjectValue(*obj));
+ JS::ObjectOpResult result;
+ return SetProperty(cx, obj, id, v, receiver, result) &&
+ result.checkStrictModeError(cx, obj, id, strict);
+}
+
+/*** [[HasProperty]] ********************************************************/
+
+// ES6 draft rev 31 9.1.7.1 OrdinaryHasProperty
+bool js::NativeHasProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, bool* foundp) {
+ Rooted<NativeObject*> pobj(cx, obj);
+ PropertyResult prop;
+
+ // This loop isn't explicit in the spec algorithm. See the comment on step
+ // 7.a. below.
+ for (;;) {
+ // Steps 2-3.
+ if (!NativeLookupOwnPropertyInline<CanGC>(cx, pobj, id, &prop)) {
+ return false;
+ }
+
+ // Step 4.
+ if (prop.isFound()) {
+ *foundp = true;
+ return true;
+ }
+
+ // Step 5-6.
+ JSObject* proto = pobj->staticPrototype();
+
+ // Step 8.
+ // As a side-effect of NativeLookupOwnPropertyInline, we may determine that
+ // a property is not found and the proto chain should not be searched. This
+ // can occur for:
+ // - Out-of-range numeric properties of a TypedArrayObject
+ // - Recursive resolve hooks (which is expected when they try to set the
+ // property being resolved).
+ if (!proto || prop.shouldIgnoreProtoChain()) {
+ *foundp = false;
+ return true;
+ }
+
+ // Step 7.a. If the prototype is also native, this step is a
+ // recursive tail call, and we don't need to go through all the
+ // plumbing of HasProperty; the top of the loop is where
+ // we're going to end up anyway. But if pobj is non-native,
+ // that optimization would be incorrect.
+ if (!proto->is<NativeObject>()) {
+ RootedObject protoRoot(cx, proto);
+ return HasProperty(cx, protoRoot, id, foundp);
+ }
+
+ pobj = &proto->as<NativeObject>();
+ }
+}
+
+/*** [[GetOwnPropertyDescriptor]] *******************************************/
+
+bool js::NativeGetOwnPropertyDescriptor(
+ JSContext* cx, Handle<NativeObject*> obj, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) {
+ PropertyResult prop;
+ if (!NativeLookupOwnProperty<CanGC>(cx, obj, id, &prop)) {
+ return false;
+ }
+ if (prop.isNotFound()) {
+ desc.reset();
+ return true;
+ }
+
+ if (prop.isNativeProperty() && prop.propertyInfo().isAccessorProperty()) {
+ PropertyInfo propInfo = prop.propertyInfo();
+ desc.set(mozilla::Some(PropertyDescriptor::Accessor(
+ obj->getGetter(propInfo), obj->getSetter(propInfo),
+ propInfo.propAttributes())));
+ return true;
+ }
+
+ RootedValue value(cx);
+ if (!GetExistingDataProperty(cx, obj, id, prop, &value)) {
+ return false;
+ }
+
+ JS::PropertyAttributes attrs = GetPropertyAttributes(obj, prop);
+ desc.set(mozilla::Some(PropertyDescriptor::Data(value, attrs)));
+ return true;
+}
+
+/*** [[Get]] ****************************************************************/
+
+static bool GetCustomDataProperty(JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandleValue vp) {
+ cx->check(obj, id, vp);
+
+ const JSClass* clasp = obj->getClass();
+ if (clasp == &ArrayObject::class_) {
+ if (!ArrayLengthGetter(cx, obj, id, vp)) {
+ return false;
+ }
+ } else if (clasp == &MappedArgumentsObject::class_) {
+ if (!MappedArgGetter(cx, obj, id, vp)) {
+ return false;
+ }
+ } else {
+ MOZ_RELEASE_ASSERT(clasp == &UnmappedArgumentsObject::class_);
+ if (!UnmappedArgGetter(cx, obj, id, vp)) {
+ return false;
+ }
+ }
+
+ cx->check(vp);
+ return true;
+}
+
+static inline bool CallGetter(JSContext* cx, Handle<NativeObject*> obj,
+ HandleValue receiver, HandleId id,
+ PropertyInfo prop, MutableHandleValue vp) {
+ MOZ_ASSERT(!prop.isDataProperty());
+
+ if (prop.isAccessorProperty()) {
+ RootedValue getter(cx, obj->getGetterValue(prop));
+ return js::CallGetter(cx, receiver, getter, vp);
+ }
+
+ MOZ_ASSERT(prop.isCustomDataProperty());
+
+ return GetCustomDataProperty(cx, obj, id, vp);
+}
+
+template <AllowGC allowGC>
+static MOZ_ALWAYS_INLINE bool GetExistingProperty(
+ JSContext* cx, typename MaybeRooted<Value, allowGC>::HandleType receiver,
+ typename MaybeRooted<NativeObject*, allowGC>::HandleType obj,
+ typename MaybeRooted<jsid, allowGC>::HandleType id, PropertyInfo prop,
+ typename MaybeRooted<Value, allowGC>::MutableHandleType vp) {
+ if (prop.isDataProperty()) {
+ vp.set(obj->getSlot(prop.slot()));
+ return true;
+ }
+
+ vp.setUndefined();
+
+ if (!prop.isCustomDataProperty() && !obj->hasGetter(prop)) {
+ return true;
+ }
+
+ if constexpr (!allowGC) {
+ return false;
+ } else {
+ return CallGetter(cx, obj, receiver, id, prop, vp);
+ }
+}
+
+bool js::NativeGetExistingProperty(JSContext* cx, HandleObject receiver,
+ Handle<NativeObject*> obj, HandleId id,
+ PropertyInfo prop, MutableHandleValue vp) {
+ RootedValue receiverValue(cx, ObjectValue(*receiver));
+ return GetExistingProperty<CanGC>(cx, receiverValue, obj, id, prop, vp);
+}
+
+enum IsNameLookup { NotNameLookup = false, NameLookup = true };
+
+/*
+ * Finish getting the property `receiver[id]` after looking at every object on
+ * the prototype chain and not finding any such property.
+ *
+ * Per the spec, this should just set the result to `undefined` and call it a
+ * day. However this function also runs when we're evaluating an
+ * expression that's an Identifier (that is, an unqualified name lookup),
+ * so we need to figure out if that's what's happening and throw
+ * a ReferenceError if so.
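+ *
+ * For example (illustrative): evaluating |obj.missing| yields undefined,
+ * while evaluating the bare identifier |missing| throws a ReferenceError.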
+ */
+static bool GetNonexistentProperty(JSContext* cx, HandleId id,
+ IsNameLookup nameLookup,
+ MutableHandleValue vp) {
+ vp.setUndefined();
+
+ // If we are doing a name lookup, this is a ReferenceError.
+ if (nameLookup) {
+ ReportIsNotDefined(cx, id);
+ return false;
+ }
+
+ // Otherwise, just return |undefined|.
+ return true;
+}
+
+// The NoGC version of GetNonexistentProperty, present only to make types line
+// up.
+bool GetNonexistentProperty(JSContext* cx, const jsid& id,
+ IsNameLookup nameLookup,
+ FakeMutableHandle<Value> vp) {
+ return false;
+}
+
+static inline bool GeneralizedGetProperty(JSContext* cx, HandleObject obj,
+ HandleId id, HandleValue receiver,
+ IsNameLookup nameLookup,
+ MutableHandleValue vp) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+ if (nameLookup) {
+ // When nameLookup is true, GetProperty implements ES6 rev 34 (2015 Feb
+ // 20) 8.1.1.2.6 GetBindingValue, with step 3 (the call to HasProperty)
+ // and step 6 (the call to Get) fused so that only a single lookup is
+ // needed.
+ //
+ // If we get here, we've reached a non-native object. Fall back on the
+ // algorithm as specified, with two separate lookups. (Note that we
+ // throw ReferenceErrors regardless of strictness, technically a bug.)
+
+ bool found;
+ if (!HasProperty(cx, obj, id, &found)) {
+ return false;
+ }
+ if (!found) {
+ ReportIsNotDefined(cx, id);
+ return false;
+ }
+ }
+
+ return GetProperty(cx, obj, receiver, id, vp);
+}
+
+static inline bool GeneralizedGetProperty(JSContext* cx, JSObject* obj, jsid id,
+ const Value& receiver,
+ IsNameLookup nameLookup,
+ FakeMutableHandle<Value> vp) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.checkDontReport(cx)) {
+ return false;
+ }
+ if (nameLookup) {
+ return false;
+ }
+ return GetPropertyNoGC(cx, obj, receiver, id, vp.address());
+}
+
+bool js::GetSparseElementHelper(JSContext* cx, Handle<NativeObject*> obj,
+ int32_t int_id, MutableHandleValue result) {
+ MOZ_ASSERT(obj->is<ArrayObject>() || obj->is<PlainObject>());
+
+ // This helper doesn't handle the case where the index is a dense element.
+ MOZ_ASSERT(int_id >= 0);
+ MOZ_ASSERT(!obj->containsDenseElement(int_id));
+
+ // Indexed properties can not exist on the prototype chain.
+ MOZ_ASSERT(!PrototypeMayHaveIndexedProperties(obj));
+
+ MOZ_ASSERT(PropertyKey::fitsInInt(int_id));
+ RootedId id(cx, PropertyKey::Int(int_id));
+
+ uint32_t index;
+ PropMap* map = obj->shape()->lookup(cx, id, &index);
+ if (!map) {
+ // Property not found, return directly.
+ result.setUndefined();
+ return true;
+ }
+
+ PropertyInfo prop = map->getPropertyInfo(index);
+ RootedValue receiver(cx, ObjectValue(*obj));
+ return GetExistingProperty<CanGC>(cx, receiver, obj, id, prop, result);
+}
+
+template <AllowGC allowGC>
+static MOZ_ALWAYS_INLINE bool NativeGetPropertyInline(
+ JSContext* cx, typename MaybeRooted<NativeObject*, allowGC>::HandleType obj,
+ typename MaybeRooted<Value, allowGC>::HandleType receiver,
+ typename MaybeRooted<jsid, allowGC>::HandleType id, IsNameLookup nameLookup,
+ typename MaybeRooted<Value, allowGC>::MutableHandleType vp) {
+ typename MaybeRooted<NativeObject*, allowGC>::RootType pobj(cx, obj);
+ PropertyResult prop;
+
+ // This loop isn't explicit in the spec algorithm. See the comment on step
+ // 4.d below.
+ for (;;) {
+ // Steps 2-3.
+ if (!NativeLookupOwnPropertyInline<allowGC>(cx, pobj, id, &prop)) {
+ return false;
+ }
+
+ if (prop.isFound()) {
+ // Steps 5-8. Special case for dense elements because
+ // GetExistingProperty doesn't support those.
+ if (prop.isDenseElement()) {
+ vp.set(pobj->getDenseElement(prop.denseElementIndex()));
+ return true;
+ }
+ if (prop.isTypedArrayElement()) {
+ size_t idx = prop.typedArrayElementIndex();
+ auto* tarr = &pobj->template as<TypedArrayObject>();
+ return tarr->template getElement<allowGC>(cx, idx, vp);
+ }
+
+ return GetExistingProperty<allowGC>(cx, receiver, pobj, id,
+ prop.propertyInfo(), vp);
+ }
+
+ // Steps 4.a-b.
+ JSObject* proto = pobj->staticPrototype();
+
+ // Step 4.c. The spec algorithm simply returns undefined if proto is
+ // null, but see the comment on GetNonexistentProperty.
+ if (!proto || prop.shouldIgnoreProtoChain()) {
+ return GetNonexistentProperty(cx, id, nameLookup, vp);
+ }
+
+ // Step 4.d. If the prototype is also native, this step is a
+ // recursive tail call, and we don't need to go through all the
+ // plumbing of JSObject::getGeneric; the top of the loop is where
+ // we're going to end up anyway. But if pobj is non-native,
+ // that optimization would be incorrect.
+ if (proto->getOpsGetProperty()) {
+ RootedObject protoRoot(cx, proto);
+ return GeneralizedGetProperty(cx, protoRoot, id, receiver, nameLookup,
+ vp);
+ }
+
+ pobj = &proto->as<NativeObject>();
+ }
+}
+
+bool js::NativeGetProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp) {
+ return NativeGetPropertyInline<CanGC>(cx, obj, receiver, id, NotNameLookup,
+ vp);
+}
+
+bool js::NativeGetPropertyNoGC(JSContext* cx, NativeObject* obj,
+ const Value& receiver, jsid id, Value* vp) {
+ AutoAssertNoPendingException noexc(cx);
+ return NativeGetPropertyInline<NoGC>(cx, obj, receiver, id, NotNameLookup,
+ vp);
+}
+
+bool js::NativeGetElement(JSContext* cx, Handle<NativeObject*> obj,
+ HandleValue receiver, int32_t index,
+ MutableHandleValue vp) {
+ RootedId id(cx);
+
+ if (MOZ_LIKELY(index >= 0)) {
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+ } else {
+ RootedValue indexVal(cx, Int32Value(index));
+ if (!PrimitiveValueToId<CanGC>(cx, indexVal, &id)) {
+ return false;
+ }
+ }
+ return NativeGetProperty(cx, obj, receiver, id, vp);
+}
+
+bool js::GetNameBoundInEnvironment(JSContext* cx, HandleObject envArg,
+ HandleId id, MutableHandleValue vp) {
+ // Manually unwrap 'with' environments to prevent looking up @@unscopables
+ // twice.
+ //
+ // This is unfortunate because internally, the engine does not distinguish
+ // HasProperty from HasBinding: both are implemented as a HasPropertyOp
+ // hook on a WithEnvironmentObject.
+ //
+ // In the case of attempting to get the value of a binding already looked up
+ // via JSOp::BindName, calling HasProperty on the WithEnvironmentObject is
+ // equivalent to calling HasBinding a second time. This results in the
+ // incorrect behavior of performing the @@unscopables check again.
+ RootedObject env(cx, MaybeUnwrapWithEnvironment(envArg));
+ RootedValue receiver(cx, ObjectValue(*env));
+ if (env->getOpsGetProperty()) {
+ return GeneralizedGetProperty(cx, env, id, receiver, NameLookup, vp);
+ }
+ return NativeGetPropertyInline<CanGC>(cx, env.as<NativeObject>(), receiver,
+ id, NameLookup, vp);
+}
+
+/*** [[Set]] ****************************************************************/
+
+static bool SetCustomDataProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, ObjectOpResult& result) {
+ cx->check(obj, id, v);
+
+ const JSClass* clasp = obj->getClass();
+ if (clasp == &ArrayObject::class_) {
+ return ArrayLengthSetter(cx, obj, id, v, result);
+ }
+ if (clasp == &MappedArgumentsObject::class_) {
+ return MappedArgSetter(cx, obj, id, v, result);
+ }
+ MOZ_RELEASE_ASSERT(clasp == &UnmappedArgumentsObject::class_);
+ return UnmappedArgSetter(cx, obj, id, v, result);
+}
+
+static bool MaybeReportUndeclaredVarAssignment(JSContext* cx, HandleId id) {
+ {
+ jsbytecode* pc;
+ JSScript* script =
+ cx->currentScript(&pc, JSContext::AllowCrossRealm::Allow);
+ if (!script) {
+ return true;
+ }
+
+ if (!IsStrictSetPC(pc)) {
+ return true;
+ }
+ }
+
+ UniqueChars bytes =
+ IdToPrintableUTF8(cx, id, IdToPrintableBehavior::IdIsIdentifier);
+ if (!bytes) {
+ return false;
+ }
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_UNDECLARED_VAR,
+ bytes.get());
+ return false;
+}
+
+/*
+ * Finish assignment to a shapeful data property of a native object obj. This
+ * conforms to no standard and there is a lot of legacy baggage here.
+ */
+static bool NativeSetExistingDataProperty(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id, PropertyInfo prop,
+ HandleValue v,
+ ObjectOpResult& result) {
+ MOZ_ASSERT(obj->is<NativeObject>());
+ MOZ_ASSERT(prop.isDataDescriptor());
+
+ if (prop.isDataProperty()) {
+ // The common path. Standard data property.
+ obj->setSlot(prop.slot(), v);
+ return result.succeed();
+ }
+
+ MOZ_ASSERT(prop.isCustomDataProperty());
+ MOZ_ASSERT(!obj->is<WithEnvironmentObject>()); // See bug 1128681.
+
+ return SetCustomDataProperty(cx, obj, id, v, result);
+}
+
+/*
+ * When a [[Set]] operation finds no existing property with the given id
+ * or finds a writable data property on the prototype chain, we end up here.
+ * Finish the [[Set]] by defining a new property on receiver.
+ *
+ * This implements ES6 draft rev 28, 9.1.9 [[Set]] steps 5.b-f, but it
+ * is really old code and there are a few barnacles.
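+ *
+ * For example (illustrative): with |proto = {x: 1}| and
+ * |obj = Object.create(proto)|, the assignment |obj.x = 2| finds a writable
+ * data property on |proto| and ends up here, defining a new own property
+ * |x| on |obj| while leaving |proto.x| unchanged.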
+ */
+bool js::SetPropertyByDefining(JSContext* cx, HandleId id, HandleValue v,
+ HandleValue receiverValue,
+ ObjectOpResult& result) {
+ // Step 5.b.
+ if (!receiverValue.isObject()) {
+ return result.fail(JSMSG_SET_NON_OBJECT_RECEIVER);
+ }
+ RootedObject receiver(cx, &receiverValue.toObject());
+
+ bool existing;
+ {
+ // Steps 5.c-d.
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ if (!GetOwnPropertyDescriptor(cx, receiver, id, &desc)) {
+ return false;
+ }
+
+ existing = desc.isSome();
+
+ // Step 5.e.
+ if (existing) {
+ // Step 5.e.i.
+ if (desc->isAccessorDescriptor()) {
+ return result.fail(JSMSG_OVERWRITING_ACCESSOR);
+ }
+
+ // Step 5.e.ii.
+ if (!desc->writable()) {
+ return result.fail(JSMSG_READ_ONLY);
+ }
+ }
+ }
+
+ // Steps 5.e.iii-iv. and 5.f.i. Define the new data property.
+ Rooted<PropertyDescriptor> desc(cx);
+ if (existing) {
+ desc = PropertyDescriptor::Empty();
+ desc.setValue(v);
+ } else {
+ desc = PropertyDescriptor::Data(v, {JS::PropertyAttribute::Configurable,
+ JS::PropertyAttribute::Enumerable,
+ JS::PropertyAttribute::Writable});
+ }
+ return DefineProperty(cx, receiver, id, desc, result);
+}
+
+// When setting |id| for |receiver| and |obj| has no property for id, continue
+// the search up the prototype chain.
+bool js::SetPropertyOnProto(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, HandleValue receiver,
+ ObjectOpResult& result) {
+ MOZ_ASSERT(!obj->is<ProxyObject>());
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (proto) {
+ return SetProperty(cx, proto, id, v, receiver, result);
+ }
+
+ return SetPropertyByDefining(cx, id, v, receiver, result);
+}
+
+/*
+ * Implement "the rest of" assignment to a property when no property
+ * receiver[id] was found anywhere on the prototype chain.
+ *
+ * FIXME: This should be updated to follow ES6 draft rev 28, section 9.1.9,
+ * steps 4.d.i and 5.
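+ *
+ * For example (illustrative): a strict-mode unqualified assignment to an
+ * undeclared name, |undeclaredName = 1|, ends up here and reports an
+ * undeclared-variable error (a ReferenceError) via
+ * MaybeReportUndeclaredVarAssignment.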
+ */
+template <QualifiedBool IsQualified>
+static bool SetNonexistentProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleValue v,
+ HandleValue receiver,
+ ObjectOpResult& result) {
+ if (!IsQualified && receiver.isObject() &&
+ receiver.toObject().isUnqualifiedVarObj()) {
+ if (!MaybeReportUndeclaredVarAssignment(cx, id)) {
+ return false;
+ }
+ }
+
+ // Pure optimization for the common case. There's no point performing the
+ // lookup in step 5.c again, as our caller just did it for us.
+ if (IsQualified && receiver.isObject() && obj == &receiver.toObject()) {
+ // Ensure that a custom GetOwnPropertyOp, if present, doesn't
+ // introduce additional properties which weren't previously found by
+ // LookupOwnProperty.
+#ifdef DEBUG
+ if (GetOwnPropertyOp op = obj->getOpsGetOwnPropertyDescriptor()) {
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ if (!op(cx, obj, id, &desc)) {
+ return false;
+ }
+ MOZ_ASSERT(desc.isNothing());
+ }
+#endif
+
+ // Step 5.e. Define the new data property.
+ if (DefinePropertyOp op = obj->getOpsDefineProperty()) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+
+ Rooted<PropertyDescriptor> desc(
+ cx, PropertyDescriptor::Data(v, {JS::PropertyAttribute::Configurable,
+ JS::PropertyAttribute::Enumerable,
+ JS::PropertyAttribute::Writable}));
+ return op(cx, obj, id, desc, result);
+ }
+
+ return DefineNonexistentProperty(cx, obj, id, v, result);
+ }
+
+ return SetPropertyByDefining(cx, id, v, receiver, result);
+}
+
+// Set an existing own property obj[index] that's a dense element.
+static bool SetDenseElement(JSContext* cx, Handle<NativeObject*> obj,
+ uint32_t index, HandleValue v,
+ ObjectOpResult& result) {
+ MOZ_ASSERT(!obj->is<TypedArrayObject>());
+ MOZ_ASSERT(obj->containsDenseElement(index));
+
+ obj->setDenseElement(index, v);
+ return result.succeed();
+}
+
+/*
+ * Finish the assignment `receiver[id] = v` when an existing property (shape)
+ * has been found on a native object (pobj). This implements ES6 draft rev 32
+ * (2015 Feb 2) 9.1.9 steps 5 and 6.
+ *
+ * It is necessary to pass both id and prop because prop could be an implicit
+ * dense or typed array element (i.e. not actually a pointer to a Shape).
+ */
+static bool SetExistingProperty(JSContext* cx, HandleId id, HandleValue v,
+ HandleValue receiver,
+ Handle<NativeObject*> pobj,
+ const PropertyResult& prop,
+ ObjectOpResult& result) {
+ // Step 5 for dense elements.
+ if (prop.isDenseElement() || prop.isTypedArrayElement()) {
+ // Step 5.a.
+ if (pobj->denseElementsAreFrozen()) {
+ return result.fail(JSMSG_READ_ONLY);
+ }
+
+ // Pure optimization for the common case:
+ if (receiver.isObject() && pobj == &receiver.toObject()) {
+ if (prop.isTypedArrayElement()) {
+ Rooted<TypedArrayObject*> tobj(cx, &pobj->as<TypedArrayObject>());
+ size_t idx = prop.typedArrayElementIndex();
+ return SetTypedArrayElement(cx, tobj, idx, v, result);
+ }
+
+ return SetDenseElement(cx, pobj, prop.denseElementIndex(), v, result);
+ }
+
+ // Steps 5.b-f.
+ return SetPropertyByDefining(cx, id, v, receiver, result);
+ }
+
+ // Step 5 for all other properties.
+ PropertyInfo propInfo = prop.propertyInfo();
+ if (propInfo.isDataDescriptor()) {
+ // Step 5.a.
+ if (!propInfo.writable()) {
+ return result.fail(JSMSG_READ_ONLY);
+ }
+
+ // steps 5.c-f.
+ if (receiver.isObject() && pobj == &receiver.toObject()) {
+ // Pure optimization for the common case. There's no point performing
+ // the lookup in step 5.c again, as our caller just did it for us. The
+ // result is |propInfo|.
+
+ // Steps 5.e.i-ii.
+ return NativeSetExistingDataProperty(cx, pobj, id, propInfo, v, result);
+ }
+
+ // Shadow pobj[id] by defining a new data property receiver[id].
+ // Delegate everything to SetPropertyByDefining.
+ return SetPropertyByDefining(cx, id, v, receiver, result);
+ }
+
+ // Steps 6-11.
+ MOZ_ASSERT(propInfo.isAccessorProperty());
+
+ JSObject* setterObject = pobj->getSetter(propInfo);
+ if (!setterObject) {
+ return result.fail(JSMSG_GETTER_ONLY);
+ }
+
+ RootedValue setter(cx, ObjectValue(*setterObject));
+ if (!js::CallSetter(cx, receiver, setter, v)) {
+ return false;
+ }
+
+ return result.succeed();
+}
+
+template <QualifiedBool IsQualified>
+bool js::NativeSetProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleValue v, HandleValue receiver,
+ ObjectOpResult& result) {
+ // Step numbers below reference ES6 rev 27 9.1.9, the [[Set]] internal
+ // method for ordinary objects. We substitute our own names for these names
+ // used in the spec: O -> pobj, P -> id, ownDesc -> prop.
+ PropertyResult prop;
+ Rooted<NativeObject*> pobj(cx, obj);
+
+ // This loop isn't explicit in the spec algorithm. See the comment on step
+ // 4.c.i below. (There's a very similar loop in the NativeGetProperty
+ // implementation, but unfortunately not similar enough to common up.)
+ //
+ // We're intentionally not spec-compliant for TypedArrays:
+ // When |pobj| is a TypedArray and |id| is a TypedArray index, we should
+ // ignore |receiver| and instead always try to set the property on |pobj|.
+ // Bug 1502889 showed that this behavior isn't web-compatible. This issue is
+ // also reported at <https://github.com/tc39/ecma262/issues/1541>.
+ for (;;) {
+ // Steps 2-3.
+ if (!NativeLookupOwnPropertyInline<CanGC>(cx, pobj, id, &prop)) {
+ return false;
+ }
+
+ if (prop.isFound()) {
+ // Steps 5-6.
+ return SetExistingProperty(cx, id, v, receiver, pobj, prop, result);
+ }
+
+ // Steps 4.a-b.
+ // As a side-effect of NativeLookupOwnPropertyInline, we may determine that
+ // a property is not found and the proto chain should not be searched. This
+ // can occur for:
+ // - Out-of-range numeric properties of a TypedArrayObject
+ // - Recursive resolve hooks (which is expected when they try to set the
+ // property being resolved).
+ JSObject* proto = pobj->staticPrototype();
+ if (!proto || prop.shouldIgnoreProtoChain()) {
+ // Step 4.d.i (and step 5).
+ return SetNonexistentProperty<IsQualified>(cx, obj, id, v, receiver,
+ result);
+ }
+
+ // Step 4.c.i. If the prototype is also native, this step is a
+ // recursive tail call, and we don't need to go through all the
+ // plumbing of SetProperty; the top of the loop is where we're going to
+ // end up anyway. But if pobj is non-native, that optimization would be
+ // incorrect.
+ if (!proto->is<NativeObject>()) {
+ // Unqualified assignments are not specified to go through [[Set]]
+ // at all, but they do go through this function. So check for
+ // unqualified assignment to a nonexistent global (a strict error).
+ RootedObject protoRoot(cx, proto);
+ if (!IsQualified) {
+ bool found;
+ if (!HasProperty(cx, protoRoot, id, &found)) {
+ return false;
+ }
+ if (!found) {
+ return SetNonexistentProperty<IsQualified>(cx, obj, id, v, receiver,
+ result);
+ }
+ }
+
+ return SetProperty(cx, protoRoot, id, v, receiver, result);
+ }
+ pobj = &proto->as<NativeObject>();
+ }
+}
+
+template bool js::NativeSetProperty<Qualified>(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id, HandleValue value,
+ HandleValue receiver,
+ ObjectOpResult& result);
+
+template bool js::NativeSetProperty<Unqualified>(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id, HandleValue value,
+ HandleValue receiver,
+ ObjectOpResult& result);
+
+bool js::NativeSetElement(JSContext* cx, Handle<NativeObject*> obj,
+ uint32_t index, HandleValue v, HandleValue receiver,
+ ObjectOpResult& result) {
+ RootedId id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+ return NativeSetProperty<Qualified>(cx, obj, id, v, receiver, result);
+}
+
+/*** [[Delete]] *************************************************************/
+
+static bool CallJSDeletePropertyOp(JSContext* cx, JSDeletePropertyOp op,
+ HandleObject receiver, HandleId id,
+ ObjectOpResult& result) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return false;
+ }
+
+ cx->check(receiver, id);
+ if (op) {
+ return op(cx, receiver, id, result);
+ }
+ return result.succeed();
+}
+
+// ES6 draft rev 31 9.1.10 [[Delete]]
+bool js::NativeDeleteProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, ObjectOpResult& result) {
+#ifdef ENABLE_RECORD_TUPLE
+ MOZ_ASSERT(!js::IsExtendedPrimitive(*obj));
+#endif
+
+ // Steps 2-3.
+ PropertyResult prop;
+ if (!NativeLookupOwnProperty<CanGC>(cx, obj, id, &prop)) {
+ return false;
+ }
+
+ // Step 4.
+ if (prop.isNotFound()) {
+    // If no property was found, call the class's delProperty hook, passing
+    // succeeded as the result parameter. This always succeeds when there is
+    // no hook.
+ return CallJSDeletePropertyOp(cx, obj->getClass()->getDelProperty(), obj,
+ id, result);
+ }
+
+ // Step 6. Non-configurable property.
+ if (!GetPropertyAttributes(obj, prop).configurable()) {
+ return result.failCantDelete();
+ }
+
+ // Typed array elements are configurable, but can't be deleted.
+ if (prop.isTypedArrayElement()) {
+ return result.failCantDelete();
+ }
+
+ if (!CallJSDeletePropertyOp(cx, obj->getClass()->getDelProperty(), obj, id,
+ result)) {
+ return false;
+ }
+ if (!result) {
+ return true;
+ }
+
+ // Step 5.
+ if (prop.isDenseElement()) {
+ obj->setDenseElementHole(prop.denseElementIndex());
+ } else {
+ if (!NativeObject::removeProperty(cx, obj, id)) {
+ return false;
+ }
+ }
+
+ return SuppressDeletedProperty(cx, obj, id);
+}
+
+bool js::CopyDataPropertiesNative(JSContext* cx, Handle<PlainObject*> target,
+ Handle<NativeObject*> from,
+ Handle<PlainObject*> excludedItems,
+ bool* optimized) {
+#ifdef ENABLE_RECORD_TUPLE
+ MOZ_ASSERT(!js::IsExtendedPrimitive(*target));
+#endif
+
+ *optimized = false;
+
+ // Don't use the fast path if |from| may have extra indexed or lazy
+ // properties.
+ if (from->getDenseInitializedLength() > 0 || from->isIndexed() ||
+ from->is<TypedArrayObject>() ||
+ IF_RECORD_TUPLE(from->is<RecordObject>() || from->is<TupleObject>(),
+ false) ||
+ from->getClass()->getNewEnumerate() || from->getClass()->getEnumerate()) {
+ return true;
+ }
+
+ // Collect all enumerable data properties.
+ Rooted<PropertyInfoWithKeyVector> props(cx, PropertyInfoWithKeyVector(cx));
+
+ Rooted<NativeShape*> fromShape(cx, from->shape());
+ for (ShapePropertyIter<NoGC> iter(fromShape); !iter.done(); iter++) {
+ jsid id = iter->key();
+ MOZ_ASSERT(!id.isInt());
+
+ if (!iter->enumerable()) {
+ continue;
+ }
+ if (excludedItems && excludedItems->contains(cx, id)) {
+ continue;
+ }
+
+ // Don't use the fast path if |from| contains non-data properties.
+ //
+ // This enables two optimizations:
+ // 1. We don't need to handle the case when accessors modify |from|.
+ // 2. String and symbol properties can be added in one go.
+ if (!iter->isDataProperty()) {
+ return true;
+ }
+
+ if (!props.append(*iter)) {
+ return false;
+ }
+ }
+
+ *optimized = true;
+
+  // If |target| contains no own properties, we can directly call
+  // AddDataPropertyToPlainObject.
+ const bool targetHadNoOwnProperties = target->empty();
+
+ RootedId key(cx);
+ RootedValue value(cx);
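+  // |props| was filled by ShapePropertyIter, which iterates from the most
+  // recently added property back to the first, so walk the vector in reverse
+  // to define the properties on |target| in their original order.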
+ for (size_t i = props.length(); i > 0; i--) {
+ PropertyInfoWithKey prop = props[i - 1];
+ MOZ_ASSERT(prop.isDataProperty());
+ MOZ_ASSERT(prop.enumerable());
+
+ key = prop.key();
+ MOZ_ASSERT(!key.isInt());
+
+ MOZ_ASSERT(from->is<NativeObject>());
+ MOZ_ASSERT(from->shape() == fromShape);
+
+ value = from->getSlot(prop.slot());
+ if (targetHadNoOwnProperties) {
+ MOZ_ASSERT(!target->containsPure(key),
+ "didn't expect to find an existing property");
+
+ if (!AddDataPropertyToPlainObject(cx, target, key, value)) {
+ return false;
+ }
+ } else {
+ if (!NativeDefineDataProperty(cx, target, key, value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
diff --git a/js/src/vm/NativeObject.h b/js/src/vm/NativeObject.h
new file mode 100644
index 0000000000..b964c88a83
--- /dev/null
+++ b/js/src/vm/NativeObject.h
@@ -0,0 +1,1892 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_NativeObject_h
+#define vm_NativeObject_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+
+#include <algorithm>
+#include <stdint.h>
+
+#include "NamespaceImports.h"
+
+#include "gc/Barrier.h"
+#include "gc/MaybeRooted.h"
+#include "gc/ZoneAllocator.h"
+#include "js/shadow/Object.h" // JS::shadow::Object
+#include "js/shadow/Zone.h" // JS::shadow::Zone
+#include "js/Value.h"
+#include "vm/GetterSetter.h"
+#include "vm/JSAtom.h"
+#include "vm/JSObject.h"
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+
+namespace js {
+
+class PropertyResult;
+
+namespace gc {
+class TenuringTracer;
+} // namespace gc
+
+#ifdef ENABLE_RECORD_TUPLE
+// Defined in vm/RecordTupleShared.{h,cpp}. We cannot include that file
+// because it causes circular dependencies.
+extern bool IsExtendedPrimitiveWrapper(const JSObject& obj);
+#endif
+
+/*
+ * To really poison a set of values, using 'magic' or 'undefined' isn't good
+ * enough: buggy code will often just ignore those values in debug builds and
+ * crash in release builds (see bug 629974). Instead, we use a safe-for-crash
+ * pointer.
+ */
+static MOZ_ALWAYS_INLINE void Debug_SetValueRangeToCrashOnTouch(Value* beg,
+ Value* end) {
+#ifdef DEBUG
+ for (Value* v = beg; v != end; ++v) {
+ *v = js::PoisonedObjectValue(0x48);
+ }
+#endif
+}
+
+static MOZ_ALWAYS_INLINE void Debug_SetValueRangeToCrashOnTouch(Value* vec,
+ size_t len) {
+#ifdef DEBUG
+ Debug_SetValueRangeToCrashOnTouch(vec, vec + len);
+#endif
+}
+
+static MOZ_ALWAYS_INLINE void Debug_SetValueRangeToCrashOnTouch(
+ GCPtr<Value>* vec, size_t len) {
+#ifdef DEBUG
+ Debug_SetValueRangeToCrashOnTouch((Value*)vec, len);
+#endif
+}
+
+static MOZ_ALWAYS_INLINE void Debug_SetSlotRangeToCrashOnTouch(HeapSlot* vec,
+ uint32_t len) {
+#ifdef DEBUG
+ Debug_SetValueRangeToCrashOnTouch((Value*)vec, len);
+#endif
+}
+
+static MOZ_ALWAYS_INLINE void Debug_SetSlotRangeToCrashOnTouch(HeapSlot* begin,
+ HeapSlot* end) {
+#ifdef DEBUG
+ Debug_SetValueRangeToCrashOnTouch((Value*)begin, end - begin);
+#endif
+}
+
+class ArrayObject;
+
+/*
+ * ES6 20130308 draft 8.4.2.4 ArraySetLength.
+ *
+ * |id| must be "length", |desc| is the new non-accessor descriptor, and
+ * |result| receives an error code if the change is invalid.
+ */
+extern bool ArraySetLength(JSContext* cx, Handle<ArrayObject*> obj, HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result);
+
+/*
+ * [SMDOC] NativeObject Elements layout
+ *
+ * Elements header used for native objects. The elements component of such
+ * objects offers an efficient representation for all or some of the indexed
+ * properties of the object, using a flat array of Values rather than a shape
+ * hierarchy stored in the object's slots. This structure is immediately
+ * followed by an array of elements, with the elements member in an object
+ * pointing to the beginning of that array (the end of this structure). See
+ * below for usage of this structure.
+ *
+ * The sets of properties represented by an object's elements and slots
+ * are disjoint. The elements contain only indexed properties, while the slots
+ * can contain both named and indexed properties; any indexes in the slots are
+ * distinct from those in the elements. If isIndexed() is false for an object,
+ * all indexed properties (if any) are stored in the dense elements.
+ *
+ * Indexes will be stored in the object's slots instead of its elements in
+ * the following cases:
+ *  - there are more than MIN_SPARSE_INDEX slots total and the load factor
+ *    (count / capacity) is less than 0.25
+ *  - a property is defined that has non-default property attributes.
+ *
+ * We track these pieces of metadata for dense elements:
+ * - The length property as a uint32_t, accessible for array objects with
+ * ArrayObject::{length,setLength}(). This is unused for non-arrays.
+ * - The number of element slots (capacity), gettable with
+ * getDenseCapacity().
+ * - The array's initialized length, accessible with
+ * getDenseInitializedLength().
+ *
+ * Holes in the array are represented by MagicValue(JS_ELEMENTS_HOLE) values.
+ * These indicate indexes which are not dense properties of the array. The
+ * property may, however, be held by the object's properties.
+ *
+ * The capacity and length of an object's elements are almost entirely
+ * unrelated! In general the length may be greater than, less than, or equal
+ * to the capacity. The first case occurs with |new Array(100)|. The length
+ * is 100, but the capacity remains 0 (indices below length and above capacity
+ * must be treated as holes) until elements between capacity and length are
+ * set. The other two cases are common, depending upon the number of elements
+ * in an array and the underlying allocator used for element storage.
+ *
+ * The only case in which the capacity and length of an object's elements are
+ * related is when the object is an array with non-writable length. In this
+ * case the capacity is always less than or equal to the length. This permits
+ * JIT code to optimize away the check for non-writable length when assigning
+ * to possibly out-of-range elements: such code already has to check for
+ * |index < capacity|, and fallback code checks for non-writable length.
+ *
+ * The initialized length of an object specifies the number of elements that
+ * have been initialized. All elements above the initialized length are
+ * holes in the object, and the memory for all elements between the initialized
+ * length and capacity is left uninitialized. The initialized length is some
+ * value less than or equal to both the object's length and the object's
+ * capacity.
+ *
+ * There is some flexibility in the exact value the initialized length holds:
+ * e.g. if an array has length 5 and capacity 10 and is completely empty, any
+ * initialized length between zero and 5 is valid, as long as the in-memory
+ * values below the initialized length have been initialized with a hole
+ * value. However, in such cases we want to keep the initialized length as
+ * small as possible: if the object is known to have no hole values below its
+ * initialized length, then it is "packed" and can be accessed much faster by
+ * JIT code.
+ *
+ * Elements do not track property creation order, so enumerating the elements
+ * of an object does not necessarily visit indexes in the order they were
+ * created.
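+ *
+ * As a concrete sketch of these invariants (values are illustrative):
+ * immediately after |new Array(100)|, the elements header holds
+ * length == 100, capacity == 0 and initializedLength == 0; after
+ * |arr[0] = 1| it holds capacity >= 1, initializedLength == 1, and the
+ * elements are still "packed".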
+ *
+ *
+ * [SMDOC] NativeObject shifted elements optimization
+ *
+ * Shifted elements
+ * ----------------
+ * It's pretty common to use an array as a queue, like this:
+ *
+ * while (arr.length > 0)
+ * foo(arr.shift());
+ *
+ * To ensure we don't get quadratic behavior on this, elements can be 'shifted'
+ * in memory. tryShiftDenseElements does this by incrementing elements_ to point
+ * to the next element and moving the ObjectElements header in memory (so it's
+ * stored where the shifted Value used to be).
+ *
+ * Shifted elements can be moved when we grow the array or when the array is
+ * made non-extensible (for simplicity, shifted elements are not supported on
+ * objects that are non-extensible, have copy-on-write elements, or on arrays
+ * with non-writable length).
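+ *
+ * A sketch of the layout after shifting one element (illustrative; the
+ * header occupies VALUES_PER_HEADER Values):
+ *
+ *   before:  [header][e0][e1][e2]    elements_ points at e0
+ *   after:   ..[header][e1][e2]      elements_ points at e1 and
+ *                                    numShiftedElements() == 1
+ *
+ * The header has been moved one Value forward, into the slot that used to
+ * hold e0.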
+ */
+class ObjectElements {
+ public:
+ enum Flags : uint16_t {
+ // Elements are stored inline in the object allocation.
+ // An object allocated with the FIXED flag set can have the flag unset later
+ // if `growElements()` is called to increase the capacity beyond what was
+ // initially allocated. Once the flag is unset, it will remain so for the
+ // rest of the lifetime of the object.
+ FIXED = 0x1,
+
+ // Present only if these elements correspond to an array with
+ // non-writable length; never present for non-arrays.
+ NONWRITABLE_ARRAY_LENGTH = 0x2,
+
+#ifdef ENABLE_RECORD_TUPLE
+ // Records, Tuples and Boxes must be atomized before being hashed. We store
+ // the "is atomized" flag here for tuples, and in fixed slots for records
+ // and boxes.
+ TUPLE_IS_ATOMIZED = 0x4,
+#endif
+
+ // For TypedArrays only: this TypedArray's storage is mapping shared
+ // memory. This is a static property of the TypedArray, set when it
+ // is created and never changed.
+ SHARED_MEMORY = 0x8,
+
+ // These elements are not extensible. If this flag is set, the object's
+ // Shape must also have the NotExtensible flag. This exists on
+ // ObjectElements in addition to Shape to simplify JIT code.
+ NOT_EXTENSIBLE = 0x10,
+
+ // These elements are set to integrity level "sealed". If this flag is
+ // set, the NOT_EXTENSIBLE flag must be set as well.
+ SEALED = 0x20,
+
+ // These elements are set to integrity level "frozen". If this flag is
+ // set, the SEALED flag must be set as well.
+ //
+ // This flag must only be set if the Shape has the FrozenElements flag.
+ // The Shape flag ensures a shape guard can be used to guard against frozen
+ // elements. The ObjectElements flag is convenient for JIT code and
+ // ObjectElements assertions.
+ FROZEN = 0x40,
+
+ // If this flag is not set, the elements are guaranteed to contain no hole
+ // values (the JS_ELEMENTS_HOLE MagicValue) in [0, initializedLength).
+ NON_PACKED = 0x80,
+
+    // If this flag is not set, there's definitely no for-in iterator that
+    // covers these dense elements, so elements can be deleted without calling
+    // SuppressDeletedProperty. This is used by fast paths for various Array
+ // builtins. See also NativeObject::denseElementsMaybeInIteration.
+ MAYBE_IN_ITERATION = 0x100,
+ };
+
+ // The flags word stores both the flags and the number of shifted elements.
+ // Allow shifting 2047 elements before actually moving the elements.
+ static const size_t NumShiftedElementsBits = 11;
+ static const size_t MaxShiftedElements = (1 << NumShiftedElementsBits) - 1;
+ static const size_t NumShiftedElementsShift = 32 - NumShiftedElementsBits;
+ static const size_t FlagsMask = (1 << NumShiftedElementsShift) - 1;
+ static_assert(MaxShiftedElements == 2047,
+ "MaxShiftedElements should match the comment");
+
+ private:
+ friend class ::JSObject;
+ friend class ArrayObject;
+ friend class NativeObject;
+ friend class gc::TenuringTracer;
+#ifdef ENABLE_RECORD_TUPLE
+ friend class TupleType;
+#endif
+
+ friend bool js::SetIntegrityLevel(JSContext* cx, HandleObject obj,
+ IntegrityLevel level);
+
+ friend bool ArraySetLength(JSContext* cx, Handle<ArrayObject*> obj,
+ HandleId id, Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result);
+
+ // The NumShiftedElementsBits high bits of this are used to store the
+ // number of shifted elements, the other bits are available for the flags.
+ // See Flags enum above.
+ uint32_t flags;
+
+ /*
+ * Number of initialized elements. This is <= the capacity, and for arrays
+ * is <= the length. Memory for elements above the initialized length is
+ * uninitialized, but values between the initialized length and the proper
+ * length are conceptually holes.
+ */
+ uint32_t initializedLength;
+
+ /* Number of allocated slots. */
+ uint32_t capacity;
+
+ /* 'length' property of array objects, unused for other objects. */
+ uint32_t length;
+
+ bool hasNonwritableArrayLength() const {
+ return flags & NONWRITABLE_ARRAY_LENGTH;
+ }
+ void setNonwritableArrayLength() {
+ // See ArrayObject::setNonWritableLength.
+ MOZ_ASSERT(capacity == initializedLength);
+ MOZ_ASSERT(numShiftedElements() == 0);
+ flags |= NONWRITABLE_ARRAY_LENGTH;
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ void setTupleIsAtomized() { flags |= TUPLE_IS_ATOMIZED; }
+
+ bool tupleIsAtomized() const { return flags & TUPLE_IS_ATOMIZED; }
+#endif
+
+ void addShiftedElements(uint32_t count) {
+ MOZ_ASSERT(count < capacity);
+ MOZ_ASSERT(count < initializedLength);
+ MOZ_ASSERT(!(
+ flags & (NONWRITABLE_ARRAY_LENGTH | NOT_EXTENSIBLE | SEALED | FROZEN)));
+ uint32_t numShifted = numShiftedElements() + count;
+ MOZ_ASSERT(numShifted <= MaxShiftedElements);
+ flags = (numShifted << NumShiftedElementsShift) | (flags & FlagsMask);
+ capacity -= count;
+ initializedLength -= count;
+ }
+ void unshiftShiftedElements(uint32_t count) {
+ MOZ_ASSERT(count > 0);
+ MOZ_ASSERT(!(
+ flags & (NONWRITABLE_ARRAY_LENGTH | NOT_EXTENSIBLE | SEALED | FROZEN)));
+ uint32_t numShifted = numShiftedElements();
+ MOZ_ASSERT(count <= numShifted);
+ numShifted -= count;
+ flags = (numShifted << NumShiftedElementsShift) | (flags & FlagsMask);
+ capacity += count;
+ initializedLength += count;
+ }
+ void clearShiftedElements() {
+ flags &= FlagsMask;
+ MOZ_ASSERT(numShiftedElements() == 0);
+ }
+
+ void markNonPacked() { flags |= NON_PACKED; }
+
+ void markMaybeInIteration() { flags |= MAYBE_IN_ITERATION; }
+ bool maybeInIteration() { return flags & MAYBE_IN_ITERATION; }
+
+ void setNotExtensible() {
+ MOZ_ASSERT(!isNotExtensible());
+ flags |= NOT_EXTENSIBLE;
+ }
+ bool isNotExtensible() { return flags & NOT_EXTENSIBLE; }
+
+ void seal() {
+ MOZ_ASSERT(isNotExtensible());
+ MOZ_ASSERT(!isSealed());
+ MOZ_ASSERT(!isFrozen());
+ flags |= SEALED;
+ }
+ void freeze() {
+ MOZ_ASSERT(isNotExtensible());
+ MOZ_ASSERT(isSealed());
+ MOZ_ASSERT(!isFrozen());
+ flags |= FROZEN;
+ }
+
+ bool isFrozen() const { return flags & FROZEN; }
+
+ public:
+ constexpr ObjectElements(uint32_t capacity, uint32_t length)
+ : flags(0), initializedLength(0), capacity(capacity), length(length) {}
+
+ enum class SharedMemory { IsShared };
+
+ constexpr ObjectElements(uint32_t capacity, uint32_t length,
+ SharedMemory shmem)
+ : flags(SHARED_MEMORY),
+ initializedLength(0),
+ capacity(capacity),
+ length(length) {}
+
+ HeapSlot* elements() {
+ return reinterpret_cast<HeapSlot*>(uintptr_t(this) +
+ sizeof(ObjectElements));
+ }
+ const HeapSlot* elements() const {
+ return reinterpret_cast<const HeapSlot*>(uintptr_t(this) +
+ sizeof(ObjectElements));
+ }
+ static ObjectElements* fromElements(HeapSlot* elems) {
+ return reinterpret_cast<ObjectElements*>(uintptr_t(elems) -
+ sizeof(ObjectElements));
+ }
+
+ bool isSharedMemory() const { return flags & SHARED_MEMORY; }
+
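+  // Note that the offsets below are negative: they are applied to an
+  // object's elements_ pointer, which points just past the header (see
+  // elements() and fromElements() above).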
+ static int offsetOfFlags() {
+ return int(offsetof(ObjectElements, flags)) - int(sizeof(ObjectElements));
+ }
+ static int offsetOfInitializedLength() {
+ return int(offsetof(ObjectElements, initializedLength)) -
+ int(sizeof(ObjectElements));
+ }
+ static int offsetOfCapacity() {
+ return int(offsetof(ObjectElements, capacity)) -
+ int(sizeof(ObjectElements));
+ }
+ static int offsetOfLength() {
+ return int(offsetof(ObjectElements, length)) - int(sizeof(ObjectElements));
+ }
+
+ static void PrepareForPreventExtensions(JSContext* cx, NativeObject* obj);
+ static void PreventExtensions(NativeObject* obj);
+ [[nodiscard]] static bool FreezeOrSeal(JSContext* cx,
+ Handle<NativeObject*> obj,
+ IntegrityLevel level);
+
+ bool isSealed() const { return flags & SEALED; }
+
+ bool isPacked() const { return !(flags & NON_PACKED); }
+
+ JS::PropertyAttributes elementAttributes() const {
+ if (isFrozen()) {
+ return {JS::PropertyAttribute::Enumerable};
+ }
+ if (isSealed()) {
+ return {JS::PropertyAttribute::Enumerable,
+ JS::PropertyAttribute::Writable};
+ }
+ return {JS::PropertyAttribute::Configurable,
+ JS::PropertyAttribute::Enumerable, JS::PropertyAttribute::Writable};
+ }
+
+ uint32_t numShiftedElements() const {
+ uint32_t numShifted = flags >> NumShiftedElementsShift;
+ MOZ_ASSERT_IF(numShifted > 0,
+ !(flags & (NONWRITABLE_ARRAY_LENGTH | NOT_EXTENSIBLE |
+ SEALED | FROZEN)));
+ return numShifted;
+ }
+
+ uint32_t numAllocatedElements() const {
+ return VALUES_PER_HEADER + capacity + numShiftedElements();
+ }
+
+  // This is enough slots to store an instance of this class. See the static
+  // assertion below.
+ static const size_t VALUES_PER_HEADER = 2;
+};
+
+static_assert(ObjectElements::VALUES_PER_HEADER * sizeof(HeapSlot) ==
+ sizeof(ObjectElements),
+ "ObjectElements doesn't fit in the given number of slots");
+
+/*
+ * Slots header used for native objects. The header stores the capacity and the
+ * slot data follows in memory.
+ */
+class alignas(HeapSlot) ObjectSlots {
+ uint32_t capacity_;
+ uint32_t dictionarySlotSpan_;
+ uint64_t maybeUniqueId_;
+
+ public:
+ // Special values for maybeUniqueId_ to indicate no unique ID is present.
+ static constexpr uint64_t NoUniqueIdInDynamicSlots = 0;
+ static constexpr uint64_t NoUniqueIdInSharedEmptySlots = 1;
+ static constexpr uint64_t LastNoUniqueIdValue = NoUniqueIdInSharedEmptySlots;
+
+ static constexpr size_t VALUES_PER_HEADER = 2;
+
+ static inline size_t allocCount(size_t slotCount) {
+ static_assert(sizeof(ObjectSlots) ==
+ ObjectSlots::VALUES_PER_HEADER * sizeof(HeapSlot));
+#ifdef MOZ_VALGRIND
+ if (slotCount == 0) {
+      // Add an extra unused slot so that NativeObject::slots_ always points
+      // into the allocation; otherwise valgrind thinks this is a leak.
+ slotCount = 1;
+ }
+#endif
+ return slotCount + VALUES_PER_HEADER;
+ }
+
+ static inline size_t allocSize(size_t slotCount) {
+ return allocCount(slotCount) * sizeof(HeapSlot);
+ }
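+  // For example, assuming 8-byte HeapSlots, allocSize(6) ==
+  // (6 + VALUES_PER_HEADER) * sizeof(HeapSlot) == 64 bytes.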
+
+ static ObjectSlots* fromSlots(HeapSlot* slots) {
+ MOZ_ASSERT(slots);
+ return reinterpret_cast<ObjectSlots*>(uintptr_t(slots) -
+ sizeof(ObjectSlots));
+ }
+
+ static constexpr size_t offsetOfCapacity() {
+ return offsetof(ObjectSlots, capacity_);
+ }
+ static constexpr size_t offsetOfDictionarySlotSpan() {
+ return offsetof(ObjectSlots, dictionarySlotSpan_);
+ }
+ static constexpr size_t offsetOfMaybeUniqueId() {
+ return offsetof(ObjectSlots, maybeUniqueId_);
+ }
+ static constexpr size_t offsetOfSlots() { return sizeof(ObjectSlots); }
+
+ constexpr explicit ObjectSlots(uint32_t capacity, uint32_t dictionarySlotSpan,
+ uint64_t maybeUniqueId);
+
+ constexpr uint32_t capacity() const { return capacity_; }
+
+ constexpr uint32_t dictionarySlotSpan() const { return dictionarySlotSpan_; }
+
+ bool isSharedEmptySlots() const {
+ return maybeUniqueId_ == NoUniqueIdInSharedEmptySlots;
+ }
+
+ constexpr bool hasUniqueId() const {
+ return maybeUniqueId_ > LastNoUniqueIdValue;
+ }
+ uint64_t uniqueId() const {
+ MOZ_ASSERT(hasUniqueId());
+ return maybeUniqueId_;
+ }
+  uint64_t maybeUniqueId() const { return hasUniqueId() ? maybeUniqueId_ : 0; }
+ void setUniqueId(uint64_t uid) {
+ MOZ_ASSERT(uid > LastNoUniqueIdValue);
+ MOZ_ASSERT(!isSharedEmptySlots());
+ maybeUniqueId_ = uid;
+ }
+
+ void setDictionarySlotSpan(uint32_t span) { dictionarySlotSpan_ = span; }
+
+ HeapSlot* slots() const {
+ return reinterpret_cast<HeapSlot*>(uintptr_t(this) + sizeof(ObjectSlots));
+ }
+};
+
+/*
+ * Shared singletons for objects with no elements.
+ * emptyObjectElementsShared is used only for TypedArrays, when the TA
+ * maps shared memory.
+ */
+extern HeapSlot* const emptyObjectElements;
+extern HeapSlot* const emptyObjectElementsShared;
+
+/*
+ * Shared singletons for objects with no dynamic slots.
+ */
+extern HeapSlot* const emptyObjectSlots;
+extern HeapSlot* const emptyObjectSlotsForDictionaryObject[];
+
+class AutoCheckShapeConsistency;
+class GCMarker;
+
+// Operations that change an object's dense elements can succeed, fail, or be
+// unable to complete. The last case is used when the object's elements must
+// become sparse instead. The enum below is used for such operations.
+enum class DenseElementResult { Failure, Success, Incomplete };
+
+// Stores a slot offset in bytes relative to either the NativeObject* address
+// (if isFixedSlot) or to NativeObject::slots_ (if !isFixedSlot).
+class TaggedSlotOffset {
+ uint32_t bits_ = 0;
+
+ public:
+ static constexpr size_t OffsetShift = 1;
+ static constexpr size_t IsFixedSlotFlag = 0b1;
+
+ static constexpr size_t MaxOffset = SHAPE_MAXIMUM_SLOT * sizeof(Value);
+ static_assert((uint64_t(MaxOffset) << OffsetShift) <= UINT32_MAX,
+ "maximum slot offset must fit in TaggedSlotOffset");
+
+ constexpr TaggedSlotOffset() = default;
+
+ TaggedSlotOffset(uint32_t offset, bool isFixedSlot)
+ : bits_((offset << OffsetShift) | isFixedSlot) {
+ MOZ_ASSERT(offset <= MaxOffset);
+ }
+
+ uint32_t offset() const { return bits_ >> OffsetShift; }
+ bool isFixedSlot() const { return bits_ & IsFixedSlotFlag; }
+
+ bool operator==(const TaggedSlotOffset& other) const {
+ return bits_ == other.bits_;
+ }
+ bool operator!=(const TaggedSlotOffset& other) const {
+ return !(*this == other);
+ }
+};
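+// A usage sketch (illustrative): TaggedSlotOffset(8, /* isFixedSlot = */
+// false) describes the second dynamic slot, i.e. slots_[1] on a build with
+// 8-byte Values; offset() then returns 8 and isFixedSlot() returns false.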
+
+/*
+ * [SMDOC] NativeObject layout
+ *
+ * NativeObject specifies the internal implementation of a native object.
+ *
+ * Native objects use ShapedObject::shape to record property information. Two
+ * native objects with the same shape are guaranteed to have the same number of
+ * fixed slots.
+ *
+ * Native objects extend the base implementation of an object with storage for
+ * the object's named properties and indexed elements.
+ *
+ * These are stored separately from one another. Objects are followed by a
+ * variable-sized array of values for inline storage, which may be used
+ * either by properties of native objects (fixed slots), by elements (fixed
+ * elements), or by other data for certain kinds of objects, such as
+ * ArrayBufferObjects and TypedArrayObjects.
+ *
+ * Named property storage can be split between fixed slots and a dynamically
+ * allocated array (the slots member). For an object with N fixed slots, shapes
+ * with slots [0..N-1] are stored in the fixed slots, and the remainder are
+ * stored in the dynamic array. If all properties fit in the fixed slots, the
+ * 'slots_' member is nullptr.
+ *
+ * Elements are indexed via the 'elements_' member. This member can point to
+ * the shared emptyObjectElements or emptyObjectElementsShared singleton, into
+ * the inline value array (the address of the third value, to leave room for
+ * an ObjectElements header; in this case numFixedSlots() is zero), or to a
+ * dynamically allocated array.
+ *
+ * Slots and elements may both be non-empty. The slots may be either names or
+ * indexes; no indexed property will be in both the slots and elements.
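+ *
+ * For example (illustrative): an object whose shape has four fixed slots and
+ * a slot span of six stores slots 0-3 in the fixed slots and slots 4-5 in
+ * slots_[0] and slots_[1].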
+ */
+class NativeObject : public JSObject {
+ protected:
+ /* Slots for object properties. */
+ js::HeapSlot* slots_;
+
+ /* Slots for object dense elements. */
+ js::HeapSlot* elements_;
+
+ friend class ::JSObject;
+
+ private:
+ static void staticAsserts() {
+ static_assert(sizeof(NativeObject) == sizeof(JSObject_Slots0),
+ "native object size must match GC thing size");
+ static_assert(sizeof(NativeObject) == sizeof(JS::shadow::Object),
+ "shadow interface must match actual implementation");
+ static_assert(sizeof(NativeObject) % sizeof(Value) == 0,
+ "fixed slots after an object must be aligned");
+
+ static_assert(offsetOfShape() == offsetof(JS::shadow::Object, shape),
+ "shadow type must match actual type");
+ static_assert(
+ offsetof(NativeObject, slots_) == offsetof(JS::shadow::Object, slots),
+ "shadow slots must match actual slots");
+ static_assert(
+ offsetof(NativeObject, elements_) == offsetof(JS::shadow::Object, _1),
+ "shadow placeholder must match actual elements");
+
+ static_assert(MAX_FIXED_SLOTS <= Shape::FIXED_SLOTS_MAX,
+ "verify numFixedSlots() bitfield is big enough");
+ static_assert(sizeof(NativeObject) + MAX_FIXED_SLOTS * sizeof(Value) ==
+ JSObject::MAX_BYTE_SIZE,
+ "inconsistent maximum object size");
+
+ // Sanity check NativeObject size is what we expect.
+#ifdef JS_64BIT
+ static_assert(sizeof(NativeObject) == 3 * sizeof(void*));
+#else
+ static_assert(sizeof(NativeObject) == 4 * sizeof(void*));
+#endif
+ }
+
+ public:
+ NativeShape* shape() const { return &JSObject::shape()->asNative(); }
+ SharedShape* sharedShape() const { return &shape()->asShared(); }
+ DictionaryShape* dictionaryShape() const { return &shape()->asDictionary(); }
+
+ PropertyInfoWithKey getLastProperty() const {
+ return shape()->lastProperty();
+ }
+
+ HeapSlotArray getDenseElements() const { return HeapSlotArray(elements_); }
+
+ const Value& getDenseElement(uint32_t idx) const {
+ MOZ_ASSERT(idx < getDenseInitializedLength());
+ return elements_[idx];
+ }
+ bool containsDenseElement(uint32_t idx) const {
+ return idx < getDenseInitializedLength() &&
+ !elements_[idx].isMagic(JS_ELEMENTS_HOLE);
+ }
+ uint32_t getDenseInitializedLength() const {
+ return getElementsHeader()->initializedLength;
+ }
+ uint32_t getDenseCapacity() const { return getElementsHeader()->capacity; }
+
+ bool isSharedMemory() const { return getElementsHeader()->isSharedMemory(); }
+
+ // Update the object's shape and allocate slots if needed to match the shape's
+ // slot span.
+ MOZ_ALWAYS_INLINE bool setShapeAndAddNewSlots(JSContext* cx,
+ SharedShape* newShape,
+ uint32_t oldSpan,
+ uint32_t newSpan);
+
+ // Methods optimized for adding/removing a single slot. Must only be used for
+ // non-dictionary objects.
+ MOZ_ALWAYS_INLINE bool setShapeAndAddNewSlot(JSContext* cx,
+ SharedShape* newShape,
+ uint32_t slot);
+ void setShapeAndRemoveLastSlot(JSContext* cx, SharedShape* newShape,
+ uint32_t slot);
+
+ MOZ_ALWAYS_INLINE bool canReuseShapeForNewProperties(
+ NativeShape* newShape) const {
+ NativeShape* oldShape = shape();
+ MOZ_ASSERT(oldShape->propMapLength() == 0,
+ "object must have no properties");
+ MOZ_ASSERT(newShape->propMapLength() > 0,
+ "new shape must have at least one property");
+ if (oldShape->numFixedSlots() != newShape->numFixedSlots()) {
+ return false;
+ }
+ if (oldShape->isDictionary() || newShape->isDictionary()) {
+ return false;
+ }
+ if (oldShape->base() != newShape->base()) {
+ return false;
+ }
+ MOZ_ASSERT(oldShape->getObjectClass() == newShape->getObjectClass());
+ MOZ_ASSERT(oldShape->proto() == newShape->proto());
+ MOZ_ASSERT(oldShape->realm() == newShape->realm());
+ // We only handle the common case where the old shape has no object flags
+ // (expected because it's an empty object) and the new shape has just the
+ // HasEnumerable flag that we can copy safely.
+ if (!oldShape->objectFlags().isEmpty()) {
+ return false;
+ }
+ MOZ_ASSERT(newShape->hasObjectFlag(ObjectFlag::HasEnumerable));
+ return newShape->objectFlags() == ObjectFlags({ObjectFlag::HasEnumerable});
+ }
+
+ // Newly-created TypedArrays that map a SharedArrayBuffer are
+ // marked as shared by giving them an ObjectElements that has the
+ // ObjectElements::SHARED_MEMORY flag set.
+ void setIsSharedMemory() {
+ MOZ_ASSERT(elements_ == emptyObjectElements);
+ elements_ = emptyObjectElementsShared;
+ }
+
+ inline bool isInWholeCellBuffer() const;
+
+ static inline NativeObject* create(JSContext* cx, gc::AllocKind kind,
+ gc::Heap heap, Handle<SharedShape*> shape,
+ gc::AllocSite* site = nullptr);
+
+#ifdef DEBUG
+ static void enableShapeConsistencyChecks();
+#endif
+
+ protected:
+#ifdef DEBUG
+ friend class js::AutoCheckShapeConsistency;
+ void checkShapeConsistency();
+#else
+ void checkShapeConsistency() {}
+#endif
+
+ void maybeFreeDictionaryPropSlots(JSContext* cx, DictionaryPropMap* map,
+ uint32_t mapLength);
+
+ [[nodiscard]] static bool toDictionaryMode(JSContext* cx,
+ Handle<NativeObject*> obj);
+
+ private:
+  inline void setEmptyDynamicSlots(uint32_t dictionarySlotSpan);
+
+ inline void setDictionaryModeSlotSpan(uint32_t span);
+
+ friend class gc::TenuringTracer;
+
+ // Given a slot range from |start| to |end| exclusive, call |fun| with
+ // pointers to the corresponding fixed slot and/or dynamic slot ranges.
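+  // For example, with numFixedSlots() == 4, the range [2, 6) is split into
+  // the fixed slots [2, 4) and the dynamic slots slots_[0] and slots_[1].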
+ template <typename Fun>
+ void forEachSlotRangeUnchecked(uint32_t start, uint32_t end, const Fun& fun) {
+ MOZ_ASSERT(end >= start);
+ uint32_t nfixed = numFixedSlots();
+ if (start < nfixed) {
+ HeapSlot* fixedStart = &fixedSlots()[start];
+ HeapSlot* fixedEnd = &fixedSlots()[std::min(nfixed, end)];
+ fun(fixedStart, fixedEnd);
+ start = nfixed;
+ }
+ if (end > nfixed) {
+ HeapSlot* dynStart = &slots_[start - nfixed];
+ HeapSlot* dynEnd = &slots_[end - nfixed];
+ fun(dynStart, dynEnd);
+ }
+ }
+
+ template <typename Fun>
+ void forEachSlotRange(uint32_t start, uint32_t end, const Fun& fun) {
+ MOZ_ASSERT(slotInRange(end, SENTINEL_ALLOWED));
+ forEachSlotRangeUnchecked(start, end, fun);
+ }
+
+ protected:
+ friend class DictionaryPropMap;
+ friend class GCMarker;
+ friend class Shape;
+
+ void invalidateSlotRange(uint32_t start, uint32_t end) {
+#ifdef DEBUG
+ forEachSlotRange(start, end, [](HeapSlot* slotsStart, HeapSlot* slotsEnd) {
+ Debug_SetSlotRangeToCrashOnTouch(slotsStart, slotsEnd);
+ });
+#endif /* DEBUG */
+ }
+
+ void initFixedSlots(uint32_t numSlots) {
+ MOZ_ASSERT(numSlots == numUsedFixedSlots());
+ HeapSlot* slots = fixedSlots();
+ for (uint32_t i = 0; i < numSlots; i++) {
+ slots[i].initAsUndefined();
+ }
+ }
+ void initDynamicSlots(uint32_t numSlots) {
+ MOZ_ASSERT(numSlots == sharedShape()->slotSpan() - numFixedSlots());
+ HeapSlot* slots = slots_;
+ for (uint32_t i = 0; i < numSlots; i++) {
+ slots[i].initAsUndefined();
+ }
+ }
+ void initSlots(uint32_t nfixed, uint32_t slotSpan) {
+ initFixedSlots(std::min(nfixed, slotSpan));
+ if (slotSpan > nfixed) {
+ initDynamicSlots(slotSpan - nfixed);
+ }
+ }
+
+#ifdef DEBUG
+  enum SentinelAllowed { SENTINEL_NOT_ALLOWED, SENTINEL_ALLOWED };
+
+ /*
+ * Check that slot is in range for the object's allocated slots.
+ * If sentinelAllowed then slot may equal the slot capacity.
+ */
+ bool slotInRange(uint32_t slot,
+ SentinelAllowed sentinel = SENTINEL_NOT_ALLOWED) const;
+
+ /*
+ * Check whether a slot is a fixed slot.
+ */
+ bool slotIsFixed(uint32_t slot) const;
+
+ /*
+ * Check whether the supplied number of fixed slots is correct.
+ */
+ bool isNumFixedSlots(uint32_t nfixed) const;
+#endif
+
+ /*
+ * Minimum size for dynamically allocated slots in normal Objects.
+ * ArrayObjects don't use this limit and can have a lower slot capacity,
+ * since they normally don't have a lot of slots.
+ */
+ static const uint32_t SLOT_CAPACITY_MIN = 8 - ObjectSlots::VALUES_PER_HEADER;
+
+ /*
+ * Minimum size for dynamically allocated elements in normal Objects.
+ */
+ static const uint32_t ELEMENT_CAPACITY_MIN =
+ 8 - ObjectElements::VALUES_PER_HEADER;
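+
+  // Since each header occupies VALUES_PER_HEADER == 2 values, both minimums
+  // work out to 6 usable values, i.e. the smallest dynamic allocation is 8
+  // HeapSlots including its header.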
+
+ HeapSlot* fixedSlots() const {
+ return reinterpret_cast<HeapSlot*>(uintptr_t(this) + sizeof(NativeObject));
+ }
+
+ public:
+ inline void initEmptyDynamicSlots();
+
+ [[nodiscard]] static bool generateNewDictionaryShape(
+ JSContext* cx, Handle<NativeObject*> obj);
+
+ // The maximum number of slots in an object.
+ // |MAX_SLOTS_COUNT * sizeof(JS::Value)| shouldn't overflow
+ // int32_t (see slotsSizeMustNotOverflow).
+ static const uint32_t MAX_SLOTS_COUNT = (1 << 28) - 1;
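+
+  // Worked check: ((1 << 28) - 1) * sizeof(JS::Value) == 2^31 - 8, which
+  // fits in int32_t.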
+
+ static void slotsSizeMustNotOverflow() {
+ static_assert(
+ NativeObject::MAX_SLOTS_COUNT <= INT32_MAX / sizeof(JS::Value),
+ "every caller of this method requires that a slot "
+ "number (or slot count) count multiplied by "
+ "sizeof(Value) can't overflow uint32_t (and sometimes "
+ "int32_t, too)");
+ }
+
+ uint32_t numFixedSlots() const {
+ return reinterpret_cast<const JS::shadow::Object*>(this)->numFixedSlots();
+ }
+
+ // Get the number of fixed slots when the shape pointer may have been
+  // forwarded by a moving GC. You need to use this rather than
+ // numFixedSlots() in a trace hook if you access an object that is not the
+ // object being traced, since it may have a stale shape pointer.
+ inline uint32_t numFixedSlotsMaybeForwarded() const;
+
+ uint32_t numUsedFixedSlots() const {
+ uint32_t nslots = sharedShape()->slotSpan();
+ return std::min(nslots, numFixedSlots());
+ }
+
+ uint32_t slotSpan() const {
+ if (inDictionaryMode()) {
+ return dictionaryModeSlotSpan();
+ }
+ MOZ_ASSERT(getSlotsHeader()->dictionarySlotSpan() == 0);
+ return sharedShape()->slotSpan();
+ }
+
+ uint32_t dictionaryModeSlotSpan() const {
+ MOZ_ASSERT(inDictionaryMode());
+ return getSlotsHeader()->dictionarySlotSpan();
+ }
+
+ /* Whether a slot is at a fixed offset from this object. */
+ bool isFixedSlot(size_t slot) { return slot < numFixedSlots(); }
+
+ /* Index into the dynamic slots array to use for a dynamic slot. */
+ size_t dynamicSlotIndex(size_t slot) {
+ MOZ_ASSERT(slot >= numFixedSlots());
+ return slot - numFixedSlots();
+ }
+
+ // Native objects are never proxies. Call isExtensible instead.
+ bool nonProxyIsExtensible() const = delete;
+
+ bool isExtensible() const {
+#ifdef ENABLE_RECORD_TUPLE
+ if (IsExtendedPrimitiveWrapper(*this)) {
+ return false;
+ }
+#endif
+ return !hasFlag(ObjectFlag::NotExtensible);
+ }
+
+ /*
+ * Whether there may be indexed properties on this object, excluding any in
+ * the object's elements.
+ */
+ bool isIndexed() const { return hasFlag(ObjectFlag::Indexed); }
+
+ bool hasInterestingSymbol() const {
+ return hasFlag(ObjectFlag::HasInterestingSymbol);
+ }
+
+ bool hasEnumerableProperty() const {
+ return hasFlag(ObjectFlag::HasEnumerable);
+ }
+
+ static bool setHadGetterSetterChange(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ return setFlag(cx, obj, ObjectFlag::HadGetterSetterChange);
+ }
+ bool hadGetterSetterChange() const {
+ return hasFlag(ObjectFlag::HadGetterSetterChange);
+ }
+
+ bool allocateInitialSlots(JSContext* cx, uint32_t capacity);
+
+ /*
+ * Grow or shrink slots immediately before changing the slot span.
+ * The number of allocated slots is not stored explicitly, and changes to
+ * the slots must track changes in the slot span.
+ */
+ bool growSlots(JSContext* cx, uint32_t oldCapacity, uint32_t newCapacity);
+ bool growSlotsForNewSlot(JSContext* cx, uint32_t numFixed, uint32_t slot);
+ void shrinkSlots(JSContext* cx, uint32_t oldCapacity, uint32_t newCapacity);
+
+ bool allocateSlots(JSContext* cx, uint32_t newCapacity);
+
+ /*
+ * This method is static because it's called from JIT code. On OOM, returns
+ * false without leaving a pending exception on the context.
+ */
+ static bool growSlotsPure(JSContext* cx, NativeObject* obj,
+ uint32_t newCapacity);
+
+ /*
+ * Like growSlotsPure but for dense elements. This will return
+ * false if we failed to allocate a dense element for some reason (OOM, too
+   * many dense elements, non-writable array length, etc.).
+ */
+ static bool addDenseElementPure(JSContext* cx, NativeObject* obj);
+
+ /*
+   * Indicates whether this object has an ObjectSlots allocation attached.
+   * The capacity of this allocation can be zero if it is only used to hold
+   * a unique ID.
+ */
+ bool hasDynamicSlots() const {
+ return !getSlotsHeader()->isSharedEmptySlots();
+ }
+
+ /* Compute the number of dynamic slots required for this object. */
+ MOZ_ALWAYS_INLINE uint32_t calculateDynamicSlots() const;
+
+ MOZ_ALWAYS_INLINE uint32_t numDynamicSlots() const;
+
+#ifdef DEBUG
+ uint32_t outOfLineNumDynamicSlots() const;
+#endif
+
+ bool empty() const { return shape()->propMapLength() == 0; }
+
+ mozilla::Maybe<PropertyInfo> lookup(JSContext* cx, jsid id);
+ mozilla::Maybe<PropertyInfo> lookup(JSContext* cx, PropertyName* name) {
+ return lookup(cx, NameToId(name));
+ }
+
+ bool contains(JSContext* cx, jsid id) { return lookup(cx, id).isSome(); }
+ bool contains(JSContext* cx, PropertyName* name) {
+ return lookup(cx, name).isSome();
+ }
+ bool contains(JSContext* cx, jsid id, PropertyInfo prop) {
+ mozilla::Maybe<PropertyInfo> found = lookup(cx, id);
+ return found.isSome() && *found == prop;
+ }
+
+ /* Contextless; can be called from other pure code. */
+ mozilla::Maybe<PropertyInfo> lookupPure(jsid id);
+ mozilla::Maybe<PropertyInfo> lookupPure(PropertyName* name) {
+ return lookupPure(NameToId(name));
+ }
+
+ bool containsPure(jsid id) { return lookupPure(id).isSome(); }
+ bool containsPure(PropertyName* name) { return containsPure(NameToId(name)); }
+ bool containsPure(jsid id, PropertyInfo prop) {
+ mozilla::Maybe<PropertyInfo> found = lookupPure(id);
+ return found.isSome() && *found == prop;
+ }
+
+ private:
+ /*
+ * Allocate and free an object slot.
+ *
+ * FIXME: bug 593129 -- slot allocation should be done by object methods
+ * after calling object-parameter-free shape methods, avoiding coupling
+ * logic across the object vs. shape module wall.
+ */
+ static bool allocDictionarySlot(JSContext* cx, Handle<NativeObject*> obj,
+ uint32_t* slotp);
+
+ void freeDictionarySlot(uint32_t slot);
+
+ static MOZ_ALWAYS_INLINE bool maybeConvertToDictionaryForAdd(
+ JSContext* cx, Handle<NativeObject*> obj);
+
+ public:
+ // Add a new property. Must only be used when the |id| is not already present
+ // in the object's shape. Checks for non-extensibility must be done by the
+ // callers.
+ static bool addProperty(JSContext* cx, Handle<NativeObject*> obj, HandleId id,
+ PropertyFlags flags, uint32_t* slotOut);
+
+ static bool addProperty(JSContext* cx, Handle<NativeObject*> obj,
+ Handle<PropertyName*> name, PropertyFlags flags,
+ uint32_t* slotOut) {
+ RootedId id(cx, NameToId(name));
+ return addProperty(cx, obj, id, flags, slotOut);
+ }
+
+ static bool addPropertyInReservedSlot(JSContext* cx,
+ Handle<NativeObject*> obj, HandleId id,
+ uint32_t slot, PropertyFlags flags);
+ static bool addPropertyInReservedSlot(JSContext* cx,
+ Handle<NativeObject*> obj,
+ Handle<PropertyName*> name,
+ uint32_t slot, PropertyFlags flags) {
+ RootedId id(cx, NameToId(name));
+ return addPropertyInReservedSlot(cx, obj, id, slot, flags);
+ }
+
+ static bool addCustomDataProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, PropertyFlags flags);
+
+ // Change a property with key |id| in this object. The object must already
+ // have a property (stored in the shape tree) with this |id|.
+ static bool changeProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, PropertyFlags flags,
+ uint32_t* slotOut);
+
+ static bool changeCustomDataPropAttributes(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id, PropertyFlags flags);
+
+ // Remove the property named by id from this object.
+ static bool removeProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id);
+
+ static bool freezeOrSealProperties(JSContext* cx, Handle<NativeObject*> obj,
+ IntegrityLevel level);
+
+ protected:
+ static bool changeNumFixedSlotsAfterSwap(JSContext* cx,
+ Handle<NativeObject*> obj,
+ uint32_t nfixed);
+
+ // For use from JSObject::swap.
+ [[nodiscard]] bool prepareForSwap(JSContext* cx,
+ MutableHandleValueVector slotValuesOut);
+ [[nodiscard]] static bool fixupAfterSwap(JSContext* cx,
+ Handle<NativeObject*> obj,
+ gc::AllocKind kind,
+ HandleValueVector slotValues);
+
+ public:
+ // Return true if this object has been converted from shared-immutable
+ // shapes to object-owned dictionary shapes.
+ bool inDictionaryMode() const { return shape()->isDictionary(); }
+
+ const Value& getSlot(uint32_t slot) const {
+ MOZ_ASSERT(slotInRange(slot));
+ uint32_t fixed = numFixedSlots();
+ if (slot < fixed) {
+ return fixedSlots()[slot];
+ }
+ return slots_[slot - fixed];
+ }
+
+ const HeapSlot* getSlotAddressUnchecked(uint32_t slot) const {
+ uint32_t fixed = numFixedSlots();
+ if (slot < fixed) {
+ return fixedSlots() + slot;
+ }
+ return slots_ + (slot - fixed);
+ }
+
+ HeapSlot* getSlotAddressUnchecked(uint32_t slot) {
+ uint32_t fixed = numFixedSlots();
+ if (slot < fixed) {
+ return fixedSlots() + slot;
+ }
+ return slots_ + (slot - fixed);
+ }
+
+ HeapSlot* getSlotAddress(uint32_t slot) {
+ /*
+ * This can be used to get the address of the end of the slots for the
+ * object, which may be necessary when fetching zero-length arrays of
+ * slots (e.g. for callObjVarArray).
+ */
+ MOZ_ASSERT(slotInRange(slot, SENTINEL_ALLOWED));
+ return getSlotAddressUnchecked(slot);
+ }
+
+ const HeapSlot* getSlotAddress(uint32_t slot) const {
+ /*
+ * This can be used to get the address of the end of the slots for the
+ * object, which may be necessary when fetching zero-length arrays of
+ * slots (e.g. for callObjVarArray).
+ */
+ MOZ_ASSERT(slotInRange(slot, SENTINEL_ALLOWED));
+ return getSlotAddressUnchecked(slot);
+ }
+
+ MOZ_ALWAYS_INLINE HeapSlot& getSlotRef(uint32_t slot) {
+ MOZ_ASSERT(slotInRange(slot));
+ return *getSlotAddress(slot);
+ }
+
+ MOZ_ALWAYS_INLINE const HeapSlot& getSlotRef(uint32_t slot) const {
+ MOZ_ASSERT(slotInRange(slot));
+ return *getSlotAddress(slot);
+ }
+
+ // Check requirements on values stored to this object.
+ MOZ_ALWAYS_INLINE void checkStoredValue(const Value& v) {
+ MOZ_ASSERT(IsObjectValueInCompartment(v, compartment()));
+ MOZ_ASSERT(AtomIsMarked(zoneFromAnyThread(), v));
+ MOZ_ASSERT_IF(v.isMagic() && v.whyMagic() == JS_ELEMENTS_HOLE,
+ !denseElementsArePacked());
+ }
+
+ MOZ_ALWAYS_INLINE void setSlot(uint32_t slot, const Value& value) {
+ MOZ_ASSERT(slotInRange(slot));
+ checkStoredValue(value);
+ getSlotRef(slot).set(this, HeapSlot::Slot, slot, value);
+ }
+
+ MOZ_ALWAYS_INLINE void initSlot(uint32_t slot, const Value& value) {
+ MOZ_ASSERT(getSlot(slot).isUndefined());
+ MOZ_ASSERT(slotInRange(slot));
+ checkStoredValue(value);
+ initSlotUnchecked(slot, value);
+ }
+
+ MOZ_ALWAYS_INLINE void initSlotUnchecked(uint32_t slot, const Value& value) {
+ getSlotAddressUnchecked(slot)->init(this, HeapSlot::Slot, slot, value);
+ }
+
+ // Returns the GetterSetter for an accessor property.
+ GetterSetter* getGetterSetter(uint32_t slot) const {
+ return getSlot(slot).toGCThing()->as<GetterSetter>();
+ }
+ GetterSetter* getGetterSetter(PropertyInfo prop) const {
+ MOZ_ASSERT(prop.isAccessorProperty());
+ return getGetterSetter(prop.slot());
+ }
+
+ // Returns the (possibly nullptr) getter or setter object. |prop| and |slot|
+ // must be (for) an accessor property.
+ JSObject* getGetter(uint32_t slot) const {
+ return getGetterSetter(slot)->getter();
+ }
+ JSObject* getGetter(PropertyInfo prop) const {
+ return getGetterSetter(prop)->getter();
+ }
+ JSObject* getSetter(PropertyInfo prop) const {
+ return getGetterSetter(prop)->setter();
+ }
+
+ // Returns true if the property has a non-nullptr getter or setter object.
+ // |prop| can be any property.
+ bool hasGetter(PropertyInfo prop) const {
+ return prop.isAccessorProperty() && getGetter(prop);
+ }
+ bool hasSetter(PropertyInfo prop) const {
+ return prop.isAccessorProperty() && getSetter(prop);
+ }
+
+ // If the property has a non-nullptr getter/setter, return it as ObjectValue.
+ // Else return |undefined|. |prop| must be an accessor property.
+ Value getGetterValue(PropertyInfo prop) const {
+ MOZ_ASSERT(prop.isAccessorProperty());
+ if (JSObject* getterObj = getGetter(prop)) {
+ return ObjectValue(*getterObj);
+ }
+ return UndefinedValue();
+ }
+ Value getSetterValue(PropertyInfo prop) const {
+ MOZ_ASSERT(prop.isAccessorProperty());
+ if (JSObject* setterObj = getSetter(prop)) {
+ return ObjectValue(*setterObj);
+ }
+ return UndefinedValue();
+ }
+
+ [[nodiscard]] bool setUniqueId(JSContext* cx, uint64_t uid);
+ inline bool hasUniqueId() const { return getSlotsHeader()->hasUniqueId(); }
+ inline uint64_t uniqueId() const { return getSlotsHeader()->uniqueId(); }
+ inline uint64_t maybeUniqueId() const {
+ return getSlotsHeader()->maybeUniqueId();
+ }
+ bool setOrUpdateUniqueId(JSContext* cx, uint64_t uid);
+
+ // MAX_FIXED_SLOTS is the biggest number of fixed slots our GC
+ // size classes will give an object.
+ static constexpr uint32_t MAX_FIXED_SLOTS =
+ JS::shadow::Object::MAX_FIXED_SLOTS;
+
+ private:
+ void prepareElementRangeForOverwrite(size_t start, size_t end) {
+ MOZ_ASSERT(end <= getDenseInitializedLength());
+ for (size_t i = start; i < end; i++) {
+ elements_[i].destroy();
+ }
+ }
+
+ /*
+ * Trigger the write barrier on a range of slots that will no longer be
+ * reachable.
+ */
+ void prepareSlotRangeForOverwrite(size_t start, size_t end) {
+ for (size_t i = start; i < end; i++) {
+ getSlotAddressUnchecked(i)->destroy();
+ }
+ }
+
+ inline void shiftDenseElementsUnchecked(uint32_t count);
+
+ // Like getSlotRef, but optimized for reserved slots. This relies on the fact
+ // that the first reserved slots (up to MAX_FIXED_SLOTS) are always stored in
+ // fixed slots. This lets the compiler optimize away the branch below when
+ // |index| is a constant (after inlining).
+ //
+ // Note: objects that may be swapped have less predictable slot layouts
+ // because they could have been swapped with an object with fewer fixed slots.
+ // Fortunately, the only native objects that can be swapped are DOM objects
+ // and these shouldn't end up here (asserted below).
+ MOZ_ALWAYS_INLINE HeapSlot& getReservedSlotRef(uint32_t index) {
+ MOZ_ASSERT(index < JSSLOT_FREE(getClass()));
+ MOZ_ASSERT(slotIsFixed(index) == (index < MAX_FIXED_SLOTS));
+ MOZ_ASSERT(!ObjectMayBeSwapped(this));
+ return index < MAX_FIXED_SLOTS ? fixedSlots()[index]
+ : slots_[index - MAX_FIXED_SLOTS];
+ }
+ MOZ_ALWAYS_INLINE const HeapSlot& getReservedSlotRef(uint32_t index) const {
+ MOZ_ASSERT(index < JSSLOT_FREE(getClass()));
+ MOZ_ASSERT(slotIsFixed(index) == (index < MAX_FIXED_SLOTS));
+ MOZ_ASSERT(!ObjectMayBeSwapped(this));
+ return index < MAX_FIXED_SLOTS ? fixedSlots()[index]
+ : slots_[index - MAX_FIXED_SLOTS];
+ }
+
+ public:
+ MOZ_ALWAYS_INLINE const Value& getReservedSlot(uint32_t index) const {
+ return getReservedSlotRef(index);
+ }
+ MOZ_ALWAYS_INLINE void initReservedSlot(uint32_t index, const Value& v) {
+ MOZ_ASSERT(getReservedSlot(index).isUndefined());
+ checkStoredValue(v);
+ getReservedSlotRef(index).init(this, HeapSlot::Slot, index, v);
+ }
+ MOZ_ALWAYS_INLINE void setReservedSlot(uint32_t index, const Value& v) {
+ checkStoredValue(v);
+ getReservedSlotRef(index).set(this, HeapSlot::Slot, index, v);
+ }
+
+ // For slots which are known to always be fixed, due to the way they are
+ // allocated.
+
+ HeapSlot& getFixedSlotRef(uint32_t slot) {
+ MOZ_ASSERT(slotIsFixed(slot));
+ return fixedSlots()[slot];
+ }
+
+ const Value& getFixedSlot(uint32_t slot) const {
+ MOZ_ASSERT(slotIsFixed(slot));
+ return fixedSlots()[slot];
+ }
+
+ const Value& getDynamicSlot(uint32_t dynamicSlotIndex) const {
+ MOZ_ASSERT(dynamicSlotIndex < outOfLineNumDynamicSlots());
+ return slots_[dynamicSlotIndex];
+ }
+
+ void setFixedSlot(uint32_t slot, const Value& value) {
+ MOZ_ASSERT(slotIsFixed(slot));
+ checkStoredValue(value);
+ fixedSlots()[slot].set(this, HeapSlot::Slot, slot, value);
+ }
+
+ void setDynamicSlot(uint32_t numFixed, uint32_t slot, const Value& value) {
+ MOZ_ASSERT(numFixedSlots() == numFixed);
+ MOZ_ASSERT(slot >= numFixed);
+ MOZ_ASSERT(slot - numFixed < getSlotsHeader()->capacity());
+ checkStoredValue(value);
+ slots_[slot - numFixed].set(this, HeapSlot::Slot, slot, value);
+ }
+
+ void initFixedSlot(uint32_t slot, const Value& value) {
+ MOZ_ASSERT(slotIsFixed(slot));
+ checkStoredValue(value);
+ fixedSlots()[slot].init(this, HeapSlot::Slot, slot, value);
+ }
+
+ void initDynamicSlot(uint32_t numFixed, uint32_t slot, const Value& value) {
+ MOZ_ASSERT(numFixedSlots() == numFixed);
+ MOZ_ASSERT(slot >= numFixed);
+ MOZ_ASSERT(slot - numFixed < getSlotsHeader()->capacity());
+ checkStoredValue(value);
+ slots_[slot - numFixed].init(this, HeapSlot::Slot, slot, value);
+ }
+
+ template <typename T>
+ T* maybePtrFromReservedSlot(uint32_t slot) const {
+ Value v = getReservedSlot(slot);
+ return v.isUndefined() ? nullptr : static_cast<T*>(v.toPrivate());
+ }
+
+ /*
+ * Calculate the number of dynamic slots to allocate to cover the properties
+ * in an object with the given number of fixed slots and slot span.
+ */
+ static MOZ_ALWAYS_INLINE uint32_t calculateDynamicSlots(uint32_t nfixed,
+ uint32_t span,
+ const JSClass* clasp);
+ static MOZ_ALWAYS_INLINE uint32_t calculateDynamicSlots(SharedShape* shape);
+
+ ObjectSlots* getSlotsHeader() const { return ObjectSlots::fromSlots(slots_); }
+
+ /* Elements accessors. */
+
+ // The maximum size, in sizeof(Value), of the allocation used for an
+ // object's dense elements. (This includes space used to store an
+ // ObjectElements instance.)
+ // |MAX_DENSE_ELEMENTS_ALLOCATION * sizeof(JS::Value)| shouldn't overflow
+ // int32_t (see elementsSizeMustNotOverflow).
+ static const uint32_t MAX_DENSE_ELEMENTS_ALLOCATION = (1 << 28) - 1;
+
+ // The maximum number of usable dense elements in an object.
+ static const uint32_t MAX_DENSE_ELEMENTS_COUNT =
+ MAX_DENSE_ELEMENTS_ALLOCATION - ObjectElements::VALUES_PER_HEADER;
+
+ static void elementsSizeMustNotOverflow() {
+ static_assert(
+ NativeObject::MAX_DENSE_ELEMENTS_COUNT <= INT32_MAX / sizeof(JS::Value),
+ "every caller of this method require that an element "
+ "count multiplied by sizeof(Value) can't overflow "
+ "uint32_t (and sometimes int32_t ,too)");
+ }
+
+ ObjectElements* getElementsHeader() const {
+ return ObjectElements::fromElements(elements_);
+ }
+
+ // Returns a pointer to the first element, including shifted elements.
+ inline HeapSlot* unshiftedElements() const {
+ return elements_ - getElementsHeader()->numShiftedElements();
+ }
+
+ // Like getElementsHeader, but returns a pointer to the unshifted header.
+ // This is mainly useful for free()ing dynamic elements: the pointer
+ // returned here is the one we got from malloc.
+ void* getUnshiftedElementsHeader() const {
+ return ObjectElements::fromElements(unshiftedElements());
+ }
+
+ uint32_t unshiftedIndex(uint32_t index) const {
+ return index + getElementsHeader()->numShiftedElements();
+ }
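+
+  // For example, with numShiftedElements() == 3, dense element 0 lives at
+  // index 3 relative to unshiftedElements() (see the "Shifted elements"
+  // SMDOC comment above).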
+
+ /* Accessors for elements. */
+ bool ensureElements(JSContext* cx, uint32_t capacity) {
+ MOZ_ASSERT(isExtensible());
+ if (capacity > getDenseCapacity()) {
+ return growElements(cx, capacity);
+ }
+ return true;
+ }
+
+ // Try to shift |count| dense elements, see the "Shifted elements" comment.
+ inline bool tryShiftDenseElements(uint32_t count);
+
+ // Try to make space for |count| dense elements at the start of the array.
+ bool tryUnshiftDenseElements(uint32_t count);
+
+ // Move the elements header and all shifted elements to the start of the
+ // allocated elements space, so that numShiftedElements is 0 afterwards.
+ void moveShiftedElements();
+
+ // If this object has many shifted elements call moveShiftedElements.
+ void maybeMoveShiftedElements();
+
+ static bool goodElementsAllocationAmount(JSContext* cx, uint32_t reqAllocated,
+ uint32_t length,
+ uint32_t* goodAmount);
+ bool growElements(JSContext* cx, uint32_t newcap);
+ void shrinkElements(JSContext* cx, uint32_t cap);
+
+ private:
+ // Run a post write barrier that encompasses multiple contiguous elements in a
+ // single step.
+ inline void elementsRangePostWriteBarrier(uint32_t start, uint32_t count);
+
+ public:
+ void shrinkCapacityToInitializedLength(JSContext* cx);
+
+ private:
+ void setDenseInitializedLengthInternal(uint32_t length) {
+ MOZ_ASSERT(length <= getDenseCapacity());
+ MOZ_ASSERT(!denseElementsAreFrozen());
+ prepareElementRangeForOverwrite(length,
+ getElementsHeader()->initializedLength);
+ getElementsHeader()->initializedLength = length;
+ }
+
+ public:
+ void setDenseInitializedLength(uint32_t length) {
+ MOZ_ASSERT(isExtensible());
+ setDenseInitializedLengthInternal(length);
+ }
+
+ void setDenseInitializedLengthMaybeNonExtensible(JSContext* cx,
+ uint32_t length) {
+ setDenseInitializedLengthInternal(length);
+ if (!isExtensible()) {
+ shrinkCapacityToInitializedLength(cx);
+ }
+ }
+
+ inline void ensureDenseInitializedLength(uint32_t index, uint32_t extra);
+
+ void setDenseElement(uint32_t index, const Value& val) {
+ MOZ_ASSERT_IF(val.isMagic(), val.whyMagic() != JS_ELEMENTS_HOLE);
+ setDenseElementUnchecked(index, val);
+ }
+
+ void initDenseElement(uint32_t index, const Value& val) {
+ MOZ_ASSERT(!val.isMagic(JS_ELEMENTS_HOLE));
+ initDenseElementUnchecked(index, val);
+ }
+
+ private:
+ // Note: 'Unchecked' here means we don't assert |val| isn't the hole
+ // MagicValue.
+ void initDenseElementUnchecked(uint32_t index, const Value& val) {
+ MOZ_ASSERT(index < getDenseInitializedLength());
+ MOZ_ASSERT(isExtensible());
+ checkStoredValue(val);
+ elements_[index].init(this, HeapSlot::Element, unshiftedIndex(index), val);
+ }
+ void setDenseElementUnchecked(uint32_t index, const Value& val) {
+ MOZ_ASSERT(index < getDenseInitializedLength());
+ MOZ_ASSERT(!denseElementsAreFrozen());
+ checkStoredValue(val);
+ elements_[index].set(this, HeapSlot::Element, unshiftedIndex(index), val);
+ }
+
+ // Mark the dense elements as possibly containing holes.
+ inline void markDenseElementsNotPacked();
+
+ public:
+ inline void initDenseElementHole(uint32_t index);
+ inline void setDenseElementHole(uint32_t index);
+ inline void removeDenseElementForSparseIndex(uint32_t index);
+
+ inline void copyDenseElements(uint32_t dstStart, const Value* src,
+ uint32_t count);
+
+ inline void initDenseElements(const Value* src, uint32_t count);
+ inline void initDenseElements(NativeObject* src, uint32_t srcStart,
+ uint32_t count);
+
+ // Copy the first `count` dense elements from `src` to `this`, starting at
+ // `destStart`. The initialized length must already include the new elements.
+ inline void initDenseElementRange(uint32_t destStart, NativeObject* src,
+ uint32_t count);
+
+ // Store the Values in the range [begin, end) as elements of this array.
+ //
+  // Preconditions: This must be a boring ArrayObject with dense initialized
+  // length 0: no shifted elements, no frozen elements, no fixed "length",
+  // not indexed, extensible, and not copy-on-write. Existing capacity is
+  // optional.
+ //
+ // This runs write barriers but does not update types. `end - begin` must
+ // return the size of the range, which must be >= 0 and fit in an int32_t.
+ template <typename Iter>
+ [[nodiscard]] inline bool initDenseElementsFromRange(JSContext* cx,
+ Iter begin, Iter end);
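+
+  // For illustration only, a hedged sketch of a typical call (|vals| is a
+  // hypothetical js::Vector<Value>; |arr| is a rooted, freshly created array):
+  //
+  //   if (!arr->initDenseElementsFromRange(cx, vals.begin(), vals.end())) {
+  //     return false;  // OOM already reported
+  //   }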
+
+ inline void moveDenseElements(uint32_t dstStart, uint32_t srcStart,
+ uint32_t count);
+ inline void reverseDenseElementsNoPreBarrier(uint32_t length);
+
+ inline DenseElementResult setOrExtendDenseElements(JSContext* cx,
+ uint32_t start,
+ const Value* vp,
+ uint32_t count);
+
+ bool denseElementsAreSealed() const {
+ return getElementsHeader()->isSealed();
+ }
+ bool denseElementsAreFrozen() const {
+ return hasFlag(ObjectFlag::FrozenElements);
+ }
+
+ bool denseElementsArePacked() const {
+ return getElementsHeader()->isPacked();
+ }
+
+ void markDenseElementsMaybeInIteration() {
+ getElementsHeader()->markMaybeInIteration();
+ }
+
+ // Return whether the object's dense elements might be in the midst of for-in
+ // iteration. We rely on this to be able to safely delete or move dense array
+ // elements without worrying about updating in-progress iterators.
+ // See bug 690622.
+ //
+ // Note that it's fine to return false if this object is on the prototype of
+ // another object: SuppressDeletedProperty only suppresses properties deleted
+ // from the iterated object itself.
+ inline bool denseElementsHaveMaybeInIterationFlag();
+ inline bool denseElementsMaybeInIteration();
+
+ // Ensures that the object can hold at least index + extra elements. This
+ // returns DenseElement_Success on success, DenseElement_Failed on failure
+ // to grow the array, or DenseElement_Incomplete when the object is too
+ // sparse to grow (this includes the case of index + extra overflow). In
+ // the last two cases the object is kept intact.
+ inline DenseElementResult ensureDenseElements(JSContext* cx, uint32_t index,
+ uint32_t extra);
+
+ inline DenseElementResult extendDenseElements(JSContext* cx,
+ uint32_t requiredCapacity,
+ uint32_t extra);
+
+  /* Elements with indexes below this bound are always stored densely. */
+ static const uint32_t MIN_SPARSE_INDEX = 1000;
+
+  /*
+   * Element storage for an object will be made sparse if fewer than 1/8 of
+   * its indexes are filled in.
+   */
+ static const unsigned SPARSE_DENSITY_RATIO = 8;
+
+ /*
+ * Check if after growing the object's elements will be too sparse.
+ * newElementsHint is an estimated number of elements to be added.
+ */
+ bool willBeSparseElements(uint32_t requiredCapacity,
+ uint32_t newElementsHint);
+
+ /*
+ * After adding a sparse index to obj, see if it should be converted to use
+ * dense elements.
+ */
+ static DenseElementResult maybeDensifySparseElements(
+ JSContext* cx, Handle<NativeObject*> obj);
+ static bool densifySparseElements(JSContext* cx, Handle<NativeObject*> obj);
+
+ inline HeapSlot* fixedElements() const {
+ static_assert(2 * sizeof(Value) == sizeof(ObjectElements),
+ "when elements are stored inline, the first two "
+ "slots will hold the ObjectElements header");
+ return &fixedSlots()[2];
+ }
+
+#ifdef DEBUG
+ bool canHaveNonEmptyElements();
+#endif
+
+ void setEmptyElements() { elements_ = emptyObjectElements; }
+
+ void initFixedElements(gc::AllocKind kind, uint32_t length);
+
+ // Update the elements pointer to use the fixed elements storage. The caller
+ // is responsible for initializing the elements themselves and setting the
+ // FIXED flag.
+ void setFixedElements(uint32_t numShifted = 0) {
+ MOZ_ASSERT(canHaveNonEmptyElements());
+ elements_ = fixedElements() + numShifted;
+ }
+
+ inline bool hasDynamicElements() const {
+ /*
+ * Note: for objects with zero fixed slots this could potentially give
+ * a spurious 'true' result, if the end of this object is exactly
+ * aligned with the end of its arena and dynamic slots are allocated
+ * immediately afterwards. Such cases cannot occur for dense arrays
+ * (which have at least two fixed slots) and can only result in a leak.
+ */
+ return !hasEmptyElements() && !hasFixedElements();
+ }
+
+ inline bool hasFixedElements() const {
+ bool fixed = getElementsHeader()->flags & ObjectElements::FIXED;
+ MOZ_ASSERT_IF(fixed, unshiftedElements() == fixedElements());
+ return fixed;
+ }
+
+ inline bool hasEmptyElements() const {
+ return elements_ == emptyObjectElements ||
+ elements_ == emptyObjectElementsShared;
+ }
+
+ /*
+ * Get a pointer to the unused data in the object's allocation immediately
+ * following this object, for use with objects which allocate a larger size
+ * class than they need and store non-elements data inline.
+ */
+ inline uint8_t* fixedData(size_t nslots) const;
+
+ inline void privatePreWriteBarrier(HeapSlot* pprivate);
+
+ // The methods below are used to store GC things in a reserved slot as
+ // PrivateValues. This is done to bypass the normal tracing code (debugger
+ // objects use this to store cross-compartment pointers).
+ //
+ // WARNING: make sure you REALLY need this and you know what you're doing
+ // before using these methods!
+ void setReservedSlotGCThingAsPrivate(uint32_t slot, gc::Cell* cell) {
+#ifdef DEBUG
+ if (IsMarkedBlack(this)) {
+ JS::AssertCellIsNotGray(cell);
+ }
+#endif
+ HeapSlot* pslot = getSlotAddress(slot);
+ Cell* prev = nullptr;
+ if (!pslot->isUndefined()) {
+ prev = static_cast<gc::Cell*>(pslot->toPrivate());
+ privatePreWriteBarrier(pslot);
+ }
+ setReservedSlotGCThingAsPrivateUnbarriered(slot, cell);
+ gc::PostWriteBarrierCell(this, prev, cell);
+ }
+ void setReservedSlotGCThingAsPrivateUnbarriered(uint32_t slot,
+ gc::Cell* cell) {
+ MOZ_ASSERT(slot < JSCLASS_RESERVED_SLOTS(getClass()));
+ MOZ_ASSERT(cell);
+ getReservedSlotRef(slot).unbarrieredSet(PrivateValue(cell));
+ }
+ void clearReservedSlotGCThingAsPrivate(uint32_t slot) {
+ MOZ_ASSERT(slot < JSCLASS_RESERVED_SLOTS(getClass()));
+ HeapSlot* pslot = &getReservedSlotRef(slot);
+ if (!pslot->isUndefined()) {
+ privatePreWriteBarrier(pslot);
+ pslot->unbarrieredSet(UndefinedValue());
+ }
+ }
+
+ /* Return the allocKind we would use if we were to tenure this object. */
+ inline js::gc::AllocKind allocKindForTenure() const;
+
+ // Native objects are never wrappers, so a native object always has a realm
+ // and global.
+ JS::Realm* realm() const { return nonCCWRealm(); }
+ inline js::GlobalObject& global() const;
+
+ TaggedSlotOffset getTaggedSlotOffset(size_t slot) const {
+ MOZ_ASSERT(slot < slotSpan());
+ uint32_t nfixed = numFixedSlots();
+ if (slot < nfixed) {
+ return TaggedSlotOffset(getFixedSlotOffset(slot),
+ /* isFixedSlot = */ true);
+ }
+ return TaggedSlotOffset((slot - nfixed) * sizeof(Value),
+ /* isFixedSlot = */ false);
+ }
+
+ /* JIT Accessors */
+ static size_t offsetOfElements() { return offsetof(NativeObject, elements_); }
+ static size_t offsetOfFixedElements() {
+ return sizeof(NativeObject) + sizeof(ObjectElements);
+ }
+
+ static constexpr size_t getFixedSlotOffset(size_t slot) {
+ MOZ_ASSERT(slot < MAX_FIXED_SLOTS);
+ return sizeof(NativeObject) + slot * sizeof(Value);
+ }
+ static constexpr size_t getFixedSlotIndexFromOffset(size_t offset) {
+ MOZ_ASSERT(offset >= sizeof(NativeObject));
+ offset -= sizeof(NativeObject);
+ MOZ_ASSERT(offset % sizeof(Value) == 0);
+ MOZ_ASSERT(offset / sizeof(Value) < MAX_FIXED_SLOTS);
+ return offset / sizeof(Value);
+ }
+ static constexpr size_t getDynamicSlotIndexFromOffset(size_t offset) {
+ MOZ_ASSERT(offset % sizeof(Value) == 0);
+ return offset / sizeof(Value);
+ }
+ static size_t offsetOfSlots() { return offsetof(NativeObject, slots_); }
+};
+
+inline void NativeObject::privatePreWriteBarrier(HeapSlot* pprivate) {
+ JS::shadow::Zone* shadowZone = this->shadowZoneFromAnyThread();
+ if (shadowZone->needsIncrementalBarrier() && pprivate->get().toPrivate() &&
+ getClass()->hasTrace()) {
+ getClass()->doTrace(shadowZone->barrierTracer(), this);
+ }
+}
+
+/*** Standard internal methods **********************************************/
+
+/*
+ * These functions should follow the algorithms in ES6 draft rev 29 section 9.1
+ * ("Ordinary Object Internal Methods"). It's an ongoing project.
+ *
+ * Many native objects are not "ordinary" in ES6, so these functions also have
+ * to serve some of the special needs of Functions (9.2, 9.3, 9.4.1), Arrays
+ * (9.4.2), Strings (9.4.3), and so on.
+ */
+
+extern bool NativeDefineProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id,
+ Handle<JS::PropertyDescriptor> desc,
+ ObjectOpResult& result);
+
+extern bool NativeDefineDataProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleValue value,
+ unsigned attrs, ObjectOpResult& result);
+
+/* If the result out-param is omitted, throw on failure. */
+
+extern bool NativeDefineAccessorProperty(JSContext* cx,
+ Handle<NativeObject*> obj, HandleId id,
+ HandleObject getter,
+ HandleObject setter, unsigned attrs);
+
+extern bool NativeDefineDataProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleValue value,
+ unsigned attrs);
+
+extern bool NativeDefineDataProperty(JSContext* cx, Handle<NativeObject*> obj,
+ PropertyName* name, HandleValue value,
+ unsigned attrs);
+
+extern bool NativeHasProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, bool* foundp);
+
+extern bool NativeGetOwnPropertyDescriptor(
+ JSContext* cx, Handle<NativeObject*> obj, HandleId id,
+ MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc);
+
+extern bool NativeGetProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp);
+
+extern bool NativeGetPropertyNoGC(JSContext* cx, NativeObject* obj,
+ const Value& receiver, jsid id, Value* vp);
+
+inline bool NativeGetProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, MutableHandleValue vp) {
+ RootedValue receiver(cx, ObjectValue(*obj));
+ return NativeGetProperty(cx, obj, receiver, id, vp);
+}
+
+extern bool NativeGetElement(JSContext* cx, Handle<NativeObject*> obj,
+ HandleValue receiver, int32_t index,
+ MutableHandleValue vp);
+
+bool GetSparseElementHelper(JSContext* cx, Handle<NativeObject*> obj,
+ int32_t int_id, MutableHandleValue result);
+
+bool SetPropertyByDefining(JSContext* cx, HandleId id, HandleValue v,
+ HandleValue receiver, ObjectOpResult& result);
+
+bool SetPropertyOnProto(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, HandleValue receiver,
+ ObjectOpResult& result);
+
+bool AddOrUpdateSparseElementHelper(JSContext* cx, Handle<NativeObject*> obj,
+ int32_t int_id, HandleValue v, bool strict);
+
+/*
+ * Indicates whether an assignment operation is qualified (`x.y = 0`) or
+ * unqualified (`y = 0`). In strict mode, the latter is an error if no such
+ * variable already exists.
+ *
+ * Used as an argument to NativeSetProperty.
+ */
+enum QualifiedBool { Unqualified = 0, Qualified = 1 };
+
+template <QualifiedBool Qualified>
+extern bool NativeSetProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, HandleValue v, HandleValue receiver,
+ ObjectOpResult& result);
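+
+// For illustration only, a hedged sketch of a qualified assignment
+// (`obj.prop = v`) going through NativeSetProperty; the rooted locals are
+// assumptions, not code from this file:
+//
+//   JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+//   JS::ObjectOpResult result;
+//   if (!NativeSetProperty<Qualified>(cx, obj, id, v, receiver, result)) {
+//     return false;  // exception already pending
+//   }
+//   return result.checkStrict(cx, obj, id);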
+
+extern bool NativeSetElement(JSContext* cx, Handle<NativeObject*> obj,
+ uint32_t index, HandleValue v,
+ HandleValue receiver, ObjectOpResult& result);
+
+extern bool NativeDeleteProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, ObjectOpResult& result);
+
+/*** SpiderMonkey nonstandard internal methods ******************************/
+
+template <AllowGC allowGC>
+extern bool NativeLookupOwnProperty(
+ JSContext* cx, typename MaybeRooted<NativeObject*, allowGC>::HandleType obj,
+ typename MaybeRooted<jsid, allowGC>::HandleType id, PropertyResult* propp);
+
+/*
+ * Get a property from `receiver`, after having already done a lookup and found
+ * the property on a native object `obj`.
+ *
+ * `prop` must be present in obj's shape.
+ */
+extern bool NativeGetExistingProperty(JSContext* cx, HandleObject receiver,
+ Handle<NativeObject*> obj, HandleId id,
+ PropertyInfo prop, MutableHandleValue vp);
+
+/* * */
+
+extern bool GetNameBoundInEnvironment(JSContext* cx, HandleObject env,
+ HandleId id, MutableHandleValue vp);
+
+} /* namespace js */
+
+template <>
+inline bool JSObject::is<js::NativeObject>() const {
+ return shape()->isNative();
+}
+
+namespace js {
+
+// Alternate to JSObject::as<NativeObject>() that tolerates null pointers.
+inline NativeObject* MaybeNativeObject(JSObject* obj) {
+ return obj ? &obj->as<NativeObject>() : nullptr;
+}
+
+// Defined in NativeObject-inl.h.
+bool IsPackedArray(JSObject* obj);
+
+// Initialize an object's reserved slot with a private value pointing to
+// malloc-allocated memory and associate the memory with the object.
+//
+// This call should be matched with a call to JS::GCContext::free_/delete_ in
+// the object's finalizer to free the memory and update the memory accounting.
+
+inline void InitReservedSlot(NativeObject* obj, uint32_t slot, void* ptr,
+ size_t nbytes, MemoryUse use) {
+ AddCellMemory(obj, nbytes, use);
+ obj->initReservedSlot(slot, PrivateValue(ptr));
+}
+template <typename T>
+inline void InitReservedSlot(NativeObject* obj, uint32_t slot, T* ptr,
+ MemoryUse use) {
+ InitReservedSlot(obj, slot, ptr, sizeof(T), use);
+}
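+
+// For illustration only, a hedged sketch of the matching cleanup in a JSClass
+// finalizer (MyData and DATA_SLOT are hypothetical names; |use| stands for
+// whatever MemoryUse value was passed to InitReservedSlot):
+//
+//   static void MyClassFinalize(JS::GCContext* gcx, JSObject* obj) {
+//     auto* data = static_cast<MyData*>(
+//         obj->as<NativeObject>().getReservedSlot(DATA_SLOT).toPrivate());
+//     gcx->delete_(obj, data, use);  // frees memory and updates accounting
+//   }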
+
+bool AddSlotAndCallAddPropHook(JSContext* cx, Handle<NativeObject*> obj,
+ HandleValue v, Handle<Shape*> newShape);
+
+} // namespace js
+
+#endif /* vm_NativeObject_h */
diff --git a/js/src/vm/NumberObject-inl.h b/js/src/vm/NumberObject-inl.h
new file mode 100644
index 0000000000..4f4123f18d
--- /dev/null
+++ b/js/src/vm/NumberObject-inl.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_NumberObject_inl_h
+#define vm_NumberObject_inl_h
+
+#include "vm/NumberObject.h"
+
+#include "vm/JSObject-inl.h"
+
+namespace js {
+
+inline NumberObject* NumberObject::create(JSContext* cx, double d,
+ HandleObject proto /* = nullptr */) {
+ NumberObject* obj = NewObjectWithClassProto<NumberObject>(cx, proto);
+ if (!obj) {
+ return nullptr;
+ }
+ obj->setPrimitiveValue(d);
+ return obj;
+}
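+
+// For illustration only: boxing and unboxing a double (a sketch, assuming a
+// JSContext* cx is in scope):
+//
+//   NumberObject* numObj = NumberObject::create(cx, 3.14);
+//   if (!numObj) {
+//     return nullptr;  // OOM already reported
+//   }
+//   double d = numObj->unbox();  // 3.14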
+
+} // namespace js
+
+#endif /* vm_NumberObject_inl_h */
diff --git a/js/src/vm/NumberObject.h b/js/src/vm/NumberObject.h
new file mode 100644
index 0000000000..936cd9db1d
--- /dev/null
+++ b/js/src/vm/NumberObject.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_NumberObject_h
+#define vm_NumberObject_h
+
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class NumberObject : public NativeObject {
+ /* Stores this Number object's [[PrimitiveValue]]. */
+ static const unsigned PRIMITIVE_VALUE_SLOT = 0;
+
+ static const ClassSpec classSpec_;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+
+ static const JSClass class_;
+
+ /*
+ * Creates a new Number object boxing the given number.
+ * If proto is nullptr, then Number.prototype will be used instead.
+ */
+ static inline NumberObject* create(JSContext* cx, double d,
+ HandleObject proto = nullptr);
+
+ double unbox() const { return getFixedSlot(PRIMITIVE_VALUE_SLOT).toNumber(); }
+
+ private:
+ static JSObject* createPrototype(JSContext* cx, JSProtoKey key);
+
+ inline void setPrimitiveValue(double d) {
+ setFixedSlot(PRIMITIVE_VALUE_SLOT, NumberValue(d));
+ }
+};
+
+} // namespace js
+
+#endif /* vm_NumberObject_h */
diff --git a/js/src/vm/ObjectFlags-inl.h b/js/src/vm/ObjectFlags-inl.h
new file mode 100644
index 0000000000..6a7cc64c15
--- /dev/null
+++ b/js/src/vm/ObjectFlags-inl.h
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ObjectFlags_inl_h
+#define vm_ObjectFlags_inl_h
+
+#include "vm/ObjectFlags.h"
+
+#include "builtin/Array.h"
+#include "vm/JSAtomState.h"
+#include "vm/JSContext.h"
+#include "vm/PlainObject.h"
+#include "vm/PropertyInfo.h"
+
+namespace js {
+
+MOZ_ALWAYS_INLINE ObjectFlags
+GetObjectFlagsForNewProperty(const JSClass* clasp, ObjectFlags flags, jsid id,
+ PropertyFlags propFlags, JSContext* cx) {
+ uint32_t index;
+ if (IdIsIndex(id, &index)) {
+ flags.setFlag(ObjectFlag::Indexed);
+ } else if (id.isSymbol() && id.toSymbol()->isInterestingSymbol()) {
+ flags.setFlag(ObjectFlag::HasInterestingSymbol);
+ }
+
+ if ((!propFlags.isDataProperty() || !propFlags.writable()) &&
+ clasp == &PlainObject::class_ && !id.isAtom(cx->names().proto)) {
+ flags.setFlag(ObjectFlag::HasNonWritableOrAccessorPropExclProto);
+ }
+
+ if (propFlags.enumerable()) {
+ flags.setFlag(ObjectFlag::HasEnumerable);
+ }
+
+ return flags;
+}
+
+// When reusing another shape's PropMap, we need to copy the object flags that
+// are based on property information. This is equivalent to (but faster than)
+// calling GetObjectFlagsForNewProperty for all properties in the map.
+inline ObjectFlags CopyPropMapObjectFlags(ObjectFlags dest,
+ ObjectFlags source) {
+ if (source.hasFlag(ObjectFlag::Indexed)) {
+ dest.setFlag(ObjectFlag::Indexed);
+ }
+ if (source.hasFlag(ObjectFlag::HasInterestingSymbol)) {
+ dest.setFlag(ObjectFlag::HasInterestingSymbol);
+ }
+ if (source.hasFlag(ObjectFlag::HasNonWritableOrAccessorPropExclProto)) {
+ dest.setFlag(ObjectFlag::HasNonWritableOrAccessorPropExclProto);
+ }
+ return dest;
+}
+
+} // namespace js
+
+#endif /* vm_ObjectFlags_inl_h */
diff --git a/js/src/vm/ObjectFlags.h b/js/src/vm/ObjectFlags.h
new file mode 100644
index 0000000000..1c715650f0
--- /dev/null
+++ b/js/src/vm/ObjectFlags.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ObjectFlags_h
+#define vm_ObjectFlags_h
+
+#include <stdint.h>
+
+#include "util/EnumFlags.h" // js::EnumFlags
+
+namespace js {
+
+// Flags set on the Shape which describe the referring object. Once set, these
+// cannot be unset (except during densification of sparse indexes), and they
+// are transferred from shape to shape as the object's last property changes.
+//
+// If you add a new flag here, please add appropriate code to JSObject::dump to
+// dump it as part of the object representation.
+enum class ObjectFlag : uint16_t {
+ IsUsedAsPrototype = 1 << 0,
+ NotExtensible = 1 << 1,
+ Indexed = 1 << 2,
+ HasInterestingSymbol = 1 << 3,
+
+ // If set, the shape's property map may contain an enumerable property. This
+ // only accounts for (own) shape properties: if the flag is not set, the
+ // object may still have (enumerable) dense elements, typed array elements, or
+ // a JSClass enumeration hook.
+ HasEnumerable = 1 << 4,
+
+ FrozenElements = 1 << 5, // See ObjectElements::FROZEN comment.
+
+ // If set, the shape teleporting optimization can no longer be used for
+ // accessing properties on this object.
+ // See: JSObject::hasInvalidatedTeleporting, ProtoChainSupportsTeleporting.
+ InvalidatedTeleporting = 1 << 6,
+
+ ImmutablePrototype = 1 << 7,
+
+ // See JSObject::isQualifiedVarObj().
+ QualifiedVarObj = 1 << 8,
+
+ // If set, the object may have a non-writable property or an accessor
+ // property.
+ //
+ // * This is only set for PlainObjects because we only need it for these
+ // objects and setting it for other objects confuses insertInitialShape.
+ //
+ // * This flag does not account for properties named "__proto__". This is
+ // because |Object.prototype| has a "__proto__" accessor property and we
+ // don't want to include it because it would result in the flag being set on
+ // most proto chains. Code using this flag must check for "__proto__"
+ // property names separately.
+ HasNonWritableOrAccessorPropExclProto = 1 << 9,
+
+ // If set, the object either mutated or deleted an accessor property. This is
+ // used to invalidate IC/Warp code specializing on specific getter/setter
+ // objects. See also the SMDOC comment in vm/GetterSetter.h.
+ HadGetterSetterChange = 1 << 10,
+
+ // If set, use the watchtower testing mechanism to log changes to this object.
+ UseWatchtowerTestingLog = 1 << 11,
+
+ // If set, access to existing properties of this global object can be guarded
+ // based on a per-global counter that is incremented when the global object
+ // has its properties reordered/shadowed, instead of a shape guard.
+ GenerationCountedGlobal = 1 << 12,
+};
+
+using ObjectFlags = EnumFlags<ObjectFlag>;
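+
+// For illustration only: ObjectFlags is a thin bitset over ObjectFlag, so
+// typical queries and updates look like this sketch:
+//
+//   ObjectFlags flags;
+//   flags.setFlag(ObjectFlag::Indexed);
+//   if (flags.hasFlag(ObjectFlag::Indexed)) {
+//     // react to the object having had an indexed property
+//   }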
+
+} // namespace js
+
+#endif /* vm_ObjectFlags_h */
diff --git a/js/src/vm/ObjectOperations-inl.h b/js/src/vm/ObjectOperations-inl.h
new file mode 100644
index 0000000000..f85422e15a
--- /dev/null
+++ b/js/src/vm/ObjectOperations-inl.h
@@ -0,0 +1,388 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Fundamental operations on objects. */
+
+#ifndef vm_ObjectOperations_inl_h
+#define vm_ObjectOperations_inl_h
+
+#include "vm/ObjectOperations.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Attributes.h" // MOZ_ALWAYS_INLINE
+#include "mozilla/Likely.h" // MOZ_UNLIKELY
+
+#include <stdint.h> // uint32_t
+
+#include "js/Class.h" // js::{Delete,Get,Has}PropertyOp, JSMayResolveOp, JS::ObjectOpResult
+#include "js/GCAPI.h" // JS::AutoSuppressGCAnalysis
+#include "js/Id.h" // INT_TO_JSID, jsid, JSID_INT_MAX, SYMBOL_TO_JSID
+#include "js/RootingAPI.h" // JS::Handle, JS::MutableHandle, JS::Rooted
+#include "js/Value.h" // JS::ObjectValue, JS::Value
+#include "proxy/Proxy.h" // js::Proxy
+#include "vm/JSContext.h" // JSContext
+#include "vm/JSObject.h" // JSObject
+#include "vm/NativeObject.h" // js::NativeObject, js::Native{Get,Has,Set}Property, js::NativeGetPropertyNoGC, js::Qualified
+#include "vm/ProxyObject.h" // js::ProxyObject
+#include "vm/StringType.h" // js::NameToId
+#include "vm/SymbolType.h" // JS::Symbol
+
+#include "vm/JSAtom-inl.h" // js::IndexToId
+
+namespace js {
+
+// The functions below are the fundamental operations on objects. See the
+// comment about "Standard internal methods" in jsapi.h.
+
+/*
+ * ES6 [[GetPrototypeOf]]. Get obj's prototype, storing it in protop.
+ *
+ * If obj is definitely not a proxy, the infallible obj->getProto() can be used
+ * instead. See the comment on JSObject::getTaggedProto().
+ */
+inline bool GetPrototype(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::MutableHandle<JSObject*> protop) {
+ if (obj->hasDynamicPrototype()) {
+ MOZ_ASSERT(obj->is<ProxyObject>());
+ return Proxy::getPrototype(cx, obj, protop);
+ }
+
+ protop.set(obj->staticPrototype());
+ return true;
+}
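+
+// For illustration only, a typical call (a sketch; |obj| is assumed rooted):
+//
+//   JS::Rooted<JSObject*> proto(cx);
+//   if (!GetPrototype(cx, obj, &proto)) {
+//     return false;
+//   }
+//   // |proto| may be nullptr for objects with a null [[Prototype]].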
+
+/*
+ * ES6 [[IsExtensible]]. Extensible objects can have new properties defined on
+ * them. Inextensible objects can't, and their [[Prototype]] slot is fixed as
+ * well.
+ */
+inline bool IsExtensible(JSContext* cx, JS::Handle<JSObject*> obj,
+ bool* extensible) {
+ if (obj->is<ProxyObject>()) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ return Proxy::isExtensible(cx, obj, extensible);
+ }
+
+ *extensible = obj->nonProxyIsExtensible();
+
+ // If the following assertion fails, there's somewhere else a missing
+ // call to shrinkCapacityToInitializedLength() which needs to be found and
+ // fixed.
+ MOZ_ASSERT_IF(obj->is<NativeObject>() && !*extensible,
+ obj->as<NativeObject>().getDenseInitializedLength() ==
+ obj->as<NativeObject>().getDenseCapacity());
+ return true;
+}
+
+/*
+ * ES6 [[Has]]. Set *foundp to true if `id in obj` (that is, if obj has an own
+ * or inherited property obj[id]), false otherwise.
+ */
+inline bool HasProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, bool* foundp) {
+ if (HasPropertyOp op = obj->getOpsHasProperty()) {
+ return op(cx, obj, id, foundp);
+ }
+
+ return NativeHasProperty(cx, obj.as<NativeObject>(), id, foundp);
+}
+
+inline bool HasProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ PropertyName* name, bool* foundp) {
+ JS::Rooted<jsid> id(cx, NameToId(name));
+ return HasProperty(cx, obj, id, foundp);
+}
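+
+// For illustration only, the C++ analogue of a JS `"length" in obj` test
+// (a sketch, not code from this file):
+//
+//   bool found;
+//   if (!HasProperty(cx, obj, cx->names().length, &found)) {
+//     return false;
+//   }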
+
+/*
+ * ES6 [[Get]]. Get the value of the property `obj[id]`, or undefined if no
+ * such property exists.
+ *
+ * Typically obj == receiver; if obj != receiver then the caller is most likely
+ * a proxy using GetProperty to finish a property get that started out as
+ * `receiver[id]`, and we've already searched the prototype chain up to `obj`.
+ */
+inline bool GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JS::Value> receiver, JS::Handle<jsid> id,
+ JS::MutableHandle<JS::Value> vp) {
+#ifdef ENABLE_RECORD_TUPLE
+ MOZ_ASSERT(!IsExtendedPrimitive(*obj));
+#endif
+
+ if (GetPropertyOp op = obj->getOpsGetProperty()) {
+ return op(cx, obj, receiver, id, vp);
+ }
+
+ return NativeGetProperty(cx, obj.as<NativeObject>(), receiver, id, vp);
+}
+
+inline bool GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JS::Value> receiver, PropertyName* name,
+ JS::MutableHandle<JS::Value> vp) {
+ JS::Rooted<jsid> id(cx, NameToId(name));
+ return GetProperty(cx, obj, receiver, id, vp);
+}
+
+inline bool GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> receiver, JS::Handle<jsid> id,
+ JS::MutableHandle<JS::Value> vp) {
+ JS::Rooted<JS::Value> receiverValue(cx, JS::ObjectValue(*receiver));
+ return GetProperty(cx, obj, receiverValue, id, vp);
+}
+
+inline bool GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> receiver, PropertyName* name,
+ JS::MutableHandle<JS::Value> vp) {
+ JS::Rooted<JS::Value> receiverValue(cx, JS::ObjectValue(*receiver));
+ return GetProperty(cx, obj, receiverValue, name, vp);
+}
+
+inline bool GetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JS::Value> receiver, uint32_t index,
+ JS::MutableHandle<JS::Value> vp) {
+ JS::Rooted<jsid> id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+
+ return GetProperty(cx, obj, receiver, id, vp);
+}
+
+inline bool GetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> receiver, uint32_t index,
+ JS::MutableHandle<JS::Value> vp) {
+ JS::Rooted<JS::Value> receiverValue(cx, JS::ObjectValue(*receiver));
+ return GetElement(cx, obj, receiverValue, index, vp);
+}
+
+inline bool GetElementLargeIndex(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> receiver, uint64_t index,
+ JS::MutableHandle<JS::Value> vp) {
+ MOZ_ASSERT(index < uint64_t(DOUBLE_INTEGRAL_PRECISION_LIMIT));
+
+ if (MOZ_LIKELY(index <= UINT32_MAX)) {
+ return GetElement(cx, obj, receiver, uint32_t(index), vp);
+ }
+
+ RootedValue tmp(cx, DoubleValue(index));
+ RootedId id(cx);
+ if (!PrimitiveValueToId<CanGC>(cx, tmp, &id)) {
+ return false;
+ }
+
+ return GetProperty(cx, obj, obj, id, vp);
+}
+
+inline bool GetPropertyNoGC(JSContext* cx, JSObject* obj,
+ const JS::Value& receiver, jsid id, JS::Value* vp) {
+#ifdef ENABLE_RECORD_TUPLE
+ MOZ_ASSERT(!IsExtendedPrimitive(*obj));
+#endif
+
+ if (obj->getOpsGetProperty()) {
+ return false;
+ }
+
+ return NativeGetPropertyNoGC(cx, &obj->as<NativeObject>(), receiver, id, vp);
+}
+
+inline bool GetPropertyNoGC(JSContext* cx, JSObject* obj,
+ const JS::Value& receiver, PropertyName* name,
+ JS::Value* vp) {
+ return GetPropertyNoGC(cx, obj, receiver, NameToId(name), vp);
+}
+
+inline bool GetElementNoGC(JSContext* cx, JSObject* obj,
+ const JS::Value& receiver, uint32_t index,
+ JS::Value* vp) {
+ if (obj->getOpsGetProperty()) {
+ return false;
+ }
+
+ if (index > PropertyKey::IntMax) {
+ return false;
+ }
+
+ return GetPropertyNoGC(cx, obj, receiver, PropertyKey::Int(index), vp);
+}
+
+static MOZ_ALWAYS_INLINE bool ClassMayResolveId(const JSAtomState& names,
+ const JSClass* clasp, jsid id,
+ JSObject* maybeObj) {
+ MOZ_ASSERT_IF(maybeObj, maybeObj->getClass() == clasp);
+
+ if (!clasp->getResolve()) {
+ // Sanity check: we should only have a mayResolve hook if we have a
+ // resolve hook.
+ MOZ_ASSERT(!clasp->getMayResolve(),
+ "Class with mayResolve hook but no resolve hook");
+ return false;
+ }
+
+ if (JSMayResolveOp mayResolve = clasp->getMayResolve()) {
+ // Tell the analysis our mayResolve hooks won't trigger GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ if (!mayResolve(names, id, maybeObj)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Returns whether |obj| or an object on its proto chain may have an interesting
+// symbol property (see JSObject::hasInterestingSymbolProperty). If it returns
+// true, *holder is set to the object that may have this property.
+MOZ_ALWAYS_INLINE bool MaybeHasInterestingSymbolProperty(
+ JSContext* cx, JSObject* obj, JS::Symbol* symbol,
+ JSObject** holder /* = nullptr */) {
+ MOZ_ASSERT(symbol->isInterestingSymbol());
+
+ jsid id = PropertyKey::Symbol(symbol);
+ do {
+ if (obj->maybeHasInterestingSymbolProperty() ||
+ MOZ_UNLIKELY(
+ ClassMayResolveId(cx->names(), obj->getClass(), id, obj))) {
+ if (holder) {
+ *holder = obj;
+ }
+ return true;
+ }
+ obj = obj->staticPrototype();
+ } while (obj);
+
+ return false;
+}
+
+// Like GetProperty but optimized for interesting symbol properties like
+// @@toStringTag.
+MOZ_ALWAYS_INLINE bool GetInterestingSymbolProperty(
+ JSContext* cx, JS::Handle<JSObject*> obj, JS::Symbol* sym,
+ JS::MutableHandle<JS::Value> vp) {
+ JSObject* holder;
+ if (!MaybeHasInterestingSymbolProperty(cx, obj, sym, &holder)) {
+#ifdef DEBUG
+ JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+ JS::Rooted<jsid> id(cx, PropertyKey::Symbol(sym));
+ if (!GetProperty(cx, obj, receiver, id, vp)) {
+ return false;
+ }
+ MOZ_ASSERT(vp.isUndefined());
+#endif
+
+ vp.setUndefined();
+ return true;
+ }
+
+ JS::Rooted<JSObject*> holderRoot(cx, holder);
+ JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+ JS::Rooted<jsid> id(cx, PropertyKey::Symbol(sym));
+ return GetProperty(cx, holderRoot, receiver, id, vp);
+}
+
+/*
+ * ES6 [[Set]]. Carry out the assignment `obj[id] = v`.
+ *
+ * The `receiver` argument has to do with how [[Set]] interacts with the
+ * prototype chain and proxies. It's hard to explain and ES6 doesn't really
+ * try. Long story short, if you just want bog-standard assignment, pass
+ * `ObjectValue(*obj)` as receiver. Or better, use one of the signatures that
+ * don't have a receiver parameter.
+ *
+ * Callers pass obj != receiver e.g. when a proxy is involved, obj is the
+ * proxy's target, and the proxy is using SetProperty to finish an assignment
+ * that started out as `receiver[id] = v`, by delegating it to obj.
+ */
+inline bool SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::Handle<JS::Value> v,
+ JS::Handle<JS::Value> receiver,
+ JS::ObjectOpResult& result) {
+ if (obj->getOpsSetProperty()) {
+ return JSObject::nonNativeSetProperty(cx, obj, id, v, receiver, result);
+ }
+
+ return NativeSetProperty<Qualified>(cx, obj.as<NativeObject>(), id, v,
+ receiver, result);
+}
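+
+// For illustration only: the JS-level analogue of a call with obj != receiver
+// is `Reflect.set(target, key, v, receiver)`, where the lookup walks |target|
+// but the actual property write lands on |receiver|.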
+
+inline bool SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::Handle<JS::Value> v) {
+ JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+ JS::ObjectOpResult result;
+ return SetProperty(cx, obj, id, v, receiver, result) &&
+ result.checkStrict(cx, obj, id);
+}
+
+inline bool SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ PropertyName* name, JS::Handle<JS::Value> v,
+ JS::Handle<JS::Value> receiver,
+ JS::ObjectOpResult& result) {
+ JS::Rooted<jsid> id(cx, NameToId(name));
+ return SetProperty(cx, obj, id, v, receiver, result);
+}
+
+inline bool SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ PropertyName* name, JS::Handle<JS::Value> v) {
+ JS::Rooted<jsid> id(cx, NameToId(name));
+ JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+ JS::ObjectOpResult result;
+ return SetProperty(cx, obj, id, v, receiver, result) &&
+ result.checkStrict(cx, obj, id);
+}
+
+inline bool SetElement(JSContext* cx, JS::Handle<JSObject*> obj, uint32_t index,
+ JS::Handle<JS::Value> v, JS::Handle<JS::Value> receiver,
+ JS::ObjectOpResult& result) {
+ if (obj->getOpsSetProperty()) {
+ return JSObject::nonNativeSetElement(cx, obj, index, v, receiver, result);
+ }
+
+ return NativeSetElement(cx, obj.as<NativeObject>(), index, v, receiver,
+ result);
+}
+
+/*
+ * ES6 draft rev 31 (15 Jan 2015) 7.3.3 Put (O, P, V, Throw), except that the
+ * spec says this is supposed to return a boolean value on success, which we
+ * don't bother doing.
+ */
+inline bool PutProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::Handle<JS::Value> v,
+ bool strict) {
+ JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+ JS::ObjectOpResult result;
+ return SetProperty(cx, obj, id, v, receiver, result) &&
+ result.checkStrictModeError(cx, obj, id, strict);
+}
+
+/*
+ * ES6 [[Delete]]. Equivalent to the JS code `delete obj[id]`.
+ */
+inline bool DeleteProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::ObjectOpResult& result) {
+#ifdef ENABLE_RECORD_TUPLE
+ MOZ_ASSERT(!IsExtendedPrimitive(*obj));
+#endif
+
+ if (DeletePropertyOp op = obj->getOpsDeleteProperty()) {
+ return op(cx, obj, id, result);
+ }
+
+ return NativeDeleteProperty(cx, obj.as<NativeObject>(), id, result);
+}
+
+inline bool DeleteElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, JS::ObjectOpResult& result) {
+ JS::Rooted<jsid> id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+
+ return DeleteProperty(cx, obj, id, result);
+}
+
+} /* namespace js */
+
+#endif /* vm_ObjectOperations_inl_h */
diff --git a/js/src/vm/ObjectOperations.h b/js/src/vm/ObjectOperations.h
new file mode 100644
index 0000000000..ef8c3575de
--- /dev/null
+++ b/js/src/vm/ObjectOperations.h
@@ -0,0 +1,301 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Fundamental operations on objects. */
+
+#ifndef vm_ObjectOperations_h
+#define vm_ObjectOperations_h
+
+#include "mozilla/Attributes.h" // MOZ_ALWAYS_INLINE
+#include "mozilla/Maybe.h"
+
+#include <stdint.h> // uint32_t
+
+#include "js/Id.h" // INT_TO_JSID, jsid, JSID_INT_MAX, SYMBOL_TO_JSID
+#include "js/PropertyDescriptor.h" // JSPROP_ENUMERATE, JS::PropertyDescriptor
+#include "js/RootingAPI.h" // JS::Handle, JS::MutableHandle, JS::Rooted
+#include "js/TypeDecls.h" // fwd-decl: JSContext, Symbol, Value
+#include "vm/StringType.h" // js::NameToId
+
+namespace JS {
+class ObjectOpResult;
+}
+
+namespace js {
+
+class PropertyResult;
+
+// The functions below are the fundamental operations on objects. See the
+// comment about "Standard internal methods" in jsapi.h.
+
+/*
+ * ES6 [[GetPrototypeOf]]. Get obj's prototype, storing it in protop.
+ *
+ * If obj is definitely not a proxy, the infallible obj->getProto() can be used
+ * instead. See the comment on JSObject::getTaggedProto().
+ */
+inline bool GetPrototype(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::MutableHandle<JSObject*> protop);
+
+/*
+ * ES6 [[SetPrototypeOf]]. Change obj's prototype to proto.
+ *
+ * Returns false on error, success of operation in *result. For example, if
+ * obj is not extensible, its prototype is fixed. js::SetPrototype will return
+ * true, because no exception is thrown for this; but *result will be false.
+ */
+extern bool SetPrototype(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> proto,
+ JS::ObjectOpResult& result);
+
+/* Convenience function: like the above, but throw on failure. */
+extern bool SetPrototype(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> proto);
+
+/*
+ * ES6 [[IsExtensible]]. Extensible objects can have new properties defined on
+ * them. Inextensible objects can't, and their [[Prototype]] slot is fixed as
+ * well.
+ */
+inline bool IsExtensible(JSContext* cx, JS::Handle<JSObject*> obj,
+ bool* extensible);
+
+/*
+ * ES6 [[PreventExtensions]]. Attempt to change the [[Extensible]] bit on |obj|
+ * to false. Indicate success or failure through the |result| outparam, or
+ * actual error through the return value.
+ */
+extern bool PreventExtensions(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::ObjectOpResult& result);
+
+/* Convenience function. As above, but throw on failure. */
+extern bool PreventExtensions(JSContext* cx, JS::Handle<JSObject*> obj);
+
+/*
+ * ES6 [[GetOwnProperty]]. Get a description of one of obj's own properties.
+ *
+ * If no such property exists on obj, desc will be Nothing().
+ */
+extern bool GetOwnPropertyDescriptor(
+ JSContext* cx, JS::Handle<JSObject*> obj, JS::Handle<jsid> id,
+ JS::MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc);
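+
+// For illustration only, a sketch of a typical call:
+//
+//   JS::Rooted<mozilla::Maybe<JS::PropertyDescriptor>> desc(cx);
+//   if (!GetOwnPropertyDescriptor(cx, obj, id, &desc)) {
+//     return false;
+//   }
+//   if (desc.isNothing()) {
+//     // obj has no own property named |id|
+//   }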
+
+/* ES6 [[DefineOwnProperty]]. Define a property on obj. */
+extern bool DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ Handle<JS::PropertyDescriptor> desc,
+ JS::ObjectOpResult& result);
+
+/*
+ * When the 'result' out-param is omitted, the behavior is the same as above,
+ * except that any failure results in a TypeError.
+ */
+extern bool DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JS::PropertyDescriptor> desc);
+
+extern bool DefineAccessorProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JSObject*> getter,
+ JS::Handle<JSObject*> setter, unsigned attrs,
+ JS::ObjectOpResult& result);
+
+extern bool DefineDataProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::Handle<JS::Value> value,
+ unsigned attrs, JS::ObjectOpResult& result);
+
+extern bool DefineAccessorProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JSObject*> getter,
+ JS::Handle<JSObject*> setter,
+ unsigned attrs = JSPROP_ENUMERATE);
+
+extern bool DefineDataProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::Handle<JS::Value> value,
+ unsigned attrs = JSPROP_ENUMERATE);
+
+extern bool DefineDataProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ PropertyName* name, JS::Handle<JS::Value> value,
+ unsigned attrs = JSPROP_ENUMERATE);
+
+extern bool DefineDataElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, JS::Handle<JS::Value> value,
+ unsigned attrs = JSPROP_ENUMERATE);
+
+/*
+ * ES6 [[Has]]. Set *foundp to true if `id in obj` (that is, if obj has an own
+ * or inherited property obj[id]), false otherwise.
+ */
+inline bool HasProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, bool* foundp);
+
+inline bool HasProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ PropertyName* name, bool* foundp);
+
+/*
+ * ES6 [[Get]]. Get the value of the property `obj[id]`, or undefined if no
+ * such property exists.
+ *
+ * Typically obj == receiver; if obj != receiver then the caller is most likely
+ * a proxy using GetProperty to finish a property get that started out as
+ * `receiver[id]`, and we've already searched the prototype chain up to `obj`.
+ */
+inline bool GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JS::Value> receiver, JS::Handle<jsid> id,
+ JS::MutableHandle<JS::Value> vp);
+
+inline bool GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JS::Value> receiver, PropertyName* name,
+ JS::MutableHandle<JS::Value> vp);
+
+inline bool GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> receiver, JS::Handle<jsid> id,
+ JS::MutableHandle<JS::Value> vp);
+
+inline bool GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> receiver, PropertyName* name,
+ JS::MutableHandle<JS::Value> vp);
+
+inline bool GetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JS::Value> receiver, uint32_t index,
+ JS::MutableHandle<JS::Value> vp);
+
+inline bool GetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<JSObject*> receiver, uint32_t index,
+ JS::MutableHandle<JS::Value> vp);
+
+inline bool GetPropertyNoGC(JSContext* cx, JSObject* obj,
+ const JS::Value& receiver, jsid id, JS::Value* vp);
+
+inline bool GetPropertyNoGC(JSContext* cx, JSObject* obj,
+ const JS::Value& receiver, PropertyName* name,
+ JS::Value* vp);
+
+inline bool GetElementNoGC(JSContext* cx, JSObject* obj,
+ const JS::Value& receiver, uint32_t index,
+ JS::Value* vp);
+
+// Returns whether |obj| or an object on its proto chain may have an interesting
+// symbol property (see JSObject::hasInterestingSymbolProperty). If it returns
+// true, *holder is set to the object that may have this property.
+MOZ_ALWAYS_INLINE bool MaybeHasInterestingSymbolProperty(
+ JSContext* cx, JSObject* obj, JS::Symbol* symbol,
+ JSObject** holder = nullptr);
+
+// Like GetProperty but optimized for interesting symbol properties like
+// @@toStringTag.
+MOZ_ALWAYS_INLINE bool GetInterestingSymbolProperty(
+ JSContext* cx, JS::Handle<JSObject*> obj, JS::Symbol* sym,
+ JS::MutableHandle<JS::Value> vp);
+
+/*
+ * ES6 [[Set]]. Carry out the assignment `obj[id] = v`.
+ *
+ * The `receiver` argument has to do with how [[Set]] interacts with the
+ * prototype chain and proxies. It's hard to explain and ES6 doesn't really
+ * try. Long story short, if you just want bog-standard assignment, pass
+ * `ObjectValue(*obj)` as receiver. Or better, use one of the signatures that
+ * don't have a receiver parameter.
+ *
+ * Callers pass obj != receiver e.g. when a proxy is involved, obj is the
+ * proxy's target, and the proxy is using SetProperty to finish an assignment
+ * that started out as `receiver[id] = v`, by delegating it to obj.
+ */
+inline bool SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::Handle<JS::Value> v,
+ JS::Handle<JS::Value> receiver,
+ JS::ObjectOpResult& result);
+
+inline bool SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::Handle<JS::Value> v);
+
+inline bool SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ PropertyName* name, JS::Handle<JS::Value> v,
+ JS::Handle<JS::Value> receiver,
+ JS::ObjectOpResult& result);
+
+inline bool SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ PropertyName* name, JS::Handle<JS::Value> v);
+
+inline bool SetElement(JSContext* cx, JS::Handle<JSObject*> obj, uint32_t index,
+ JS::Handle<JS::Value> v, JS::Handle<JS::Value> receiver,
+ JS::ObjectOpResult& result);
+
+/*
+ * ES6 draft rev 31 (15 Jan 2015) 7.3.3 Put (O, P, V, Throw), except that the
+ * spec says this is supposed to return a boolean value on success, which we
+ * don't bother doing.
+ */
+inline bool PutProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::Handle<JS::Value> v,
+ bool strict);
+
+/*
+ * ES6 [[Delete]]. Equivalent to the JS code `delete obj[id]`.
+ */
+inline bool DeleteProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JS::ObjectOpResult& result);
+
+inline bool DeleteElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, JS::ObjectOpResult& result);
+
+/*** SpiderMonkey nonstandard internal methods ******************************/
+
+/**
+ * If |obj| (underneath any functionally-transparent wrapper proxies) has as
+ * its [[GetPrototypeOf]] trap the ordinary [[GetPrototypeOf]] behavior defined
+ * for ordinary objects, set |*isOrdinary = true| and store |obj|'s prototype
+ * in |result|. Otherwise set |*isOrdinary = false|. In case of error, both
+ * outparams have unspecified value.
+ */
+extern bool GetPrototypeIfOrdinary(JSContext* cx, JS::Handle<JSObject*> obj,
+ bool* isOrdinary,
+ JS::MutableHandle<JSObject*> protop);
+
+/*
+ * Attempt to make |obj|'s [[Prototype]] immutable, such that subsequently
+ * trying to change it will not work. If an internal error occurred,
+ * returns false. Otherwise, |*succeeded| is set to true iff |obj|'s
+ * [[Prototype]] is now immutable.
+ */
+extern bool SetImmutablePrototype(JSContext* cx, JS::Handle<JSObject*> obj,
+ bool* succeeded);
+
+/*
+ * Deprecated. Finds a PropertyDescriptor somewhere along the prototype chain,
+ * similar to GetOwnPropertyDescriptor. |holder| indicates on which object the
+ * property was found.
+ */
+extern bool GetPropertyDescriptor(
+ JSContext* cx, JS::Handle<JSObject*> obj, JS::Handle<jsid> id,
+ MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc,
+ JS::MutableHandle<JSObject*> holder);
+
+/*
+ * Deprecated. A version of HasProperty that also returns the object on which
+ * the property was found (but that information is unreliable for proxies), and
+ * the Shape of the property, if native.
+ */
+extern bool LookupProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::MutableHandle<JSObject*> objp,
+ PropertyResult* propp);
+
+inline bool LookupProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ PropertyName* name,
+ JS::MutableHandle<JSObject*> objp,
+ PropertyResult* propp) {
+ JS::Rooted<jsid> id(cx, NameToId(name));
+ return LookupProperty(cx, obj, id, objp, propp);
+}
+
+/* Set *result to tell whether obj has an own property with the given id. */
+extern bool HasOwnProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, bool* result);
+
+} /* namespace js */
+
+#endif /* vm_ObjectOperations_h */
diff --git a/js/src/vm/OffThreadPromiseRuntimeState.cpp b/js/src/vm/OffThreadPromiseRuntimeState.cpp
new file mode 100644
index 0000000000..004c50492a
--- /dev/null
+++ b/js/src/vm/OffThreadPromiseRuntimeState.cpp
@@ -0,0 +1,299 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/OffThreadPromiseRuntimeState.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT{,_IF}
+
+#include <utility>  // std::swap
+
+#include "jspubtd.h" // js::CurrentThreadCanAccessRuntime
+
+#include "js/AllocPolicy.h" // js::ReportOutOfMemory
+#include "js/HeapAPI.h" // JS::shadow::Zone
+#include "js/Promise.h" // JS::Dispatchable, JS::DispatchToEventLoopCallback
+#include "js/Utility.h" // js_delete, js::AutoEnterOOMUnsafeRegion
+#include "threading/ProtectedData.h" // js::UnprotectedData
+#include "vm/HelperThreads.h" // js::AutoLockHelperThreadState
+#include "vm/JSContext.h" // JSContext
+#include "vm/PromiseObject.h" // js::PromiseObject
+#include "vm/Realm.h" // js::AutoRealm
+#include "vm/Runtime.h" // JSRuntime
+
+#include "vm/Realm-inl.h" // js::AutoRealm::AutoRealm
+
+using JS::Handle;
+
+using js::OffThreadPromiseRuntimeState;
+using js::OffThreadPromiseTask;
+
+OffThreadPromiseTask::OffThreadPromiseTask(JSContext* cx,
+ JS::Handle<PromiseObject*> promise)
+ : runtime_(cx->runtime()), promise_(cx, promise), registered_(false) {
+ MOZ_ASSERT(runtime_ == promise_->zone()->runtimeFromMainThread());
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+ MOZ_ASSERT(cx->runtime()->offThreadPromiseState.ref().initialized());
+}
+
+OffThreadPromiseTask::~OffThreadPromiseTask() {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+
+ OffThreadPromiseRuntimeState& state = runtime_->offThreadPromiseState.ref();
+ MOZ_ASSERT(state.initialized());
+
+ if (registered_) {
+ unregister(state);
+ }
+}
+
+bool OffThreadPromiseTask::init(JSContext* cx) {
+ MOZ_ASSERT(cx->runtime() == runtime_);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+
+ OffThreadPromiseRuntimeState& state = runtime_->offThreadPromiseState.ref();
+ MOZ_ASSERT(state.initialized());
+
+ AutoLockHelperThreadState lock;
+
+ if (!state.live().putNew(this)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ registered_ = true;
+ return true;
+}
+
+void OffThreadPromiseTask::unregister(OffThreadPromiseRuntimeState& state) {
+ MOZ_ASSERT(registered_);
+ AutoLockHelperThreadState lock;
+ state.live().remove(this);
+ registered_ = false;
+}
+
+void OffThreadPromiseTask::run(JSContext* cx,
+ MaybeShuttingDown maybeShuttingDown) {
+ MOZ_ASSERT(cx->runtime() == runtime_);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+ MOZ_ASSERT(registered_);
+
+ // Remove this task from live_ before calling `resolve`, so that if `resolve`
+ // itself drains the queue reentrantly, the queue will not think this task is
+ // yet to be queued and block waiting for it.
+ //
+ // The unregister method synchronizes on the helper thread lock and ensures
+ // that we don't delete the task while the helper thread is still running.
+ OffThreadPromiseRuntimeState& state = runtime_->offThreadPromiseState.ref();
+ MOZ_ASSERT(state.initialized());
+ unregister(state);
+
+ if (maybeShuttingDown == JS::Dispatchable::NotShuttingDown) {
+ // We can't leave a pending exception when returning to the caller so do
+ // the same thing as Gecko, which is to ignore the error. This should
+ // only happen due to OOM or interruption.
+ AutoRealm ar(cx, promise_);
+ if (!resolve(cx, promise_)) {
+ cx->clearPendingException();
+ }
+ }
+
+ js_delete(this);
+}
+
+void OffThreadPromiseTask::dispatchResolveAndDestroy() {
+ AutoLockHelperThreadState lock;
+ dispatchResolveAndDestroy(lock);
+}
+
+void OffThreadPromiseTask::dispatchResolveAndDestroy(
+ const AutoLockHelperThreadState& lock) {
+ MOZ_ASSERT(registered_);
+
+ OffThreadPromiseRuntimeState& state = runtime_->offThreadPromiseState.ref();
+ MOZ_ASSERT(state.initialized());
+ MOZ_ASSERT(state.live().has(this));
+
+ // If the dispatch succeeds, then we are guaranteed that run() will be
+ // called on an active JSContext of runtime_.
+ if (state.dispatchToEventLoopCallback_(state.dispatchToEventLoopClosure_,
+ this)) {
+ return;
+ }
+
+ // The DispatchToEventLoopCallback has rejected this task, indicating that
+ // shutdown has begun. Count the number of rejected tasks that have called
+ // dispatchResolveAndDestroy, and when they account for the entire contents of
+ // live_, notify OffThreadPromiseRuntimeState::shutdown that it is safe to
+ // destruct them.
+ state.numCanceled_++;
+ if (state.numCanceled_ == state.live().count()) {
+ state.allCanceled().notify_one();
+ }
+}
+
+OffThreadPromiseRuntimeState::OffThreadPromiseRuntimeState()
+ : dispatchToEventLoopCallback_(nullptr),
+ dispatchToEventLoopClosure_(nullptr),
+ numCanceled_(0),
+ internalDispatchQueueClosed_(false) {}
+
+OffThreadPromiseRuntimeState::~OffThreadPromiseRuntimeState() {
+ MOZ_ASSERT(live_.refNoCheck().empty());
+ MOZ_ASSERT(numCanceled_ == 0);
+ MOZ_ASSERT(internalDispatchQueue_.refNoCheck().empty());
+ MOZ_ASSERT(!initialized());
+}
+
+void OffThreadPromiseRuntimeState::init(
+ JS::DispatchToEventLoopCallback callback, void* closure) {
+ MOZ_ASSERT(!initialized());
+
+ dispatchToEventLoopCallback_ = callback;
+ dispatchToEventLoopClosure_ = closure;
+
+ MOZ_ASSERT(initialized());
+}
+
+/* static */
+bool OffThreadPromiseRuntimeState::internalDispatchToEventLoop(
+ void* closure, JS::Dispatchable* d) {
+ OffThreadPromiseRuntimeState& state =
+ *reinterpret_cast<OffThreadPromiseRuntimeState*>(closure);
+ MOZ_ASSERT(state.usingInternalDispatchQueue());
+ gHelperThreadLock.assertOwnedByCurrentThread();
+
+ if (state.internalDispatchQueueClosed_) {
+ return false;
+ }
+
+ // The JS API contract is that 'false' means shutdown, so be infallible
+ // here (like Gecko).
+ AutoEnterOOMUnsafeRegion noOOM;
+ if (!state.internalDispatchQueue().pushBack(d)) {
+ noOOM.crash("internalDispatchToEventLoop");
+ }
+
+ // Wake up internalDrain() if it is waiting for a job to finish.
+ state.internalDispatchQueueAppended().notify_one();
+ return true;
+}
+
+bool OffThreadPromiseRuntimeState::usingInternalDispatchQueue() const {
+ return dispatchToEventLoopCallback_ == internalDispatchToEventLoop;
+}
+
+void OffThreadPromiseRuntimeState::initInternalDispatchQueue() {
+ init(internalDispatchToEventLoop, this);
+ MOZ_ASSERT(usingInternalDispatchQueue());
+}
+
+bool OffThreadPromiseRuntimeState::initialized() const {
+ return !!dispatchToEventLoopCallback_;
+}
+
+void OffThreadPromiseRuntimeState::internalDrain(JSContext* cx) {
+ MOZ_ASSERT(usingInternalDispatchQueue());
+
+ for (;;) {
+ JS::Dispatchable* d;
+ {
+ AutoLockHelperThreadState lock;
+
+ MOZ_ASSERT(!internalDispatchQueueClosed_);
+ MOZ_ASSERT_IF(!internalDispatchQueue().empty(), !live().empty());
+ if (live().empty()) {
+ return;
+ }
+
+ // There are extant live OffThreadPromiseTasks. If none are in the queue,
+ // block until one of them finishes and enqueues a dispatchable.
+ while (internalDispatchQueue().empty()) {
+ internalDispatchQueueAppended().wait(lock);
+ }
+
+ d = internalDispatchQueue().popCopyFront();
+ }
+
+ // Don't call run() with lock held to avoid deadlock.
+ d->run(cx, JS::Dispatchable::NotShuttingDown);
+ }
+}
+
+bool OffThreadPromiseRuntimeState::internalHasPending() {
+ MOZ_ASSERT(usingInternalDispatchQueue());
+
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(!internalDispatchQueueClosed_);
+ MOZ_ASSERT_IF(!internalDispatchQueue().empty(), !live().empty());
+ return !live().empty();
+}
+
+void OffThreadPromiseRuntimeState::shutdown(JSContext* cx) {
+ if (!initialized()) {
+ return;
+ }
+
+ AutoLockHelperThreadState lock;
+
+  // When the shell is using the internal event loop, we must uphold the same
+  // requirement we impose on embeddings: before shutdown, every task that was
+  // successfully dispatched to the event loop must have been run.
+ if (usingInternalDispatchQueue()) {
+ DispatchableFifo dispatchQueue;
+ {
+ std::swap(dispatchQueue, internalDispatchQueue());
+ MOZ_ASSERT(internalDispatchQueue().empty());
+ internalDispatchQueueClosed_ = true;
+ }
+
+ // Don't call run() with lock held to avoid deadlock.
+ AutoUnlockHelperThreadState unlock(lock);
+ for (JS::Dispatchable* d : dispatchQueue) {
+ d->run(cx, JS::Dispatchable::ShuttingDown);
+ }
+ }
+
+ // An OffThreadPromiseTask may only be safely deleted on its JSContext's
+ // thread (since it contains a PersistentRooted holding its promise), and
+ // only after it has called dispatchResolveAndDestroy (since that is our
+ // only indication that its owner is done writing into it).
+ //
+ // OffThreadPromiseTasks accepted by the DispatchToEventLoopCallback are
+ // deleted by their 'run' methods. Only dispatchResolveAndDestroy invokes
+ // the callback, and the point of the callback is to call 'run' on the
+ // JSContext's thread, so the conditions above are met.
+ //
+ // But although the embedding's DispatchToEventLoopCallback promises to run
+ // every task it accepts before shutdown, when shutdown does begin it starts
+ // rejecting tasks; we cannot count on 'run' to clean those up for us.
+  // Instead, dispatchResolveAndDestroy keeps a count of rejected ('canceled')
+  // tasks; once that count covers everything in live_, and since this function
+  // itself runs on the JSContext's thread, we can safely delete them all here.
+ while (live().count() != numCanceled_) {
+ MOZ_ASSERT(numCanceled_ < live().count());
+ allCanceled().wait(lock);
+ }
+
+  // Now that live_ contains only canceled tasks, we can just delete
+ // everything.
+ for (OffThreadPromiseTaskSet::Range r = live().all(); !r.empty();
+ r.popFront()) {
+ OffThreadPromiseTask* task = r.front();
+
+    // We don't want 'task' to unregister itself (which would mutate live_
+    // while we are iterating over it), so reset its internal registered_ flag.
+ MOZ_ASSERT(task->registered_);
+ task->registered_ = false;
+ js_delete(task);
+ }
+ live().clear();
+ numCanceled_ = 0;
+
+ // After shutdown, there should be no OffThreadPromiseTask activity in this
+ // JSRuntime. Revert to the !initialized() state to catch bugs.
+ dispatchToEventLoopCallback_ = nullptr;
+ MOZ_ASSERT(!initialized());
+}
diff --git a/js/src/vm/OffThreadPromiseRuntimeState.h b/js/src/vm/OffThreadPromiseRuntimeState.h
new file mode 100644
index 0000000000..34c21ec106
--- /dev/null
+++ b/js/src/vm/OffThreadPromiseRuntimeState.h
@@ -0,0 +1,208 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_OffThreadPromiseRuntimeState_h
+#define vm_OffThreadPromiseRuntimeState_h
+
+#include <stddef.h> // size_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "ds/Fifo.h" // js::Fifo
+#include "js/AllocPolicy.h" // js::SystemAllocPolicy
+#include "js/HashTable.h" // js::DefaultHasher, js::HashSet
+#include "js/Promise.h" // JS::Dispatchable, JS::Dispatchable::MaybeShuttingDown, JS::DispatchToEventLoopCallback
+#include "js/RootingAPI.h" // JS::Handle, JS::PersistentRooted
+#include "threading/ConditionVariable.h" // js::ConditionVariable
+#include "vm/PromiseObject.h" // js::PromiseObject
+
+struct JS_PUBLIC_API JSContext;
+struct JS_PUBLIC_API JSRuntime;
+
+namespace js {
+
+class AutoLockHelperThreadState;
+class OffThreadPromiseRuntimeState;
+
+// [SMDOC] OffThreadPromiseTask: an off-main-thread task that resolves a promise
+//
+// An OffThreadPromiseTask is an abstract base class holding a JavaScript
+// promise that will be resolved (fulfilled or rejected) with the results of a
+// task possibly performed by some other thread.
+//
+// An OffThreadPromiseTask's lifecycle is as follows:
+//
+// - Some JavaScript native wishes to return a promise of the result of some
+// computation that might be performed by other threads (say, helper threads
+// or the embedding's I/O threads), so it creates a PromiseObject to represent
+// the result, and an OffThreadPromiseTask referring to it. After handing the
+// OffThreadPromiseTask to the code doing the actual work, the native is free
+// to return the PromiseObject to its caller.
+//
+// - When the computation is done, successfully or otherwise, it populates the
+// OffThreadPromiseTask—which is actually an instance of some concrete
+// subclass specific to the task—with the information needed to resolve the
+// promise, and calls OffThreadPromiseTask::dispatchResolveAndDestroy. This
+// enqueues a runnable on the JavaScript thread to which the promise belongs.
+//
+// - When it gets around to the runnable, the JavaScript thread calls the
+// OffThreadPromiseTask's `resolve` method, which the concrete subclass has
+// overridden to resolve the promise appropriately. This probably enqueues a
+// promise reaction job.
+//
+// - The JavaScript thread then deletes the OffThreadPromiseTask.
+//
+// During shutdown, the process is slightly different. Enqueuing runnables to
+// the JavaScript thread begins to fail. JSRuntime shutdown waits for all
+// outstanding tasks to call dispatchResolveAndDestroy, and then deletes them on
+// the main thread, without calling `resolve`.
+//
+// For example, the JavaScript function WebAssembly.compile uses
+// OffThreadPromiseTask to manage the result of a helper thread task, accepting
+// binary WebAssembly code and returning a promise of a compiled
+// WebAssembly.Module. It would like to do this compilation work on a helper
+// thread. When called by JavaScript, WebAssembly.compile creates a promise,
+// builds a CompileBufferTask (the OffThreadPromiseTask concrete subclass) to
+// keep track of it, and then hands that to a helper thread. When the helper
+// thread is done, successfully or otherwise, it calls the CompileBufferTask's
+// dispatchResolveAndDestroy method, which enqueues a runnable to the JavaScript
+// thread to resolve the promise and delete the CompileBufferTask.
+// (CompileBufferTask actually implements PromiseHelperTask, which implements
+// OffThreadPromiseTask; PromiseHelperTask is what our helper thread scheduler
+// requires.)
+//
+// OffThreadPromiseTasks are not limited to use with helper threads. For
+// example, a function returning a promise of the result of a network operation
+// could provide the code collecting the incoming data with an
+// OffThreadPromiseTask for the promise, and let the embedding's network I/O
+// threads call dispatchResolveAndDestroy.
+//
+// OffThreadPromiseTask may also be used purely on the main thread, as a way to
+// "queue a task" in HTML terms. Note that a "task" is not the same as a
+// "microtask" and there are separate queues for tasks and microtasks that are
+// drained at separate times in the browser. The task queue is implemented by
+// the browser's main event loop. The microtask queue is implemented by
+// JS::JobQueue, which is used for promises and is drained before returning
+// to the event loop. Thus OffThreadPromiseTask can only be used when the
+// spec says "queue a task", as the WebAssembly APIs do.
+//
+// An OffThreadPromiseTask has a JSContext, and must be constructed and have its
+// 'init' method called on that JSContext's thread. Once initialized, its
+// dispatchResolveAndDestroy method may be called from any thread. This is the
+// only safe way to destruct an OffThreadPromiseTask; doing so ensures the
+// OffThreadPromiseTask's destructor will run on the JSContext's thread, either
+// from the event loop or during shutdown.
+//
+// OffThreadPromiseTask::dispatchResolveAndDestroy uses the
+// JS::DispatchToEventLoopCallback provided by the embedding to enqueue
+// runnables on the JavaScript thread. See the comments for
+// DispatchToEventLoopCallback for details.
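+//
+// As a minimal sketch (names invented for illustration; this is not engine
+// code), a concrete subclass might look like:
+//
+//   class ComputeTask : public OffThreadPromiseTask {
+//     double result_ = 0.0;
+//
+//     // Runs on the JSContext's thread via dispatchResolveAndDestroy.
+//     bool resolve(JSContext* cx,
+//                  JS::Handle<PromiseObject*> promise) override {
+//       JS::RootedValue value(cx, JS::NumberValue(result_));
+//       return PromiseObject::resolve(cx, promise, value);
+//     }
+//
+//    public:
+//     ComputeTask(JSContext* cx, JS::Handle<PromiseObject*> promise)
+//         : OffThreadPromiseTask(cx, promise) {}
+//     void setResult(double r) { result_ = r; }  // worker thread writes this
+//   };
+//
+// The worker, when done, calls setResult() and then
+// dispatchResolveAndDestroy(); it must not touch the task afterwards.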
+
+class OffThreadPromiseTask : public JS::Dispatchable {
+ friend class OffThreadPromiseRuntimeState;
+
+ JSRuntime* runtime_;
+ JS::PersistentRooted<PromiseObject*> promise_;
+ bool registered_;
+
+ void operator=(const OffThreadPromiseTask&) = delete;
+ OffThreadPromiseTask(const OffThreadPromiseTask&) = delete;
+
+ void unregister(OffThreadPromiseRuntimeState& state);
+
+ protected:
+ OffThreadPromiseTask(JSContext* cx, JS::Handle<PromiseObject*> promise);
+
+ // To be called by OffThreadPromiseTask and implemented by the derived class.
+ virtual bool resolve(JSContext* cx, JS::Handle<PromiseObject*> promise) = 0;
+
+ // JS::Dispatchable implementation. Ends with 'delete this'.
+ void run(JSContext* cx, MaybeShuttingDown maybeShuttingDown) final;
+
+ public:
+ ~OffThreadPromiseTask() override;
+
+ // Initializing an OffThreadPromiseTask informs the runtime that it must
+ // wait on shutdown for this task to rejoin the active JSContext by calling
+ // dispatchResolveAndDestroy().
+ bool init(JSContext* cx);
+
+ // An initialized OffThreadPromiseTask can be dispatched to an active
+ // JSContext of its Promise's JSRuntime from any thread. Normally, this will
+  // lead to resolve() being called on the JSContext's thread, given the
+  // Promise. However, if shutdown interrupts, resolve() may not be called,
+  // though the OffThreadPromiseTask will be destroyed on a JSContext thread.
+ void dispatchResolveAndDestroy();
+ void dispatchResolveAndDestroy(const AutoLockHelperThreadState& lock);
+};
+
+using OffThreadPromiseTaskSet =
+ HashSet<OffThreadPromiseTask*, DefaultHasher<OffThreadPromiseTask*>,
+ SystemAllocPolicy>;
+
+using DispatchableFifo = Fifo<JS::Dispatchable*, 0, SystemAllocPolicy>;
+
+class OffThreadPromiseRuntimeState {
+ friend class OffThreadPromiseTask;
+
+ // These fields are initialized once before any off-thread usage and thus do
+ // not require a lock.
+ JS::DispatchToEventLoopCallback dispatchToEventLoopCallback_;
+ void* dispatchToEventLoopClosure_;
+
+ // A set of all OffThreadPromiseTasks that have successfully called 'init'.
+ // OffThreadPromiseTask's destructor removes them from the set.
+ HelperThreadLockData<OffThreadPromiseTaskSet> live_;
+
+ // The allCanceled_ condition is waited on and notified during engine
+ // shutdown, communicating when all off-thread tasks in live_ are safe to be
+ // destroyed from the (shutting down) main thread. This condition is met when
+ // live_.count() == numCanceled_ where "canceled" means "the
+ // DispatchToEventLoopCallback failed after this task finished execution".
+ HelperThreadLockData<ConditionVariable> allCanceled_;
+ HelperThreadLockData<size_t> numCanceled_;
+
+ // The queue of JS::Dispatchables used by the DispatchToEventLoopCallback that
+ // calling js::UseInternalJobQueues installs.
+ HelperThreadLockData<DispatchableFifo> internalDispatchQueue_;
+ HelperThreadLockData<ConditionVariable> internalDispatchQueueAppended_;
+ HelperThreadLockData<bool> internalDispatchQueueClosed_;
+
+ OffThreadPromiseTaskSet& live() { return live_.ref(); }
+ ConditionVariable& allCanceled() { return allCanceled_.ref(); }
+
+ DispatchableFifo& internalDispatchQueue() {
+ return internalDispatchQueue_.ref();
+ }
+ ConditionVariable& internalDispatchQueueAppended() {
+ return internalDispatchQueueAppended_.ref();
+ }
+
+ static bool internalDispatchToEventLoop(void*, JS::Dispatchable*);
+ bool usingInternalDispatchQueue() const;
+
+ void operator=(const OffThreadPromiseRuntimeState&) = delete;
+ OffThreadPromiseRuntimeState(const OffThreadPromiseRuntimeState&) = delete;
+
+ public:
+ OffThreadPromiseRuntimeState();
+ ~OffThreadPromiseRuntimeState();
+ void init(JS::DispatchToEventLoopCallback callback, void* closure);
+ void initInternalDispatchQueue();
+ bool initialized() const;
+
+ // If initInternalDispatchQueue() was called, internalDrain() can be
+ // called to periodically drain the dispatch queue before shutdown.
+ void internalDrain(JSContext* cx);
+ bool internalHasPending();
+
+ // shutdown() must be called by the JSRuntime while the JSRuntime is valid.
+ void shutdown(JSContext* cx);
+};
+
+} // namespace js
+
+#endif // vm_OffThreadPromiseRuntimeState_h
diff --git a/js/src/vm/OffThreadScriptCompilation.cpp b/js/src/vm/OffThreadScriptCompilation.cpp
new file mode 100644
index 0000000000..436e77a969
--- /dev/null
+++ b/js/src/vm/OffThreadScriptCompilation.cpp
@@ -0,0 +1,153 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/OffThreadScriptCompilation.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Range.h" // mozilla::Range
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+#include "mozilla/Vector.h" // mozilla::Vector
+
+#include <stddef.h> // size_t
+
+#include "jspubtd.h" // js::CurrentThreadCanAccessRuntime
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "js/CompileOptions.h" // JS::ReadOnlyCompileOptions
+#include "js/experimental/JSStencil.h" // JS::CompileToStencilOffThread, JS::FinishCompileToStencilOffThread
+#include "js/SourceText.h" // JS::SourceText
+#include "vm/HelperThreadState.h" // js::StartOffThreadParseScript
+#include "vm/JSContext.h" // JSContext
+#include "vm/Runtime.h" // js::CanUseExtraThreads
+
+using namespace js;
+
+using mozilla::Utf8Unit;
+
+using JS::ReadOnlyCompileOptions;
+
+enum class OffThread { Compile, Decode };
+
+template <typename OptionT>
+static bool CanDoOffThread(JSContext* cx, const OptionT& options,
+ size_t length) {
+ static const size_t TINY_LENGTH = 5 * 1000;
+
+ // These are heuristics which the caller may choose to ignore (e.g., for
+ // testing purposes).
+ if (!options.forceAsync) {
+    // Compiling off the main thread involves significant overheads.
+ // Don't bother if the script is tiny.
+ if (length < TINY_LENGTH) {
+ return false;
+ }
+ }
+
+ return cx->runtime()->canUseParallelParsing() && CanUseExtraThreads();
+}
+
+JS_PUBLIC_API bool JS::CanCompileOffThread(
+ JSContext* cx, const ReadOnlyCompileOptions& options, size_t length) {
+ return CanDoOffThread(cx, options, length);
+}
+
+JS_PUBLIC_API JS::OffThreadToken* JS::CompileToStencilOffThread(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<char16_t>& srcBuf, OffThreadCompileCallback callback,
+ void* callbackData) {
+ MOZ_ASSERT(CanCompileOffThread(cx, options, srcBuf.length()));
+ return StartOffThreadCompileToStencil(cx, options, srcBuf, callback,
+ callbackData);
+}
+
+JS_PUBLIC_API JS::OffThreadToken* JS::CompileToStencilOffThread(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<Utf8Unit>& srcBuf, OffThreadCompileCallback callback,
+ void* callbackData) {
+ MOZ_ASSERT(CanCompileOffThread(cx, options, srcBuf.length()));
+ return StartOffThreadCompileToStencil(cx, options, srcBuf, callback,
+ callbackData);
+}
+
+JS_PUBLIC_API JS::OffThreadToken* JS::CompileModuleToStencilOffThread(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<char16_t>& srcBuf, OffThreadCompileCallback callback,
+ void* callbackData) {
+ MOZ_ASSERT(CanCompileOffThread(cx, options, srcBuf.length()));
+ return StartOffThreadCompileModuleToStencil(cx, options, srcBuf, callback,
+ callbackData);
+}
+
+JS_PUBLIC_API JS::OffThreadToken* JS::CompileModuleToStencilOffThread(
+ JSContext* cx, const ReadOnlyCompileOptions& options,
+ JS::SourceText<Utf8Unit>& srcBuf, OffThreadCompileCallback callback,
+ void* callbackData) {
+ MOZ_ASSERT(CanCompileOffThread(cx, options, srcBuf.length()));
+ return StartOffThreadCompileModuleToStencil(cx, options, srcBuf, callback,
+ callbackData);
+}
+
+JS_PUBLIC_API JS::OffThreadToken* JS::DecodeStencilOffThread(
+ JSContext* cx, const DecodeOptions& options, const TranscodeBuffer& buffer,
+ size_t cursor, OffThreadCompileCallback callback, void* callbackData) {
+ JS::TranscodeRange range(buffer.begin() + cursor, buffer.length() - cursor);
+ MOZ_ASSERT(CanDecodeOffThread(cx, options, range.length()));
+ return StartOffThreadDecodeStencil(cx, options, range, callback,
+ callbackData);
+}
+
+JS_PUBLIC_API JS::OffThreadToken* JS::DecodeStencilOffThread(
+ JSContext* cx, const DecodeOptions& options, const TranscodeRange& range,
+ OffThreadCompileCallback callback, void* callbackData) {
+ MOZ_ASSERT(CanDecodeOffThread(cx, options, range.length()));
+ return StartOffThreadDecodeStencil(cx, options, range, callback,
+ callbackData);
+}
+
+JS_PUBLIC_API already_AddRefed<JS::Stencil> JS::FinishOffThreadStencil(
+ JSContext* cx, JS::OffThreadToken* token,
+ JS::InstantiationStorage* storage /* = nullptr */) {
+ MOZ_ASSERT(cx);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ RefPtr<JS::Stencil> stencil =
+ HelperThreadState().finishStencilTask(cx, token, storage);
+ return stencil.forget();
+}
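+
+// Illustrative (hypothetical) embedder flow tying these entry points
+// together; `Shared` and postToMainThread() are invented for the example:
+//
+//   static void OnCompileDone(JS::OffThreadToken* token, void* data) {
+//     // Called on a helper thread: only forward the token to the main
+//     // thread's event loop; do not touch the JSContext here.
+//     static_cast<Shared*>(data)->postToMainThread(token);
+//   }
+//
+//   // On the main thread:
+//   if (JS::CanCompileOffThread(cx, options, srcBuf.length())) {
+//     JS::CompileToStencilOffThread(cx, options, srcBuf, OnCompileDone,
+//                                   shared);
+//   }
+//   // Later, once the token is delivered back to the main thread:
+//   RefPtr<JS::Stencil> stencil = JS::FinishOffThreadStencil(cx, token);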
+
+JS_PUBLIC_API void JS::CancelOffThreadToken(JSContext* cx,
+ JS::OffThreadToken* token) {
+ MOZ_ASSERT(cx);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ HelperThreadState().cancelParseTask(cx->runtime(), token);
+}
+
+JS_PUBLIC_API bool JS::CanDecodeOffThread(JSContext* cx,
+ const DecodeOptions& options,
+ size_t length) {
+ return CanDoOffThread(cx, options, length);
+}
+
+JS_PUBLIC_API JS::OffThreadToken* JS::DecodeMultiStencilsOffThread(
+ JSContext* cx, const DecodeOptions& options, TranscodeSources& sources,
+ OffThreadCompileCallback callback, void* callbackData) {
+#ifdef DEBUG
+ size_t length = 0;
+ for (auto& source : sources) {
+ length += source.range.length();
+ }
+ MOZ_ASSERT(CanDecodeOffThread(cx, options, length));
+#endif
+ return StartOffThreadDecodeMultiStencils(cx, options, sources, callback,
+ callbackData);
+}
+
+JS_PUBLIC_API bool JS::FinishDecodeMultiStencilsOffThread(
+ JSContext* cx, JS::OffThreadToken* token,
+ mozilla::Vector<RefPtr<JS::Stencil>>* stencils) {
+ MOZ_ASSERT(cx);
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ return HelperThreadState().finishMultiStencilsDecodeTask(cx, token, stencils);
+}
diff --git a/js/src/vm/Opcodes.h b/js/src/vm/Opcodes.h
new file mode 100644
index 0000000000..d26ea4de39
--- /dev/null
+++ b/js/src/vm/Opcodes.h
@@ -0,0 +1,3632 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sw=2 et tw=0 ft=c:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Opcodes_h
+#define vm_Opcodes_h
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "js/TypeDecls.h"
+
+// clang-format off
+/*
+ * [SMDOC] Bytecode Definitions
+ *
+ * SpiderMonkey bytecode instructions.
+ *
+ * To use this header, define a macro of the form:
+ *
+ * #define MACRO(op, op_snake, token, length, nuses, ndefs, format) ...
+ *
+ * Then `FOR_EACH_OPCODE(MACRO)` invokes `MACRO` for every opcode.
+ *
+ * Field     Description
+ * -----     -----------
+ * op        UpperCamelCase form of opcode id
+ * op_snake  snake_case form of opcode id
+ * token     Pretty-printer string, or null if ugly
+ * length    Number of bytes including any immediate operands
+ * nuses     Number of stack slots consumed by bytecode, -1 if variadic
+ * ndefs     Number of stack slots produced by bytecode
+ * format    JOF_ flags describing instruction operand layout, etc.
+ *
+ * For more about `format`, see the comments on the `JOF_` constants defined in
+ * BytecodeUtil.h.
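+ *
+ * As a quick illustration of the X-macro pattern (the `OPCODE_LENGTH` name
+ * is invented here, not defined by the engine), a table of instruction
+ * lengths can be generated like so:
+ *
+ *   #define OPCODE_LENGTH(op, op_snake, token, length, nuses, ndefs, fmt) \
+ *     length,
+ *   static const int8_t opLengths[] = {FOR_EACH_OPCODE(OPCODE_LENGTH)};
+ *   #undef OPCODE_LENGTH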
+ *
+ *
+ * [SMDOC] Bytecode Invariants
+ *
+ * Creating scripts that do not follow the rules can lead to undefined
+ * behavior. Bytecode has many consumers, not just the interpreter: JITs,
+ * analyses, the debugger. That's why the rules below apply even to code that
+ * can't be reached in ordinary execution (such as code after an infinite loop
+ * or inside an `if (false)` block).
+ *
+ * The `code()` of a script must be a packed (not aligned) sequence of valid
+ * instructions from start to end. Each instruction has a single byte opcode
+ * followed by a number of operand bytes based on the opcode.
+ *
+ * ## Jump instructions
+ *
+ * Operands named `offset`, `forwardOffset`, or `defaultOffset` are jump
+ * offsets, the distance in bytes from the start of the current instruction to
+ * the start of another instruction in the same script. Operands named
+ * `forwardOffset` or `defaultOffset` must be positive.
+ *
+ * Forward jumps must jump to a `JSOp::JumpTarget` instruction. Backward jumps,
+ * indicated by negative offsets, must jump to a `JSOp::LoopHead` instruction.
+ * Jump offsets can't be zero.
+ *
+ * Needless to say, scripts must not contain overlapping instruction sequences
+ * (in the sense of <https://en.wikipedia.org/wiki/Overlapping_gene>).
+ *
+ * A script's `trynotes` and `scopeNotes` impose further constraints. Each try
+ * note and each scope note marks a region of the bytecode where some invariant
+ * holds, or some cleanup behavior is needed--that there's a for-in iterator in
+ * a particular stack slot, for instance, which must be closed on error. All
+ * paths into the span must establish that invariant. In practice, this means
+ * other code never jumps into the span: the only way in is to execute the
+ * bytecode instruction that sets up the invariant (in our example,
+ * `JSOp::Iter`).
+ *
+ * If a script's `trynotes` (see "Try Notes" in JSScript.h) contain a
+ * `JSTRY_CATCH` or `JSTRY_FINALLY` span, there must be a `JSOp::Try`
+ * instruction immediately before the span and a `JSOp::JumpTarget` immediately
+ * after it. Instructions must not jump to this `JSOp::JumpTarget`. (The VM puts
+ * us there on exception.) Furthermore, the instruction sequence immediately
+ * following a `JSTRY_CATCH` span must read `JumpTarget; Exception` or, in
+ * non-function scripts, `JumpTarget; Undefined; SetRval; Exception`. (These
+ * instructions run with an exception pending; other instructions aren't
+ * designed to handle that.)
+ *
+ * Unreachable instructions are allowed, but they have to follow all the rules.
+ *
+ * Control must not reach the end of a script. (Currently, the last instruction
+ * is always JSOp::RetRval.)
+ *
+ * ## Other operands
+ *
+ * Operands named `nameIndex` or `atomIndex` (which appear on instructions that
+ * have `JOF_ATOM` in the `format` field) must be valid indexes into
+ * `script->atoms()`.
+ *
+ * Operands named `argc` (`JOF_ARGC`) are argument counts for call
+ * instructions. `argc` must be small enough that the instruction's nuses is <=
+ * the current stack depth (see "Stack depth" below).
+ *
+ * Operands named `argno` (`JOF_QARG`) refer to an argument of the current
+ * function. `argno` must be in the range `0..script->function()->nargs()`.
+ * Instructions with these operands must appear only in function scripts.
+ *
+ * Operands named `localno` (`JOF_LOCAL`) refer to a local variable stored in
+ * the stack frame. `localno` must be in the range `0..script->nfixed()`.
+ *
+ * Operands named `resumeIndex` (`JOF_RESUMEINDEX`) refer to a resume point in
+ * the current script. `resumeIndex` must be a valid index into
+ * `script->resumeOffsets()`.
+ *
+ * Operands named `hops` and `slot` (`JOF_ENVCOORD`) refer to a slot in an
+ * `EnvironmentObject`. At run time, they must point to a fixed slot in an
+ * object on the current environment chain. See `EnvironmentCoordinates`.
+ *
+ * Operands with the following names must be valid indexes into
+ * `script->gcthings()`, and the pointer in the vector must point to the right
+ * type of thing:
+ *
+ * - `objectIndex` (`JOF_OBJECT`): `PlainObject*` or `ArrayObject*`
+ * - `baseobjIndex` (`JOF_OBJECT`): `PlainObject*`
+ * - `funcIndex` (`JOF_OBJECT`): `JSFunction*`
+ * - `regexpIndex` (`JOF_REGEXP`): `RegExpObject*`
+ * - `shapeIndex` (`JOF_SHAPE`): `Shape*`
+ * - `scopeIndex` (`JOF_SCOPE`): `Scope*`
+ * - `lexicalScopeIndex` (`JOF_SCOPE`): `LexicalScope*`
+ * - `classBodyScopeIndex` (`JOF_SCOPE`): `ClassBodyScope*`
+ * - `withScopeIndex` (`JOF_SCOPE`): `WithScope*`
+ * - `bigIntIndex` (`JOF_BIGINT`): `BigInt*`
+ *
+ * Operands named `icIndex` (`JOF_ICINDEX`) must be exactly the number of
+ * preceding instructions in the script that have the JOF_IC flag.
+ * (Rationale: Each JOF_IC instruction has a unique entry in
+ * `script->jitScript()->icEntries()`. At run time, in the bytecode
+ * interpreter, we have to find that entry. We could store the IC index as an
+ * operand to each JOF_IC instruction, but it's more memory-efficient to use a
+ * counter and reset the counter to `icIndex` after each jump.)
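+ *
+ * A sketch of that counting scheme in an interpreter loop (illustrative
+ * only; `icEntries` stands in for `script->jitScript()->icEntries()`):
+ *
+ *   uint32_t icIndex = 0;
+ *   // When executing any instruction whose format includes JOF_IC:
+ *   ICEntry* entry = &icEntries[icIndex++];
+ *   // On reaching a JOF_ICINDEX instruction (JumpTarget/LoopHead):
+ *   icIndex = GET_ICINDEX(pc);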
+ *
+ * ## Stack depth
+ *
+ * Each instruction has a compile-time stack depth, the number of values on the
+ * interpreter stack just before executing the instruction. It isn't explicitly
+ * present in the bytecode itself, but (for reachable instructions, anyway)
+ * it's a function of the bytecode.
+ *
+ * - The first instruction has stack depth 0.
+ *
+ * - Each successor of an instruction X has a stack depth equal to
+ *
+ * X's stack depth - `js::StackUses(X)` + `js::StackDefs(X)`
+ *
+ * except for `JSOp::Case` (below).
+ *
+ * X's "successors" are: the next instruction in the script, if
+ * `js::FlowsIntoNext(op)` is true for X's opcode; one or more
+ * `JSOp::JumpTarget`s elsewhere, if X is a forward jump or
+ * `JSOp::TableSwitch`; and/or a `JSOp::LoopHead` if it's a backward jump.
+ *
+ * - `JSOp::Case` is a special case because its stack behavior is eccentric.
+ * The formula above is correct for the next instruction. The jump target
+ * has a stack depth that is 1 less.
+ *
+ * - The `JSOp::JumpTarget` instruction immediately following a `JSTRY_CATCH`
+ * or `JSTRY_FINALLY` span has the same stack depth as the `JSOp::Try`
+ * instruction that precedes the span.
+ *
+ * Every instruction covered by the `JSTRY_CATCH` or `JSTRY_FINALLY` span
+ * must have a stack depth >= that value, so that error recovery is
+ * guaranteed to find enough values on the stack to resume there.
+ *
+ * - `script->nslots() - script->nfixed()` must be >= the maximum stack
+ * depth of any instruction in `script`. (The stack frame must be big
+ * enough to run the code.)
+ *
+ * `BytecodeParser::parse()` computes stack depths for every reachable
+ * instruction in a script.
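+ *
+ * For example, `JSOp::Add` has nuses 2 and ndefs 1, so if an `Add` executes
+ * at stack depth 7, its successor executes at depth 7 - 2 + 1 = 6.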
+ *
+ * ## Scopes and environments
+ *
+ * As with stack depth, each instruction has a static scope, which is a
+ * compile-time characterization of the eventual run-time environment chain
+ * when that instruction executes. Just as every instruction has a stack budget
+ * (nuses/ndefs), every instruction either pushes a scope, pops a scope, or
+ * neither. The same successor relation applies as above.
+ *
+ * Every scope used in a script is stored in the `JSScript::gcthings()` vector.
+ * They can be accessed using `getScope(index)` if you know what `index` to
+ * pass.
+ *
+ * The scope of every instruction (that's reachable via the successor relation)
+ * is given in two independent ways: by the bytecode itself and by the scope
+ * notes. The two sources must agree.
+ *
+ * ## Further rules
+ *
+ * All reachable instructions must be reachable without taking any backward
+ * edges.
+ *
+ * Instructions with the `JOF_CHECKSLOPPY` flag must not be used in strict mode
+ * code. `JOF_CHECKSTRICT` instructions must not be used in nonstrict code.
+ *
+ * Many instructions have their own additional rules. These are documented on
+ * the various opcodes below (look for the word "must").
+ */
+// clang-format on
+
+// clang-format off
+/*
+ * SpiderMonkey bytecode categorization (as used in generated documentation):
+ *
+ * [Index]
+ * [Constants]
+ * [Compound primitives]
+ * Record literals
+ * Tuple literals
+ * [Expressions]
+ * Unary operators
+ * Binary operators
+ * Conversions
+ * Other expressions
+ * [Objects]
+ * Creating objects
+ * Defining properties
+ * Accessing properties
+ * Super
+ * Enumeration
+ * Iteration
+ * SetPrototype
+ * Array literals
+ * RegExp literals
+ * Built-in objects
+ * [Functions]
+ * Creating functions
+ * Creating constructors
+ * Calls
+ * Generators and async functions
+ * [Control flow]
+ * Jump targets
+ * Jumps
+ * Return
+ * Exceptions
+ * [Variables and scopes]
+ * Initialization
+ * Looking up bindings
+ * Getting binding values
+ * Setting binding values
+ * Entering and leaving environments
+ * Creating and deleting bindings
+ * Function environment setup
+ * [Stack operations]
+ * [Other]
+ */
+// clang-format on
+
+// clang-format off
+#define FOR_EACH_OPCODE(MACRO) \
+ /*
+ * Push `undefined`.
+ *
+ * Category: Constants
+ * Operands:
+ * Stack: => undefined
+ */ \
+ MACRO(Undefined, undefined, "", 1, 0, 1, JOF_BYTE) \
+ /*
+ * Push `null`.
+ *
+ * Category: Constants
+ * Operands:
+ * Stack: => null
+ */ \
+ MACRO(Null, null, "null", 1, 0, 1, JOF_BYTE) \
+ /*
+ * Push a boolean constant.
+ *
+ * Category: Constants
+ * Operands:
+ * Stack: => true/false
+ */ \
+ MACRO(False, false_, "false", 1, 0, 1, JOF_BYTE) \
+ MACRO(True, true_, "true", 1, 0, 1, JOF_BYTE) \
+ /*
+ * Push the `int32_t` immediate operand as an `Int32Value`.
+ *
+ * `JSOp::Zero`, `JSOp::One`, `JSOp::Int8`, `JSOp::Uint16`, and `JSOp::Uint24`
+ * are all compact encodings for `JSOp::Int32`.
+ *
+ * Category: Constants
+ * Operands: int32_t val
+ * Stack: => val
+ */ \
+ MACRO(Int32, int32, NULL, 5, 0, 1, JOF_INT32) \
+ /*
+ * Push the number `0`.
+ *
+ * Category: Constants
+ * Operands:
+ * Stack: => 0
+ */ \
+ MACRO(Zero, zero, "0", 1, 0, 1, JOF_BYTE) \
+ /*
+ * Push the number `1`.
+ *
+ * Category: Constants
+ * Operands:
+ * Stack: => 1
+ */ \
+ MACRO(One, one, "1", 1, 0, 1, JOF_BYTE) \
+ /*
+ * Push the `int8_t` immediate operand as an `Int32Value`.
+ *
+ * Category: Constants
+ * Operands: int8_t val
+ * Stack: => val
+ */ \
+ MACRO(Int8, int8, NULL, 2, 0, 1, JOF_INT8) \
+ /*
+ * Push the `uint16_t` immediate operand as an `Int32Value`.
+ *
+ * Category: Constants
+ * Operands: uint16_t val
+ * Stack: => val
+ */ \
+ MACRO(Uint16, uint16, NULL, 3, 0, 1, JOF_UINT16) \
+ /*
+ * Push the `uint24_t` immediate operand as an `Int32Value`.
+ *
+ * Category: Constants
+ * Operands: uint24_t val
+ * Stack: => val
+ */ \
+ MACRO(Uint24, uint24, NULL, 4, 0, 1, JOF_UINT24) \
+ /*
+ * Push the 64-bit floating-point immediate operand as a `DoubleValue`.
+ *
+ * If the operand is a NaN, it must be the canonical NaN (see
+ * `JS::detail::CanonicalizeNaN`).
+ *
+ * Category: Constants
+ * Operands: double val
+ * Stack: => val
+ */ \
+ MACRO(Double, double_, NULL, 9, 0, 1, JOF_DOUBLE) \
+ /*
+ * Push the BigInt constant `script->getBigInt(bigIntIndex)`.
+ *
+ * Category: Constants
+ * Operands: uint32_t bigIntIndex
+ * Stack: => bigint
+ */ \
+ MACRO(BigInt, big_int, NULL, 5, 0, 1, JOF_BIGINT) \
+ /*
+ * Push the string constant `script->getAtom(atomIndex)`.
+ *
+ * Category: Constants
+ * Operands: uint32_t atomIndex
+ * Stack: => string
+ */ \
+ MACRO(String, string, NULL, 5, 0, 1, JOF_STRING) \
+ /*
+ * Push a well-known symbol.
+ *
+ * `symbol` must be in range for `JS::SymbolCode`.
+ *
+ * Category: Constants
+ * Operands: uint8_t symbol (the JS::SymbolCode of the symbol to use)
+ * Stack: => symbol
+ */ \
+ MACRO(Symbol, symbol, NULL, 2, 0, 1, JOF_UINT8) \
+ /*
+ * Pop the top value on the stack, discard it, and push `undefined`.
+ *
+ * Implements: [The `void` operator][1], step 3.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-void-operator
+ *
+ * Category: Expressions
+ * Type: Unary operators
+ * Operands:
+ * Stack: val => undefined
+ */ \
+ MACRO(Void, void_, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * [The `typeof` operator][1].
+ *
+ * Infallible. The result is always a string that depends on the [type][2]
+ * of `val`.
+ *
+ * `JSOp::Typeof` and `JSOp::TypeofExpr` are the same except
+ * that--amazingly--`JSOp::Typeof` affects the behavior of an immediately
+ * *preceding* `JSOp::GetName` or `JSOp::GetGName` instruction! This is how
+ * we implement [`typeof`][1] step 2, making `typeof nonExistingVariable`
+ * return `"undefined"` instead of throwing a ReferenceError.
+ *
+ * In a global scope:
+ *
+ * - `typeof x` compiles to `GetGName "x"; Typeof`.
+ * - `typeof (0, x)` compiles to `GetGName "x"; TypeofExpr`.
+ *
+ * Emitting the same bytecode for these two expressions would be a bug.
+ * Per spec, the latter throws a ReferenceError if `x` doesn't exist.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-typeof-operator
+ * [2]: https://tc39.es/ecma262/#sec-ecmascript-language-types
+ *
+ * Category: Expressions
+ * Type: Unary operators
+ * Operands:
+ * Stack: val => (typeof val)
+ */ \
+ MACRO(Typeof, typeof_, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
+ MACRO(TypeofExpr, typeof_expr, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The unary `+` operator][1].
+ *
+ * `+val` doesn't do any actual math. It just calls [ToNumber][2](val).
+ *
+ * The conversion can call `.toString()`/`.valueOf()` methods and can
+ * throw. The result on success is always a Number. (Per spec, unary `-`
+ * supports BigInts, but unary `+` does not.)
+ *
+ * [1]: https://tc39.es/ecma262/#sec-unary-plus-operator
+ * [2]: https://tc39.es/ecma262/#sec-tonumber
+ *
+ * Category: Expressions
+ * Type: Unary operators
+ * Operands:
+ * Stack: val => (+val)
+ */ \
+ MACRO(Pos, pos, "+ ", 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The unary `-` operator][1].
+ *
+ * Convert `val` to a numeric value, then push `-val`. The conversion can
+ * call `.toString()`/`.valueOf()` methods and can throw. The result on
+ * success is always numeric.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-unary-minus-operator
+ *
+ * Category: Expressions
+ * Type: Unary operators
+ * Operands:
+ * Stack: val => (-val)
+ */ \
+ MACRO(Neg, neg, "- ", 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The bitwise NOT operator][1] (`~`).
+ *
+ * `val` is converted to an integer, then bitwise negated. The conversion
+ * can call `.toString()`/`.valueOf()` methods and can throw. The result on
+ * success is always an Int32 or BigInt value.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-bitwise-not-operator
+ *
+ * Category: Expressions
+ * Type: Unary operators
+ * Operands:
+ * Stack: val => (~val)
+ */ \
+ MACRO(BitNot, bit_not, "~", 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The logical NOT operator][1] (`!`).
+ *
+ * `val` is first converted with [ToBoolean][2], then logically
+ * negated. The result is always a boolean value. This does not call
+ * user-defined methods and can't throw.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-logical-not-operator
+ * [2]: https://tc39.es/ecma262/#sec-toboolean
+ *
+ * Category: Expressions
+ * Type: Unary operators
+ * Operands:
+ * Stack: val => (!val)
+ */ \
+ MACRO(Not, not_, "!", 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [Binary bitwise operations][1] (`|`, `^`, `&`).
+ *
+ * The arguments are converted to integers first. The conversion can call
+ * `.toString()`/`.valueOf()` methods and can throw. The result on success
+ * is always an Int32 or BigInt Value.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-binary-bitwise-operators
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval OP rval)
+ */ \
+ MACRO(BitOr, bit_or, "|", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(BitXor, bit_xor, "^", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(BitAnd, bit_and, "&", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Loose equality operators (`==` and `!=`).
+ *
+ * Pop two values, compare them, and push the boolean result. The
+ * comparison may perform conversions that call `.toString()`/`.valueOf()`
+ * methods and can throw.
+ *
+ * Implements: [Abstract Equality Comparison][1].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-abstract-equality-comparison
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval OP rval)
+ */ \
+ MACRO(Eq, eq, "==", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Ne, ne, "!=", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Strict equality operators (`===` and `!==`).
+ *
+ * Pop two values, check whether they're equal, and push the boolean
+ * result. This does not call user-defined methods and can't throw
+ * (except possibly due to OOM while flattening a string).
+ *
+ * Implements: [Strict Equality Comparison][1].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-strict-equality-comparison
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval OP rval)
+ */ \
+ MACRO(StrictEq, strict_eq, "===", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(StrictNe, strict_ne, "!==", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Relative operators (`<`, `>`, `<=`, `>=`).
+ *
+ * Pop two values, compare them, and push the boolean result. The
+ * comparison may perform conversions that call `.toString()`/`.valueOf()`
+ * methods and can throw.
+ *
+ * Implements: [Relational Operators: Evaluation][1].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-relational-operators-runtime-semantics-evaluation
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval OP rval)
+ */ \
+ MACRO(Lt, lt, "<", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Gt, gt, ">", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Le, le, "<=", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Ge, ge, ">=", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The `instanceof` operator][1].
+ *
+ * This throws a `TypeError` if `target` is not an object. It calls
+ * `target[Symbol.hasInstance](value)` if the method exists. On success,
+ * the result is always a boolean value.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-instanceofoperator
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: value, target => (value instanceof target)
+ */ \
+ MACRO(Instanceof, instanceof, "instanceof", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The `in` operator][1].
+ *
+ * Push `true` if `obj` has a property with the key `id`. Otherwise push `false`.
+ *
+ * This throws a `TypeError` if `obj` is not an object. This can fire
+ * proxy hooks and can throw. On success, the result is always a boolean
+ * value.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-relational-operators-runtime-semantics-evaluation
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: id, obj => (id in obj)
+ */ \
+ MACRO(In, in_, "in", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [Bitwise shift operators][1] (`<<`, `>>`, `>>>`).
+ *
+ * Pop two values, convert them to integers, perform a bitwise shift, and
+ * push the result.
+ *
+ * Conversion can call `.toString()`/`.valueOf()` methods and can throw.
+ * The result on success is always an Int32 or BigInt Value.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-bitwise-shift-operators
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval OP rval)
+ */ \
+ MACRO(Lsh, lsh, "<<", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Rsh, rsh, ">>", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Ursh, ursh, ">>>", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The binary `+` operator][1].
+ *
+ * Pop two values, convert them to primitive values, add them, and push the
+ * result. If both values are numeric, add them; if either is a
+ * string, do string concatenation instead.
+ *
+ * The conversion can call `.toString()`/`.valueOf()` methods and can throw.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-addition-operator-plus-runtime-semantics-evaluation
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval + rval)
+ */ \
+ MACRO(Add, add, "+", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The binary `-` operator][1].
+ *
+ * Pop two values, convert them to numeric values, subtract the top value
+ * from the other one, and push the result.
+ *
+ * The conversion can call `.toString()`/`.valueOf()` methods and can
+ * throw. On success, the result is always numeric.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-subtraction-operator-minus-runtime-semantics-evaluation
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval - rval)
+ */ \
+ MACRO(Sub, sub, "-", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Add or subtract 1.
+ *
+ * `val` must already be a numeric value, such as the result of
+ * `JSOp::ToNumeric`.
+ *
+ * Implements: [The `++` and `--` operators][1], step 3 of each algorithm.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-postfix-increment-operator
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: val => (val +/- 1)
+ */ \
+ MACRO(Inc, inc, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Dec, dec, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The multiplicative operators][1] (`*`, `/`, `%`).
+ *
+ * Pop two values, convert them to numeric values, do math, and push the
+ * result.
+ *
+ * The conversion can call `.toString()`/`.valueOf()` methods and can
+ * throw. On success, the result is always numeric.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-multiplicative-operators-runtime-semantics-evaluation
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval OP rval)
+ */ \
+ MACRO(Mul, mul, "*", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Div, div, "/", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ MACRO(Mod, mod, "%", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * [The exponentiation operator][1] (`**`).
+ *
+ * Pop two values, convert them to numeric values, do exponentiation, and
+ * push the result. The top value is the exponent.
+ *
+ * The conversion can call `.toString()`/`.valueOf()` methods and can
+ * throw. This throws a RangeError if both values are BigInts and the
+ * exponent is negative.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-exp-operator
+ *
+ * Category: Expressions
+ * Type: Binary operators
+ * Operands:
+ * Stack: lval, rval => (lval ** rval)
+ */ \
+ MACRO(Pow, pow, "**", 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Convert a value to a property key.
+ *
+ * Implements: [ToPropertyKey][1], except that if the result would be the
+ * string representation of some integer in the range 0..2^31, we push the
+ * corresponding Int32 value instead. This is because the spec insists that
+ * array indices are strings, whereas for us they are integers.
+ *
+ * This is used for code like `++obj[index]`, which must do both a
+ * `JSOp::GetElem` and a `JSOp::SetElem` with the same property key. Both
+ * instructions would convert `index` to a property key for us, but the
+ * spec says to convert it only once.
+ *
+ * The conversion can call `.toString()`/`.valueOf()` methods and can
+ * throw.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-topropertykey
+ *
+ * Category: Expressions
+ * Type: Conversions
+ * Operands:
+ * Stack: propertyNameValue => propertyKey
+ */ \
+ MACRO(ToPropertyKey, to_property_key, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Convert a value to a numeric value (a Number or BigInt).
+ *
+ * Implements: [ToNumeric][1](val).
+ *
+ * Note: This is used to implement [`++` and `--`][2]. Surprisingly, it's
+ * not possible to get the right behavior using `JSOp::Add` and `JSOp::Sub`
+ * alone. For one thing, `JSOp::Add` sometimes does string concatenation,
+ * while `++` always does numeric addition. More fundamentally, the result
+ * of evaluating `x--` is ToNumeric(old value of `x`), a value that the
+ * sequence `GetLocal "x"; One; Sub; SetLocal "x"` does not give us.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-tonumeric
+ * [2]: https://tc39.es/ecma262/#sec-postfix-increment-operator
+ *
+ * Category: Expressions
+ * Type: Conversions
+ * Operands:
+ * Stack: val => ToNumeric(val)
+ */ \
+ MACRO(ToNumeric, to_numeric, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Convert a value to a string.
+ *
+ * Implements: [ToString][1](val).
+ *
+ * Note: This is used in code for template literals, like `${x}${y}`. Each
+ * substituted value must be converted using ToString. `JSOp::Add` by itself
+ * would do a slightly wrong kind of conversion (hint="number" rather than
+ * hint="string").
+ *
+ * [1]: https://tc39.es/ecma262/#sec-tostring
+ *
+ * Category: Expressions
+   * Type: Conversions
+   * Operands:
+   * Stack: val => ToString(val)
+ */ \
+ MACRO(ToString, to_string, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Test whether the value on top of the stack is `NullValue` or
+ * `UndefinedValue` and push the boolean result.
+ *
+ * Category: Expressions
+ * Type: Other expressions
+ * Operands:
+ * Stack: val => val, IsNullOrUndefined(val)
+ */ \
+ MACRO(IsNullOrUndefined, is_null_or_undefined, NULL, 1, 1, 2, JOF_BYTE) \
+ /*
+ * Push the global `this` value. Not to be confused with the `globalThis`
+ * property on the global.
+ *
+ * This must be used only in scopes where `this` refers to the global
+ * `this`.
+ *
+ * Category: Expressions
+ * Type: Other expressions
+ * Operands:
+ * Stack: => this
+ */ \
+ MACRO(GlobalThis, global_this, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Push the global `this` value for non-syntactic scope. Not to be confused
+ * with the `globalThis` property on the global.
+ *
+ * This must be used only in scopes where `this` refers to the global
+ * `this`.
+ *
+ * Category: Expressions
+ * Type: Other expressions
+ * Operands:
+ * Stack: => this
+ */ \
+ MACRO(NonSyntacticGlobalThis, non_syntactic_global_this, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Push the value of `new.target`.
+ *
+ * The result is a constructor or `undefined`.
+ *
+ * This must be used only in non-arrow function scripts.
+ *
+ * Implements: [GetNewTarget][1].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getnewtarget
+ *
+ * Category: Expressions
+ * Type: Other expressions
+ * Operands:
+ * Stack: => new.target
+ */ \
+ MACRO(NewTarget, new_target, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Dynamic import of the module specified by the string value on the top of
+ * the stack.
+ *
+ * Implements: [Import Calls][1].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-import-calls
+ *
+ * Category: Expressions
+ * Type: Other expressions
+ * Operands:
+ * Stack: moduleId, options => promise
+ */ \
+ MACRO(DynamicImport, dynamic_import, NULL, 1, 2, 1, JOF_BYTE) \
+ /*
+ * Push the `import.meta` object.
+ *
+ * This must be used only in module code.
+ *
+ * Category: Expressions
+ * Type: Other expressions
+ * Operands:
+ * Stack: => import.meta
+ */ \
+ MACRO(ImportMeta, import_meta, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Create and push a new object with no properties.
+ *
+ * Category: Objects
+ * Type: Creating objects
+ * Operands:
+ * Stack: => obj
+ */ \
+ MACRO(NewInit, new_init, NULL, 1, 0, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Create and push a new object of a predetermined shape.
+ *
+ * The new object has the shape `script->getShape(shapeIndex)`.
+ * Subsequent `InitProp` instructions must fill in all slots of the new
+ * object before it is used in any other way.
+ *
+ * Category: Objects
+ * Type: Creating objects
+ * Operands: uint32_t shapeIndex
+ * Stack: => obj
+ */ \
+ MACRO(NewObject, new_object, NULL, 5, 0, 1, JOF_SHAPE|JOF_IC) \
+ /*
+ * Push a preconstructed object.
+ *
+ * Going one step further than `JSOp::NewObject`, this instruction doesn't
+ * just reuse the shape--it actually pushes the preconstructed object
+ * `script->getObject(objectIndex)` right onto the stack. The object must
+ * be a singleton `PlainObject` or `ArrayObject`.
+ *
+ * The spec requires that an *ObjectLiteral* or *ArrayLiteral* creates a
+ * new object every time it's evaluated, so this instruction must not be
+ * used anywhere it might be executed more than once.
+ *
+   * This may only be used in non-function run-once scripts. Care must also
+   * be taken not to emit it in loops or other constructs where it could run
+   * more than once.
+ *
+ * Category: Objects
+ * Type: Creating objects
+ * Operands: uint32_t objectIndex
+ * Stack: => obj
+ */ \
+ MACRO(Object, object, NULL, 5, 0, 1, JOF_OBJECT) \
+ /*
+ * Create and push a new ordinary object with the provided [[Prototype]].
+ *
+ * This is used to create the `.prototype` object for derived classes.
+ *
+ * Category: Objects
+ * Type: Creating objects
+ * Operands:
+ * Stack: proto => obj
+ */ \
+ MACRO(ObjWithProto, obj_with_proto, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Define a data property on an object.
+ *
+ * `obj` must be an object.
+ *
+ * Implements: [CreateDataPropertyOrThrow][1] as used in
+ * [PropertyDefinitionEvaluation][2] of regular and shorthand
+ * *PropertyDefinition*s.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-createdatapropertyorthrow
+ * [2]: https://tc39.es/ecma262/#sec-object-initializer-runtime-semantics-propertydefinitionevaluation
+ *
+ * Category: Objects
+ * Type: Defining properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj, val => obj
+ */ \
+ MACRO(InitProp, init_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT|JOF_IC) \
+ /*
+ * Like `JSOp::InitProp`, but define a non-enumerable property.
+ *
+ * This is used to define class methods.
+ *
+ * Implements: [PropertyDefinitionEvaluation][1] for methods, steps 3 and
+ * 4, when *enumerable* is false.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-method-definitions-runtime-semantics-propertydefinitionevaluation
+ *
+ * Category: Objects
+ * Type: Defining properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj, val => obj
+ */ \
+ MACRO(InitHiddenProp, init_hidden_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT|JOF_IC) \
+ /*
+ * Like `JSOp::InitProp`, but define a non-enumerable, non-writable,
+ * non-configurable property.
+ *
+ * This is used to define the `.prototype` property on classes.
+ *
+ * Implements: [MakeConstructor][1], step 8, when *writablePrototype* is
+ * false.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-makeconstructor
+ *
+ * Category: Objects
+ * Type: Defining properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj, val => obj
+ */ \
+ MACRO(InitLockedProp, init_locked_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT|JOF_IC) \
+ /*
+ * Define a data property on `obj` with property key `id` and value `val`.
+ *
+ * `obj` must be an object.
+ *
+ * Implements: [CreateDataPropertyOrThrow][1]. This instruction is used for
+ * object literals like `{0: val}` and `{[id]: val}`, and methods like
+ * `*[Symbol.iterator]() {}`.
+ *
+ * `JSOp::InitHiddenElem` is the same but defines a non-enumerable property,
+ * for class methods.
+   * `JSOp::InitLockedElem` is the same but defines a non-enumerable,
+   * non-writable, non-configurable property, for private class methods.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-createdatapropertyorthrow
+ *
+ * Category: Objects
+ * Type: Defining properties
+ * Operands:
+ * Stack: obj, id, val => obj
+ */ \
+ MACRO(InitElem, init_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitHiddenElem, init_hidden_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitLockedElem, init_locked_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT|JOF_IC) \
+ /*
+ * Define an accessor property on `obj` with the given `getter`.
+ * `nameIndex` gives the property name.
+ *
+ * `obj` must be an object and `getter` must be a function.
+ *
+ * `JSOp::InitHiddenPropGetter` is the same but defines a non-enumerable
+ * property, for getters in classes.
+ *
+ * Category: Objects
+ * Type: Defining properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj, getter => obj
+ */ \
+ MACRO(InitPropGetter, init_prop_getter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT) \
+ MACRO(InitHiddenPropGetter, init_hidden_prop_getter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT) \
+ /*
+ * Define an accessor property on `obj` with property key `id` and the given `getter`.
+ *
+ * This is used to implement getters like `get [id]() {}` or `get 0() {}`.
+ *
+ * `obj` must be an object and `getter` must be a function.
+ *
+ * `JSOp::InitHiddenElemGetter` is the same but defines a non-enumerable
+ * property, for getters in classes.
+ *
+ * Category: Objects
+ * Type: Defining properties
+ * Operands:
+ * Stack: obj, id, getter => obj
+ */ \
+ MACRO(InitElemGetter, init_elem_getter, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT) \
+ MACRO(InitHiddenElemGetter, init_hidden_elem_getter, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT) \
+ /*
+ * Define an accessor property on `obj` with the given `setter`.
+ *
+ * This is used to implement ordinary setters like `set foo(v) {}`.
+ *
+ * `obj` must be an object and `setter` must be a function.
+ *
+ * `JSOp::InitHiddenPropSetter` is the same but defines a non-enumerable
+ * property, for setters in classes.
+ *
+ * Category: Objects
+ * Type: Defining properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj, setter => obj
+ */ \
+ MACRO(InitPropSetter, init_prop_setter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT) \
+ MACRO(InitHiddenPropSetter, init_hidden_prop_setter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT) \
+ /*
+   * Define an accessor property on `obj` with property key `id` and the
+ * given `setter`.
+ *
+ * This is used to implement setters with computed property keys or numeric
+ * keys.
+ *
+ * `JSOp::InitHiddenElemSetter` is the same but defines a non-enumerable
+ * property, for setters in classes.
+ *
+ * Category: Objects
+ * Type: Defining properties
+ * Operands:
+ * Stack: obj, id, setter => obj
+ */ \
+ MACRO(InitElemSetter, init_elem_setter, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT) \
+ MACRO(InitHiddenElemSetter, init_hidden_elem_setter, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT) \
+ /*
+ * Get the value of the property `obj.name`. This can call getters and
+ * proxy traps.
+ *
+ * Implements: [GetV][1], [GetValue][2] step 5.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getv
+ * [2]: https://tc39.es/ecma262/#sec-getvalue
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj => obj[name]
+ */ \
+ MACRO(GetProp, get_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_PROP|JOF_IC) \
+ /*
+ * Get the value of the property `obj[key]`.
+ *
+ * Implements: [GetV][1], [GetValue][2] step 5.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getv
+ * [2]: https://tc39.es/ecma262/#sec-getvalue
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands:
+ * Stack: obj, key => obj[key]
+ */ \
+ MACRO(GetElem, get_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_ELEM|JOF_IC) \
+ /*
+ * Non-strict assignment to a property, `obj.name = val`.
+ *
+ * This throws a TypeError if `obj` is null or undefined. If it's a
+ * primitive value, the property is set on ToObject(`obj`), typically with
+ * no effect.
+ *
+ * Implements: [PutValue][1] step 6 for non-strict code.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-putvalue
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj, val => val
+ */ \
+ MACRO(SetProp, set_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC) \
+ /*
+ * Like `JSOp::SetProp`, but for strict mode code. Throw a TypeError if
+ * `obj[key]` exists but is non-writable, if it's an accessor property with
+ * no setter, or if `obj` is a primitive value.
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj, val => val
+ */ \
+ MACRO(StrictSetProp, strict_set_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC) \
+ /*
+ * Non-strict assignment to a property, `obj[key] = val`.
+ *
+ * Implements: [PutValue][1] step 6 for non-strict code.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-putvalue
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands:
+ * Stack: obj, key, val => val
+ */ \
+ MACRO(SetElem, set_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC) \
+ /*
+ * Like `JSOp::SetElem`, but for strict mode code. Throw a TypeError if
+ * `obj[key]` exists but is non-writable, if it's an accessor property with
+ * no setter, or if `obj` is a primitive value.
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands:
+ * Stack: obj, key, val => val
+ */ \
+ MACRO(StrictSetElem, strict_set_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC) \
+ /*
+ * Delete a property from `obj`. Push true on success, false if the
+ * property existed but could not be deleted. This implements `delete
+ * obj.name` in non-strict code.
+ *
+ * Throws if `obj` is null or undefined. Can call proxy traps.
+ *
+ * Implements: [`delete obj.propname`][1] step 5 in non-strict code.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-delete-operator-runtime-semantics-evaluation
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj => succeeded
+ */ \
+ MACRO(DelProp, del_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_PROP|JOF_CHECKSLOPPY) \
+ /*
+ * Like `JSOp::DelProp`, but for strict mode code. Push `true` on success,
+ * else throw a TypeError.
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands: uint32_t nameIndex
+ * Stack: obj => succeeded
+ */ \
+ MACRO(StrictDelProp, strict_del_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_PROP|JOF_CHECKSTRICT) \
+ /*
+ * Delete the property `obj[key]` and push `true` on success, `false`
+ * if the property existed but could not be deleted.
+ *
+ * This throws if `obj` is null or undefined. Can call proxy traps.
+ *
+ * Implements: [`delete obj[key]`][1] step 5 in non-strict code.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-delete-operator-runtime-semantics-evaluation
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands:
+ * Stack: obj, key => succeeded
+ */ \
+ MACRO(DelElem, del_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_ELEM|JOF_CHECKSLOPPY) \
+ /*
+   * Like `JSOp::DelElem`, but for strict mode code. Push `true` on success,
+ * else throw a TypeError.
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands:
+ * Stack: obj, key => succeeded
+ */ \
+ MACRO(StrictDelElem, strict_del_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_ELEM|JOF_CHECKSTRICT) \
+ /*
+ * Push true if `obj` has an own property `id`.
+ *
+ * Note that `obj` is the top value, like `JSOp::In`.
+ *
+ * This opcode is not used for normal JS. Self-hosted code uses it by
+ * calling the intrinsic `hasOwn(id, obj)`. For example,
+ * `Object.prototype.hasOwnProperty` is implemented this way (see
+ * js/src/builtin/Object.js).
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands:
+ * Stack: id, obj => (obj.hasOwnProperty(id))
+ */ \
+ MACRO(HasOwn, has_own, NULL, 1, 2, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Push a bool representing the presence of private field id on obj.
+ * May throw, depending on the ThrowCondition.
+ *
+ * Two arguments:
+ * - throwCondition: One of the ThrowConditions defined in
+ * ThrowMsgKind.h. Determines why (or if) this op will throw.
+ * - msgKind: One of the ThrowMsgKinds defined in ThrowMsgKind.h, which
+ * maps to one of the messages in js.msg. Note: It's not possible to
+ * pass arguments to the message at the moment.
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands: ThrowCondition throwCondition, ThrowMsgKind msgKind
+   * Stack: obj, key => obj, key, (obj.hasOwnProperty(key))
+ */ \
+ MACRO(CheckPrivateField, check_private_field, NULL, 3, 2, 3, JOF_TWO_UINT8|JOF_CHECKSTRICT|JOF_IC) \
+ /*
+ * Push a new private name.
+ *
+ * Category: Objects
+ * Type: Accessing properties
+ * Operands: uint32_t nameIndex
+ * Stack: => private_name
+ */ \
+ MACRO(NewPrivateName, new_private_name, NULL, 5, 0, 1, JOF_ATOM) \
+ /*
+ * Push the SuperBase of the method `callee`. The SuperBase is
+ * `callee.[[HomeObject]].[[GetPrototypeOf]]()`, the object where `super`
+ * property lookups should begin.
+ *
+ * `callee` must be a function that has a HomeObject that's an object,
+ * typically produced by `JSOp::Callee` or `JSOp::EnvCallee`.
+ *
+ * Implements: [GetSuperBase][1], except that instead of the environment,
+ * the argument supplies the callee.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getsuperbase
+ *
+ * Category: Objects
+ * Type: Super
+ * Operands:
+ * Stack: callee => superBase
+ */ \
+ MACRO(SuperBase, super_base, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Get the value of `receiver.name`, starting the property search at `obj`.
+ * In spec terms, `obj.[[Get]](name, receiver)`.
+ *
+ * Implements: [GetValue][1] for references created by [`super.name`][2].
+ * The `receiver` is `this` and `obj` is the SuperBase of the enclosing
+ * method.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getvalue
+ * [2]: https://tc39.es/ecma262/#sec-super-keyword-runtime-semantics-evaluation
+ *
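+ * For example (a sketch, assuming a base class `A` with a method `f`):
+ *
+ *     class B extends A {
+ *       m() { return super.f(); }   // `super.f` uses GetPropSuper
+ *     }
+ *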
+ * Category: Objects
+ * Type: Super
+ * Operands: uint32_t nameIndex
+ * Stack: receiver, obj => super.name
+ */ \
+ MACRO(GetPropSuper, get_prop_super, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_IC) \
+ /*
+ * Get the value of `receiver[key]`, starting the property search at `obj`.
+ * In spec terms, `obj.[[Get]](key, receiver)`.
+ *
+ * Implements: [GetValue][1] for references created by [`super[key]`][2]
+ * (where the `receiver` is `this` and `obj` is the SuperBase of the enclosing
+ * method); [`Reflect.get(obj, key, receiver)`][3].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getvalue
+ * [2]: https://tc39.es/ecma262/#sec-super-keyword-runtime-semantics-evaluation
+ * [3]: https://tc39.es/ecma262/#sec-reflect.get
+ *
+ * Category: Objects
+ * Type: Super
+ * Operands:
+ * Stack: receiver, key, obj => super[key]
+ */ \
+ MACRO(GetElemSuper, get_elem_super, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_IC) \
+ /*
+ * Assign `val` to `receiver.name`, starting the search for an existing
+ * property at `obj`. In spec terms, `obj.[[Set]](name, val, receiver)`.
+ *
+ * Implements: [PutValue][1] for references created by [`super.name`][2] in
+ * non-strict code. The `receiver` is `this` and `obj` is the SuperBase of
+ * the enclosing method.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-putvalue
+ * [2]: https://tc39.es/ecma262/#sec-super-keyword-runtime-semantics-evaluation
+ *
+ * Category: Objects
+ * Type: Super
+ * Operands: uint32_t nameIndex
+ * Stack: receiver, obj, val => val
+ */ \
+ MACRO(SetPropSuper, set_prop_super, NULL, 5, 3, 1, JOF_ATOM|JOF_PROP|JOF_PROPSET|JOF_CHECKSLOPPY) \
+ /*
+ * Like `JSOp::SetPropSuper`, but for strict mode code.
+ *
+ * Category: Objects
+ * Type: Super
+ * Operands: uint32_t nameIndex
+ * Stack: receiver, obj, val => val
+ */ \
+ MACRO(StrictSetPropSuper, strict_set_prop_super, NULL, 5, 3, 1, JOF_ATOM|JOF_PROP|JOF_PROPSET|JOF_CHECKSTRICT) \
+ /*
+ * Assign `val` to `receiver[key]`, starting the search for an existing
+ * property at `obj`. In spec terms, `obj.[[Set]](key, val, receiver)`.
+ *
+ * Implements: [PutValue][1] for references created by [`super[key]`][2] in
+ * non-strict code. The `receiver` is `this` and `obj` is the SuperBase of
+ * the enclosing method.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-putvalue
+ * [2]: https://tc39.es/ecma262/#sec-super-keyword-runtime-semantics-evaluation
+ *
+ * Category: Objects
+ * Type: Super
+ * Operands:
+ * Stack: receiver, key, obj, val => val
+ */ \
+ MACRO(SetElemSuper, set_elem_super, NULL, 1, 4, 1, JOF_BYTE|JOF_ELEM|JOF_PROPSET|JOF_CHECKSLOPPY) \
+ /*
+ * Like `JSOp::SetElemSuper`, but for strict mode code.
+ *
+ * Category: Objects
+ * Type: Super
+ * Operands:
+ * Stack: receiver, key, obj, val => val
+ */ \
+ MACRO(StrictSetElemSuper, strict_set_elem_super, NULL, 1, 4, 1, JOF_BYTE|JOF_ELEM|JOF_PROPSET|JOF_CHECKSTRICT) \
+ /*
+ * Set up a for-in loop by pushing a `PropertyIteratorObject` over the
+ * enumerable properties of `val`.
+ *
+ * Implements: [ForIn/OfHeadEvaluation][1] step 6,
+ * [EnumerateObjectProperties][2]. (The spec refers to an "Iterator object"
+ * with a `next` method, but notes that it "is never directly accessible"
+ * to scripts. The object we use for this has no public methods.)
+ *
+ * If `val` is null or undefined, this pushes an empty iterator.
+ *
+ * The `iter` object pushed by this instruction must not be used or removed
+ * from the stack except by `JSOp::MoreIter` and `JSOp::EndIter`, or by error
+ * handling.
+ *
+ * The script's `JSScript::trynotes()` must mark the body of the `for-in`
+ * loop, i.e. exactly those instructions that begin executing with `iter`
+ * on the stack, starting with the next instruction (always
+ * `JSOp::LoopHead`). Code must not jump into or out of this region: control
+ * can enter only by executing `JSOp::Iter` and can exit only by executing a
+ * `JSOp::EndIter` or by exception unwinding. (A `JSOp::EndIter` is always
+ * emitted at the end of the loop, and extra copies are emitted on "exit
+ * slides", where a `break`, `continue`, or `return` statement exits the
+ * loop.)
+ *
+ * Typically a single try note entry marks the contiguous chunk of bytecode
+ * from the instruction after `JSOp::Iter` to `JSOp::EndIter` (inclusive);
+ * but if that range contains any instructions on exit slides, after a
+ * `JSOp::EndIter`, then those must be correctly noted as *outside* the
+ * loop.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-runtime-semantics-forin-div-ofheadevaluation-tdznames-expr-iterationkind
+ * [2]: https://tc39.es/ecma262/#sec-enumerate-object-properties
+ *
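+ * A rough sketch of a `for (var p in val) body` loop (not the exact
+ * emitted sequence):
+ *
+ *     <val>; Iter               # iter
+ *     LoopHead                  # iter
+ *     MoreIter                  # iter name
+ *     IsNoIter                  # iter name done
+ *     JumpIfTrue END            # iter name
+ *     <store name in p; body>   # iter
+ *     Goto LoopHead
+ *     END: EndIter              #
+ *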
+ * Category: Objects
+ * Type: Enumeration
+ * Operands:
+ * Stack: val => iter
+ */ \
+ MACRO(Iter, iter, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Get the next property name for a for-in loop.
+ *
+ * `iter` must be a `PropertyIteratorObject` produced by `JSOp::Iter`. This
+ * pushes the property name for the next loop iteration, or
+ * `MagicValue(JS_NO_ITER_VALUE)` if there are no more enumerable
+ * properties to iterate over. The magic value must be used only by
+ * `JSOp::IsNoIter` and `JSOp::EndIter`.
+ *
+ * Category: Objects
+ * Type: Enumeration
+ * Operands:
+ * Stack: iter => iter, name
+ */ \
+ MACRO(MoreIter, more_iter, NULL, 1, 1, 2, JOF_BYTE) \
+ /*
+ * Test whether the value on top of the stack is
+ * `MagicValue(JS_NO_ITER_VALUE)` and push the boolean result.
+ *
+ * Category: Objects
+ * Type: Enumeration
+ * Operands:
+ * Stack: val => val, done
+ */ \
+ MACRO(IsNoIter, is_no_iter, NULL, 1, 1, 2, JOF_BYTE) \
+ /*
+ * Exit a for-in loop, closing the iterator.
+ *
+ * `iter` must be a `PropertyIteratorObject` pushed by `JSOp::Iter`.
+ *
+ * Category: Objects
+ * Type: Enumeration
+ * Operands:
+ * Stack: iter, iterval =>
+ */ \
+ MACRO(EndIter, end_iter, NULL, 1, 2, 0, JOF_BYTE) \
+ /*
+ * If the iterator object on top of the stack has a `return` method,
+ * call that method. If the method exists but does not return an object,
+ * and `kind` is not `CompletionKind::Throw`, throw a TypeError. (If
+ * `kind` is `Throw`, the error we are already throwing takes precedence.)
+ *
+ * `iter` must be an object conforming to the [Iterator][1] interface.
+ *
+ * Implements: [IteratorClose][2]
+ *
+ * [1]: https://tc39.es/ecma262/#sec-iterator-interface
+ * [2]: https://tc39.es/ecma262/#sec-iteratorclose
+ *
+ * Category: Objects
+ * Type: Iteration
+ * Operands: CompletionKind kind
+ * Stack: iter =>
+ */ \
+ MACRO(CloseIter, close_iter, NULL, 2, 1, 0, JOF_UINT8|JOF_IC) \
+ /*
+ * Check that the top value on the stack is an object, and throw a
+ * TypeError if not. `kind` is used only to generate an appropriate error
+ * message.
+ *
+ * Implements: [GetIterator][1] step 5, [IteratorNext][2] step 3. Both
+ * operations call a JS method which scripts can define however they want,
+ * so they check afterwards that the method returned an object.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getiterator
+ * [2]: https://tc39.es/ecma262/#sec-iteratornext
+ *
+ * Category: Objects
+ * Type: Iteration
+ * Operands: CheckIsObjectKind kind
+ * Stack: result => result
+ */ \
+ MACRO(CheckIsObj, check_is_obj, NULL, 2, 1, 1, JOF_UINT8) \
+ /*
+ * Throw a TypeError if `val` is `null` or `undefined`.
+ *
+ * Implements: [RequireObjectCoercible][1]. But most instructions that
+ * require an object will perform this check for us, so of the dozens of
+ * calls to RequireObjectCoercible in the spec, we need this instruction
+ * only for [destructuring assignment][2] and [initialization][3].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-requireobjectcoercible
+ * [2]: https://tc39.es/ecma262/#sec-runtime-semantics-destructuringassignmentevaluation
+ * [3]: https://tc39.es/ecma262/#sec-destructuring-binding-patterns-runtime-semantics-bindinginitialization
+ *
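+ * For example, both of these throw a TypeError via this check:
+ *
+ *     var {a} = null;       // destructuring initialization
+ *     ({a} = undefined);    // destructuring assignment
+ *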
+ * Category: Objects
+ * Type: Iteration
+ * Operands:
+ * Stack: val => val
+ */ \
+ MACRO(CheckObjCoercible, check_obj_coercible, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Create and push an async iterator wrapping the sync iterator `iter`.
+ * `next` should be `iter`'s `.next` method.
+ *
+ * Implements: [CreateAsyncToSyncIterator][1]. The spec says this operation
+ * takes one argument, but that argument is a Record with two relevant
+ * fields, `[[Iterator]]` and `[[NextMethod]]`.
+ *
+ * Used for `for await` loops.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-createasyncfromsynciterator
+ *
+ * Category: Objects
+ * Type: Iteration
+ * Operands:
+ * Stack: iter, next => asynciter
+ */ \
+ MACRO(ToAsyncIter, to_async_iter, NULL, 1, 2, 1, JOF_BYTE) \
+ /*
+ * Set the prototype of `obj`.
+ *
+ * `obj` must be an object.
+ *
+ * Implements: [B.3.1 __proto__ Property Names in Object Initializers][1], step 7.a.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-__proto__-property-names-in-object-initializers
+ *
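+ * For example:
+ *
+ *     var obj = {__proto__: protoVal};   // MutateProto sets obj's
+ *                                        // [[Prototype]] to protoVal
+ *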
+ * Category: Objects
+ * Type: SetPrototype
+ * Operands:
+ * Stack: obj, protoVal => obj
+ */ \
+ MACRO(MutateProto, mutate_proto, NULL, 1, 2, 1, JOF_BYTE) \
+ /*
+ * Create and push a new Array object with the given `length`,
+ * preallocating enough memory to hold that many elements.
+ *
+ * Category: Objects
+ * Type: Array literals
+ * Operands: uint32_t length
+ * Stack: => array
+ */ \
+ MACRO(NewArray, new_array, NULL, 5, 0, 1, JOF_UINT32|JOF_IC) \
+ /*
+ * Initialize an array element `array[index]` with value `val`.
+ *
+ * `val` may be `MagicValue(JS_ELEMENTS_HOLE)` pushed by `JSOp::Hole`.
+ *
+ * This never calls setters or proxy traps.
+ *
+ * `array` must be an Array object created by `JSOp::NewArray` with length >
+ * `index`, and never used except by `JSOp::InitElemArray`.
+ *
+ * Implements: [ArrayAccumulation][1], the third algorithm, step 4, in the
+ * common case where *nextIndex* is known.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-runtime-semantics-arrayaccumulation
+ *
+ * Category: Objects
+ * Type: Array literals
+ * Operands: uint32_t index
+ * Stack: array, val => array
+ */ \
+ MACRO(InitElemArray, init_elem_array, NULL, 5, 2, 1, JOF_UINT32|JOF_ELEM|JOF_PROPINIT) \
+ /*
+ * Initialize an array element `array[index++]` with value `val`.
+ *
+ * `val` may be `MagicValue(JS_ELEMENTS_HOLE)` pushed by `JSOp::Hole`. If it
+ * is, no element is defined, but the array length and the stack value
+ * `index` are still incremented.
+ *
+ * This never calls setters or proxy traps.
+ *
+ * `array` must be an Array object created by `JSOp::NewArray` and never used
+ * except by `JSOp::InitElemArray` and `JSOp::InitElemInc`.
+ *
+ * `index` must be an integer, `0 <= index <= INT32_MAX`. If `index` is
+ * `INT32_MAX`, this throws a RangeError. Unlike `InitElemArray`, the
+ * array's length is not required to be greater than `index`.
+ *
+ * This instruction is used when an array literal contains a
+ * *SpreadElement*. In `[a, ...b, c]`, `InitElemArray 0` is used to put
+ * `a` into the array, but `InitElemInc` is used for the elements of `b`
+ * and for `c`.
+ *
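+ * A rough sketch for `[a, ...b, c]` (not the exact emitted sequence; the
+ * iterator loop over `b` is elided):
+ *
+ *     NewArray <n>             # array
+ *     <a>; InitElemArray 0     # array
+ *     One                      # array 1
+ *     (for each element x of b:)
+ *     <x>; InitElemInc         # array index'
+ *     <c>; InitElemInc         # array index''
+ *     Pop                      # array
+ *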
+ * Implements: Several steps in [ArrayAccumulation][1] that call
+ * CreateDataProperty, set the array length, and/or increment *nextIndex*.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-runtime-semantics-arrayaccumulation
+ *
+ * Category: Objects
+ * Type: Array literals
+ * Operands:
+ * Stack: array, index, val => array, (index + 1)
+ */ \
+ MACRO(InitElemInc, init_elem_inc, NULL, 1, 3, 2, JOF_BYTE|JOF_ELEM|JOF_PROPINIT|JOF_IC) \
+ /*
+ * Push `MagicValue(JS_ELEMENTS_HOLE)`, representing an *Elision* in an
+ * array literal (like the missing property 0 in the array `[, 1]`).
+ *
+ * This magic value must be used only by `JSOp::InitElemArray` or
+ * `JSOp::InitElemInc`.
+ *
+ * Category: Objects
+ * Type: Array literals
+ * Operands:
+ * Stack: => hole
+ */ \
+ MACRO(Hole, hole, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Clone and push a new RegExp object.
+ *
+ * Implements: [Evaluation for *RegularExpressionLiteral*][1].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-regular-expression-literals-runtime-semantics-evaluation
+ *
+ * Category: Objects
+ * Type: RegExp literals
+ * Operands: uint32_t regexpIndex
+ * Stack: => regexp
+ */ \
+ MACRO(RegExp, reg_exp, NULL, 5, 0, 1, JOF_REGEXP) \
+ /*
+ * Initialize a new record, preallocating `length` memory slots. The
+ * record can still grow past `length` if needed, for example when the
+ * literal contains a spread element.
+ *
+ * Implements: [RecordLiteral Evaluation][1] step 1.
+ *
+ * [1]: https://tc39.es/proposal-record-tuple/#sec-record-initializer-runtime-semantics-evaluation
+ *
+ * Category: Compound primitives
+ * Type: Record literals
+ * Operands: uint32_t length
+ * Stack: => rval
+ */ \
+ IF_RECORD_TUPLE(MACRO(InitRecord, init_record, NULL, 5, 0, 1, JOF_UINT32)) \
+ /*
+ * Add the `key`/`value` pair on top of the stack as a property of the
+ * record underneath them.
+ *
+ * Implements: [AddPropertyIntoRecordEntriesList][1].
+ *
+ * [1]: https://tc39.es/proposal-record-tuple/#sec-addpropertyintorecordentrieslist
+ *
+ * Category: Compound primitives
+ * Type: Record literals
+ * Operands:
+ * Stack: record, key, value => record
+ */ \
+ IF_RECORD_TUPLE(MACRO(AddRecordProperty, add_record_property, NULL, 1, 3, 1, JOF_BYTE)) \
+ /*
+ * Spread the properties of the value on top of the stack into the
+ * preceding record.
+ *
+ * Implements: [RecordPropertyDefinitionEvaluation][1] for
+ * RecordPropertyDefinition : ... AssignmentExpression
+ *
+ * [1]: https://tc39.es/proposal-record-tuple/#sec-addpropertyintorecordentrieslist
+ *
+ * Category: Compound primitives
+ * Type: Record literals
+ * Operands:
+ * Stack: record, value => record
+ */ \
+ IF_RECORD_TUPLE(MACRO(AddRecordSpread, add_record_spread, NULL, 1, 2, 1, JOF_BYTE)) \
+ /*
+ * Mark a record as "initialized", going from "write-only" mode to
+ * "read-only" mode.
+ *
+ * Category: Compound primitives
+ * Type: Record literals
+ * Operands:
+ * Stack: record => record
+ */ \
+ IF_RECORD_TUPLE(MACRO(FinishRecord, finish_record, NULL, 1, 1, 1, JOF_BYTE)) \
+ /*
+ * Initialize a new tuple, preallocating `length` memory slots. The tuple
+ * can still grow past `length` if needed, for example when the literal
+ * contains a spread element.
+ *
+ * Implements: [TupleLiteral Evaluation][1] step 1.
+ *
+ * [1]: https://tc39.es/proposal-record-tuple/#sec-tuple-initializer-runtime-semantics-evaluation
+ *
+ * Category: Compound primitives
+ * Type: Tuple literals
+ * Operands: uint32_t length
+ * Stack: => rval
+ */ \
+ IF_RECORD_TUPLE(MACRO(InitTuple, init_tuple, NULL, 5, 0, 1, JOF_UINT32)) \
+ /*
+ * Add the element on top of the stack to the preceding tuple.
+ *
+ * Implements: [AddValueToTupleSequenceList][1].
+ *
+ * [1]: https://tc39.es/proposal-record-tuple/#sec-addvaluetotuplesequencelist
+ *
+ * Category: Compound primitives
+ * Type: Tuple literals
+ * Operands:
+ * Stack: tuple, element => tuple
+ */ \
+ IF_RECORD_TUPLE(MACRO(AddTupleElement, add_tuple_element, NULL, 1, 2, 1, JOF_BYTE)) \
+ /*
+ * Mark a tuple as "initialized", going from "write-only" mode to
+ * "read-only" mode.
+ *
+ * Category: Compound primitives
+ * Type: Tuple literals
+ * Operands:
+ * Stack: tuple => tuple
+ */ \
+ IF_RECORD_TUPLE(MACRO(FinishTuple, finish_tuple, NULL, 1, 1, 1, JOF_BYTE)) \
+ /*
+ * Push a new function object.
+ *
+ * The new function inherits the current environment chain.
+ *
+ * Used to create most JS functions. Notable exceptions are derived or
+ * default class constructors.
+ *
+ * Implements: [InstantiateFunctionObject][1], [Evaluation for
+ * *FunctionExpression*][2], and so on.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-function-definitions-runtime-semantics-instantiatefunctionobject
+ * [2]: https://tc39.es/ecma262/#sec-function-definitions-runtime-semantics-evaluation
+ *
+ * Category: Functions
+ * Type: Creating functions
+ * Operands: uint32_t funcIndex
+ * Stack: => fn
+ */ \
+ MACRO(Lambda, lambda, NULL, 5, 0, 1, JOF_OBJECT|JOF_USES_ENV) \
+ /*
+ * Set the name of a function.
+ *
+ * `fun` must be a function object. `name` must be a string, Int32 value,
+ * or symbol (like the result of `JSOp::ToId`).
+ *
+ * Implements: [SetFunctionName][1], used e.g. to name methods with
+ * computed property names.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-setfunctionname
+ *
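+ * For example:
+ *
+ *     var key = "m";
+ *     var obj = {[key]() {}};   // SetFunName gives the method the name "m"
+ *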
+ * Category: Functions
+ * Type: Creating functions
+ * Operands: FunctionPrefixKind prefixKind
+ * Stack: fun, name => fun
+ */ \
+ MACRO(SetFunName, set_fun_name, NULL, 2, 2, 1, JOF_UINT8) \
+ /*
+ * Initialize the home object for functions with super bindings.
+ *
+ * `fun` must be a method, getter, or setter, so that it has a
+ * [[HomeObject]] slot. `homeObject` must be a plain object or (for static
+ * methods) a constructor.
+ *
+ * Category: Functions
+ * Type: Creating functions
+ * Operands:
+ * Stack: fun, homeObject => fun
+ */ \
+ MACRO(InitHomeObject, init_home_object, NULL, 1, 2, 1, JOF_BYTE) \
+ /*
+ * Throw a TypeError if `baseClass` isn't either `null` or a constructor.
+ *
+ * Implements: [ClassDefinitionEvaluation][1] step 6.f.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-runtime-semantics-classdefinitionevaluation
+ *
+ * Category: Functions
+ * Type: Creating constructors
+ * Operands:
+ * Stack: baseClass => baseClass
+ */ \
+ MACRO(CheckClassHeritage, check_class_heritage, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Like `JSOp::Lambda`, but using `proto` as the new function's
+ * `[[Prototype]]` (or `%FunctionPrototype%` if `proto` is `null`).
+ *
+ * `proto` must be either a constructor or `null`. We use
+ * `JSOp::CheckClassHeritage` to check.
+ *
+ * This is used to create the constructor for a derived class.
+ *
+ * Implements: [ClassDefinitionEvaluation][1] steps 6.e.ii, 6.g.iii, and
+ * 12 for derived classes.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-runtime-semantics-classdefinitionevaluation
+ *
+ * Category: Functions
+ * Type: Creating constructors
+ * Operands: uint32_t funcIndex
+ * Stack: proto => obj
+ */ \
+ MACRO(FunWithProto, fun_with_proto, NULL, 5, 1, 1, JOF_OBJECT|JOF_USES_ENV) \
+ /*
+ * Pushes the current global's %BuiltinObject%.
+ *
+ * `kind` must be a valid `BuiltinObjectKind` (and must not be
+ * `BuiltinObjectKind::None`).
+ *
+ * Category: Objects
+ * Type: Built-in objects
+ * Operands: uint8_t kind
+ * Stack: => %BuiltinObject%
+ */ \
+ MACRO(BuiltinObject, builtin_object, NULL, 2, 0, 1, JOF_UINT8) \
+ /*
+ * Invoke `callee` with `this` and `args`, and push the return value. Throw
+ * a TypeError if `callee` isn't a function.
+ *
+ * `JSOp::CallContent` is emitted for `callContentFunction` in self-hosted
+ * JS, so that the debugger's `onNativeCall` hook can handle such calls
+ * differently. The `onNativeCall` hook disables all JITs, and in JIT code
+ * `JSOp::CallContent` is treated exactly the same as `JSOp::Call`.
+ *
+ * `JSOp::CallIter` is used for implicit calls to @@iterator methods, to
+ * ensure error messages are formatted with `JSMSG_NOT_ITERABLE` ("x is not
+ * iterable") rather than `JSMSG_NOT_FUNCTION` ("x[Symbol.iterator] is not
+ * a function"). The `argc` operand must be 0 for this variation.
+ *
+ * `JSOp::CallContentIter` is `JSOp::CallContent` variant of
+ * `JSOp::CallIter`.
+ *
+ * `JSOp::CallIgnoresRv` hints to the VM that the return value is ignored.
+ * This allows alternate faster implementations to be used that avoid
+ * unnecessary allocations.
+ *
+ * Implements: [EvaluateCall][1] steps 4, 5, and 7.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-evaluatecall
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands: uint16_t argc
+ * Stack: callee, this, args[0], ..., args[argc-1] => rval
+ */ \
+ MACRO(Call, call, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_IC) \
+ MACRO(CallContent, call_content, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_IC) \
+ MACRO(CallIter, call_iter, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_IC) \
+ MACRO(CallContentIter, call_content_iter, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_IC) \
+ MACRO(CallIgnoresRv, call_ignores_rv, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_IC) \
+ /*
+ * Like `JSOp::Call`, but the arguments are provided in an array rather than
+ * a span of stack slots. Used to implement spread-call syntax:
+ * `f(...args)`.
+ *
+ * `args` must be an Array object containing the actual arguments. The
+ * array must be packed (dense and free of holes; see IsPackedArray).
+ * This can be ensured by creating the array with `JSOp::NewArray` and
+ * populating it using `JSOp::InitElemArray`.
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands:
+ * Stack: callee, this, args => rval
+ */ \
+ MACRO(SpreadCall, spread_call, NULL, 1, 3, 1, JOF_BYTE|JOF_INVOKE|JOF_SPREAD|JOF_IC) \
+ /*
+ * Push an array object that can be passed directly as the `args` argument
+ * to `JSOp::SpreadCall`. If the operation can't be optimized, push
+ * `undefined` instead.
+ *
+ * This instruction and the branch around the iterator loop are emitted
+ * only when `iterable` is the sole argument in a call, as in `f(...arr)`.
+ *
+ * See `js::OptimizeSpreadCall`.
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands:
+ * Stack: iterable => array_or_undefined
+ */ \
+ MACRO(OptimizeSpreadCall, optimize_spread_call, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Perform a direct eval in the current environment if `callee` is the
+ * builtin `eval` function, otherwise follow same behaviour as `JSOp::Call`.
+ *
+ * All direct evals use one of the JSOp::*Eval instructions here and these
+ * opcodes are only used when the syntactic conditions for a direct eval
+ * are met. If the builtin `eval` function is called through other means, it
+ * becomes an indirect eval.
+ *
+ * Direct eval causes all bindings in *enclosing* non-global scopes to be
+ * marked "aliased". The optimization that puts bindings in stack slots has
+ * to prove that the bindings won't need to be captured by closures or
+ * accessed using `JSOp::{Get,Bind,Set,Del}Name` instructions. Direct eval
+ * makes that analysis impossible.
+ *
+ * The instruction immediately following any `JSOp::*Eval` instruction must
+ * be `JSOp::Lineno`.
+ *
+ * Implements: [Function Call Evaluation][1], steps 5-7 and 9, when the
+ * syntactic criteria for direct eval in step 6 are all met.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-function-calls-runtime-semantics-evaluation
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands: uint16_t argc
+ * Stack: callee, this, args[0], ..., args[argc-1] => rval
+ */ \
+ MACRO(Eval, eval, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_CHECKSLOPPY|JOF_IC) \
+ /*
+ * Spread-call variant of `JSOp::Eval`.
+ *
+ * See `JSOp::SpreadCall` for restrictions on `args`.
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands:
+ * Stack: callee, this, args => rval
+ */ \
+ MACRO(SpreadEval, spread_eval, NULL, 1, 3, 1, JOF_BYTE|JOF_INVOKE|JOF_SPREAD|JOF_CHECKSLOPPY|JOF_IC) \
+ /*
+ * Like `JSOp::Eval`, but for strict mode code.
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands: uint16_t argc
+ * Stack: evalFn, this, args[0], ..., args[argc-1] => rval
+ */ \
+ MACRO(StrictEval, strict_eval, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_CHECKSTRICT|JOF_IC) \
+ /*
+ * Spread-call variant of `JSOp::StrictEval`.
+ *
+ * See `JSOp::SpreadCall` for restrictions on `args`.
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands:
+ * Stack: callee, this, args => rval
+ */ \
+ MACRO(StrictSpreadEval, strict_spread_eval, NULL, 1, 3, 1, JOF_BYTE|JOF_INVOKE|JOF_SPREAD|JOF_CHECKSTRICT|JOF_IC) \
+ /*
+ * Push the implicit `this` value for an unqualified function call, like
+ * `foo()`. `nameIndex` gives the name of the function we're calling.
+ *
+ * The result is always `undefined` except when the name refers to a `with`
+ * binding. For example, in `with (date) { getFullYear(); }`, the
+ * implicit `this` passed to `getFullYear` is `date`, not `undefined`.
+ *
+ * This walks the run-time environment chain looking for the environment
+ * record that contains the function. If the function call definitely
+ * refers to a local binding, use `JSOp::Undefined`.
+ *
+ * Implements: [EvaluateCall][1] step 1.b. But not entirely correctly.
+ * See [bug 1166408][2].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-evaluatecall
+ * [2]: https://bugzilla.mozilla.org/show_bug.cgi?id=1166408
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands: uint32_t nameIndex
+ * Stack: => this
+ */ \
+ MACRO(ImplicitThis, implicit_this, "", 5, 0, 1, JOF_ATOM|JOF_USES_ENV) \
+ /*
+ * Push the call site object for a tagged template call.
+ *
+ * `script->getObject(objectIndex)` is the call site object.
+ *
+ * The call site object will already have the `.raw` property defined on it
+ * and will be frozen.
+ *
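+ * For example (a sketch):
+ *
+ *     tag`a${x}b`;   // pushes the frozen call site object: an array
+ *                    // ["a", "b"] with a `.raw` array of raw strings
+ *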
+ * Category: Functions
+ * Type: Calls
+ * Operands: uint32_t objectIndex
+ * Stack: => callSiteObj
+ */ \
+ MACRO(CallSiteObj, call_site_obj, NULL, 5, 0, 1, JOF_OBJECT) \
+ /*
+ * Push `MagicValue(JS_IS_CONSTRUCTING)`.
+ *
+ * This magic value is a required argument to the `JSOp::New` and
+ * `JSOp::SuperCall` instructions and must not be used any other way.
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands:
+ * Stack: => JS_IS_CONSTRUCTING
+ */ \
+ MACRO(IsConstructing, is_constructing, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Invoke `callee` as a constructor with `args` and `newTarget`, and push
+ * the return value. Throw a TypeError if `callee` isn't a constructor.
+ *
+ * `isConstructing` must be the value pushed by `JSOp::IsConstructing`.
+ *
+ * `JSOp::SuperCall` behaves exactly like `JSOp::New`, but is used for
+ * *SuperCall* expressions, to allow JITs to distinguish them from `new`
+ * expressions.
+ *
+ * `JSOp::NewContent` is for `constructContentFunction` in self-hosted JS.
+ * See the comment for `JSOp::CallContent` for more details.
+ *
+ * Implements: [EvaluateConstruct][1] steps 7 and 8.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-evaluatenew
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands: uint16_t argc
+ * Stack: callee, isConstructing, args[0], ..., args[argc-1], newTarget => rval
+ */ \
+ MACRO(New, new_, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_CONSTRUCT|JOF_IC) \
+ MACRO(NewContent, new_content, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_CONSTRUCT|JOF_IC) \
+ MACRO(SuperCall, super_call, NULL, 3, -1, 1, JOF_ARGC|JOF_INVOKE|JOF_CONSTRUCT|JOF_IC) \
+ /*
+ * Spread-call variant of `JSOp::New`.
+ *
+ * Invokes `callee` as a constructor with `args` and `newTarget`, and
+ * pushes the return value onto the stack.
+ *
+ * `isConstructing` must be the value pushed by `JSOp::IsConstructing`.
+ * See `JSOp::SpreadCall` for restrictions on `args`.
+ *
+ * `JSOp::SpreadSuperCall` behaves exactly like `JSOp::SpreadNew`, but is
+ * used for *SuperCall* expressions.
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands:
+ * Stack: callee, isConstructing, args, newTarget => rval
+ */ \
+ MACRO(SpreadNew, spread_new, NULL, 1, 4, 1, JOF_BYTE|JOF_INVOKE|JOF_CONSTRUCT|JOF_SPREAD|JOF_IC) \
+ MACRO(SpreadSuperCall, spread_super_call, NULL, 1, 4, 1, JOF_BYTE|JOF_INVOKE|JOF_CONSTRUCT|JOF_SPREAD|JOF_IC) \
+ /*
+ * Push the prototype of `callee` in preparation for calling `super()`.
+ *
+ * `callee` must be a derived class constructor.
+ *
+ * Implements: [GetSuperConstructor][1], steps 4-7.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getsuperconstructor
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands:
+ * Stack: callee => superFun
+ */ \
+ MACRO(SuperFun, super_fun, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Throw a ReferenceError if `thisval` is not
+ * `MagicValue(JS_UNINITIALIZED_LEXICAL)`. Used in derived class
+ * constructors to prohibit calling `super` more than once.
+ *
+ * Implements: [BindThisValue][1], step 3.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-bindthisvalue
+ *
+ * Category: Functions
+ * Type: Calls
+ * Operands:
+ * Stack: thisval => thisval
+ */ \
+ MACRO(CheckThisReinit, check_this_reinit, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Create and push a generator object for the current frame.
+ *
+ * This instruction must appear only in scripts for generators, async
+ * functions, and async generators. There must not already be a generator
+ * object for the current frame (that is, this instruction must execute at
+ * most once per generator or async call).
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands:
+ * Stack: => gen
+ */ \
+ MACRO(Generator, generator, NULL, 1, 0, 1, JOF_BYTE|JOF_USES_ENV) \
+ /*
+ * Suspend the current generator and return to the caller.
+ *
+ * When a generator is called, its script starts running, like any other JS
+ * function, because [FunctionDeclarationInstantiation][1] and other
+ * [generator object setup][2] are implemented mostly in bytecode. However,
+ * the *FunctionBody* of the generator is not supposed to start running
+ * until the first `.next()` call, so after setup the script suspends
+ * itself: the "initial yield".
+ *
+ * Later, when resuming execution, `rval`, `gen` and `resumeKind` will
+ * receive the values passed in by `JSOp::Resume`. `resumeKind` is the
+ * `GeneratorResumeKind` stored as an Int32 value.
+ *
+ * This instruction must appear only in scripts for generators and async
+ * generators. `gen` must be the generator object for the current frame. It
+ * must not have been previously suspended. The resume point indicated by
+ * `resumeIndex` must be the next instruction in the script, which must be
+ * `AfterYield`.
+ *
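+ * A rough sketch of a generator prologue (not the exact emitted sequence;
+ * `SetAliasedVar` stores `gen` in the `.generator` slot):
+ *
+ *     Generator                    # gen
+ *     SetAliasedVar ".generator"   # gen
+ *     InitialYield                 # rval gen resumeKind
+ *     AfterYield
+ *     CheckResumeKind              # rval
+ *     Pop                          #
+ *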
+ * Implements: [GeneratorStart][3], steps 4-7.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-functiondeclarationinstantiation
+ * [2]: https://tc39.es/ecma262/#sec-generator-function-definitions-runtime-semantics-evaluatebody
+ * [3]: https://tc39.es/ecma262/#sec-generatorstart
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands: uint24_t resumeIndex
+ * Stack: gen => rval, gen, resumeKind
+ */ \
+ MACRO(InitialYield, initial_yield, NULL, 4, 1, 3, JOF_RESUMEINDEX) \
+ /*
+ * Bytecode emitted after `yield` expressions. This is useful for the
+ * Debugger and `AbstractGeneratorObject::isAfterYieldOrAwait`. It's
+ * treated as jump target op so that the Baseline Interpreter can
+ * efficiently restore the frame's interpreterICEntry when resuming a
+ * generator.
+ *
+ * The preceding instruction in the script must be `Yield`, `InitialYield`,
+ * or `Await`.
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands: uint32_t icIndex
+ * Stack: =>
+ */ \
+ MACRO(AfterYield, after_yield, NULL, 5, 0, 0, JOF_ICINDEX) \
+ /*
+ * Suspend and close the current generator, async function, or async
+ * generator.
+ *
+ * `gen` must be the generator object for the current frame.
+ *
+ * If the current function is a non-async generator, then the value in the
+ * frame's return value slot is returned to the caller. It should be an
+ * object of the form `{value: returnValue, done: true}`.
+ *
+ * If the current function is an async function or async generator, the
+ * frame's return value slot must contain the current frame's result
+ * promise, which must already be resolved or rejected.
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands:
+ * Stack: gen =>
+ */ \
+ MACRO(FinalYieldRval, final_yield_rval, NULL, 1, 1, 0, JOF_BYTE) \
+ /*
+ * Suspend execution of the current generator or async generator, returning
+ * `rval1`.
+ *
+ * For non-async generators, `rval1` should be an object of the form
+ * `{value: valueToYield, done: false}`. For async generators, `rval1`
+ * should be the value to yield, and the caller is responsible for creating
+ * the iterator result object (under `js::AsyncGeneratorYield`).
+ *
+ * This instruction must appear only in scripts for generators and async
+ * generators. `gen` must be the generator object for the current stack
+ * frame. The resume point indicated by `resumeIndex` must be the next
+ * instruction in the script, which must be `AfterYield`.
+ *
+ * When resuming execution, `rval2`, `gen` and `resumeKind` receive the
+ * values passed in by `JSOp::Resume`.
+ *
+ * Implements: [GeneratorYield][1] and [AsyncGeneratorYield][2].
+ *
+ * [1]: https://tc39.es/ecma262/#sec-generatoryield
+ * [2]: https://tc39.es/ecma262/#sec-asyncgeneratoryield
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands: uint24_t resumeIndex
+ * Stack: rval1, gen => rval2, gen, resumeKind
+ */ \
+ MACRO(Yield, yield, NULL, 4, 2, 3, JOF_RESUMEINDEX) \
+ /*
+ * Pushes a boolean indicating whether the top of the stack is
+ * `MagicValue(JS_GENERATOR_CLOSING)`.
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands:
+ * Stack: val => val, res
+ */ \
+ MACRO(IsGenClosing, is_gen_closing, NULL, 1, 1, 2, JOF_BYTE) \
+ /*
+ * Arrange for this async function to resume asynchronously when `value`
+ * becomes resolved.
+ *
+ * This is the last thing an async function does before suspending for an
+ * `await` expression. It coerces the awaited `value` to a promise and
+ * effectively calls `.then()` on it, passing handler functions that will
+ * resume this async function call later. See `js::AsyncFunctionAwait`.
+ *
+ * This instruction must appear only in non-generator async function
+ * scripts. `gen` must be the internal generator object for the current
+ * frame. After this instruction, the script should suspend itself with
+ * `Await` (rather than exiting any other way).
+ *
+ * The result `promise` is the async function's result promise,
+ * `gen->as<AsyncFunctionGeneratorObject>().promise()`.
+ *
+ * Implements: [Await][1], steps 2-9.
+ *
+ * [1]: https://tc39.es/ecma262/#await
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands:
+ * Stack: value, gen => promise
+ */ \
+ MACRO(AsyncAwait, async_await, NULL, 1, 2, 1, JOF_BYTE) \
+ /*
+ * Resolve or reject the current async function's result promise with
+ * 'valueOrReason'.
+ *
+ * This instruction must appear only in non-generator async function
+ * scripts. `gen` must be the internal generator object for the current
+ * frame. This instruction must run at most once per async function call,
+ * as resolving/rejecting an already resolved/rejected promise is not
+ * permitted.
+ *
+ * The result `promise` is the async function's result promise,
+ * `gen->as<AsyncFunctionGeneratorObject>().promise()`.
+ *
+ * Implements: [AsyncFunctionStart][1], step 4.d.i. and 4.e.i.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-async-functions-abstract-operations-async-function-start
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands: AsyncFunctionResolveKind fulfillOrReject
+ * Stack: valueOrReason, gen => promise
+ */ \
+ MACRO(AsyncResolve, async_resolve, NULL, 2, 2, 1, JOF_UINT8) \
+ /*
+ * Suspend the current frame for an `await` expression.
+ *
+ * This instruction must appear only in scripts for async functions and
+ * async generators. `gen` must be the internal generator object for the
+ * current frame.
+ *
+ * This returns `promise` to the caller. Later, when this async call is
+ * resumed, `resolved`, `gen` and `resumeKind` receive the values passed in
+ * by `JSOp::Resume`, and execution continues at the next instruction,
+ * which must be `AfterYield`.
+ *
+ * This instruction is used in two subtly different ways.
+ *
+ * 1. In async functions:
+ *
+ * ... # valueToAwait
+ * GetAliasedVar ".generator" # valueToAwait gen
+ * AsyncAwait # resultPromise
+ * GetAliasedVar ".generator" # resultPromise gen
+ * Await # resolved gen resumeKind
+ * AfterYield
+ *
+ * `AsyncAwait` arranges for this frame to be resumed later and pushes
+ * its result promise. `Await` then suspends the frame and removes it
+ * from the stack, returning the result promise to the caller. (If this
+ * async call hasn't awaited before, the caller may be user code.
+ * Otherwise, the caller is self-hosted code using `resumeGenerator`.)
+ *
+ * 2. In async generators:
+ *
+ * ... # valueToAwait
+ * GetAliasedVar ".generator" # valueToAwait gen
+ * Await # resolved gen resumeKind
+ * AfterYield
+ *
+ * `AsyncAwait` is not used, so (1) the value returned to the caller by
+ * `Await` is `valueToAwait`, not `resultPromise`; and (2) the caller
+ * is responsible for doing the async-generator equivalent of
+ * `AsyncAwait` (namely, `js::AsyncGeneratorAwait`, called from
+ * `js::AsyncGeneratorResume` after `js::CallSelfHostedFunction`
+ * returns).
+ *
+ * Implements: [Await][1], steps 10-12.
+ *
+ * [1]: https://tc39.es/ecma262/#await
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands: uint24_t resumeIndex
+ * Stack: promise, gen => resolved, gen, resumeKind
+ */ \
+ MACRO(Await, await, NULL, 4, 2, 3, JOF_RESUMEINDEX) \
+ /*
+ * Test if the re-entry to the microtask loop may be skipped.
+ *
+ * This is part of an optimization for `await` expressions. Programs very
+ * often await values that aren't promises, or promises that are already
+ * resolved. We can then sometimes skip suspending the current frame and
+ * returning to the microtask loop. `CanSkipAwait` pushes `true` if the
+ * circumstances permit the optimization, and `false` otherwise.
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands:
+ * Stack: value => value, can_skip
+ */ \
+ MACRO(CanSkipAwait, can_skip_await, NULL, 1, 1, 2, JOF_BYTE) \
+ /*
+ * Potentially extract an awaited value, if the await is skippable.
+ *
+ * If re-entering the microtask loop is skippable (that is, if `can_skip`
+ * is true, as determined by the preceding `CanSkipAwait`),
+ * `MaybeExtractAwaitValue` replaces `value` with the result of the `await`
+ * expression (unwrapping the resolved promise, if any). Otherwise, `value`
+ * remains as is.
+ *
+ * In both cases, can_skip remains the same.
+ *
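+ * A rough sketch of how an `await` uses this pair of instructions (not
+ * the exact emitted sequence):
+ *
+ *     <value>                   # value
+ *     CanSkipAwait              # value can_skip
+ *     MaybeExtractAwaitValue    # value_or_resolved can_skip
+ *     JumpIfTrue DONE           # value_or_resolved
+ *     <suspend via AsyncAwait/Await as sketched above>
+ *     DONE: JumpTarget          # value_or_resolved
+ *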
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands:
+ * Stack: value, can_skip => value_or_resolved, can_skip
+ */ \
+ MACRO(MaybeExtractAwaitValue, maybe_extract_await_value, NULL, 1, 2, 2, JOF_BYTE) \
+ /*
+ * Pushes one of the GeneratorResumeKind values as Int32Value.
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands: GeneratorResumeKind resumeKind (encoded as uint8_t)
+ * Stack: => resumeKind
+ */ \
+ MACRO(ResumeKind, resume_kind, NULL, 2, 0, 1, JOF_UINT8) \
+ /*
+ * Handle Throw and Return resumption.
+ *
+ * `gen` must be the generator object for the current frame. `resumeKind`
+ * must be a `GeneratorResumeKind` stored as an `Int32` value. If it is
+ * `Next`, continue to the next instruction. If `resumeKind` is `Throw` or
+ * `Return`, these completions are handled by throwing an exception. See
+ * `GeneratorThrowOrReturn`.
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands:
+ * Stack: rval, gen, resumeKind => rval
+ */ \
+ MACRO(CheckResumeKind, check_resume_kind, NULL, 1, 3, 1, JOF_BYTE) \
+ /*
+ * Resume execution of a generator, async function, or async generator.
+ *
+ * This behaves something like a call instruction. It pushes a stack frame
+ * (the one saved when `gen` was suspended, rather than a fresh one) and
+ * runs instructions in it. Once `gen` returns or yields, its return value
+ * is pushed to this frame's stack and execution continues in this script.
+ *
+ * This instruction is emitted only for the `resumeGenerator` self-hosting
+ * intrinsic. It is used in the implementation of
+ * `%GeneratorPrototype%.next`, `.throw`, and `.return`.
+ *
+ * `gen` must be a suspended generator object. `resumeKind` must be in
+ * range for `GeneratorResumeKind`.
+ *
+ * Category: Functions
+ * Type: Generators and async functions
+ * Operands:
+ * Stack: gen, val, resumeKind => rval
+ */ \
+ MACRO(Resume, resume, NULL, 1, 3, 1, JOF_BYTE|JOF_INVOKE) \
+ /*
+ * No-op instruction marking the target of a jump instruction.
+ *
+ * This instruction and a few others (see `js::BytecodeIsJumpTarget`) are
+ * jump target instructions. The Baseline Interpreter uses these
+ * instructions to sync the frame's `interpreterICEntry` after a jump. Ion
+ * uses them to find block boundaries when translating bytecode to MIR.
+ *
+ * Category: Control flow
+ * Type: Jump targets
+ * Operands: uint32_t icIndex
+ * Stack: =>
+ */ \
+ MACRO(JumpTarget, jump_target, NULL, 5, 0, 0, JOF_ICINDEX) \
+ /*
+ * Marks the target of the backwards jump for some loop.
+ *
+ * This is a jump target instruction (see `JSOp::JumpTarget`). Additionally,
+ * it checks for interrupts and handles JIT tiering.
+ *
+ * The `depthHint` operand is a loop depth hint for Ion. It starts at 1 and
+ * deeply nested loops all have the same value.
+ *
+ * For the convenience of the JITs, scripts must not start with this
+ * instruction. See bug 1602390.
+ *
+ * Category: Control flow
+ * Type: Jump targets
+ * Operands: uint32_t icIndex, uint8_t depthHint
+ * Stack: =>
+ */ \
+ MACRO(LoopHead, loop_head, NULL, 6, 0, 0, JOF_LOOPHEAD) \
+ /*
+ * Jump to a 32-bit offset from the current bytecode.
+ *
+ * See "Jump instructions" above for details.
+ *
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t offset
+ * Stack: =>
+ */ \
+ MACRO(Goto, goto_, NULL, 5, 0, 0, JOF_JUMP) \
+ /*
+ * If ToBoolean(`cond`) is false, jump to a 32-bit offset from the current
+ * instruction.
+ *
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t forwardOffset
+ * Stack: cond =>
+ */ \
+ MACRO(JumpIfFalse, jump_if_false, NULL, 5, 1, 0, JOF_JUMP|JOF_IC) \
+ /*
+ * If ToBoolean(`cond`) is true, jump to a 32-bit offset from the current
+ * instruction.
+ *
+ * `offset` may be positive or negative. This is the instruction used at the
+ * end of a do-while loop to jump back to the top.
+ *
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t offset
+ * Stack: cond =>
+ */ \
+ MACRO(JumpIfTrue, jump_if_true, NULL, 5, 1, 0, JOF_JUMP|JOF_IC) \
+ /*
+ * Short-circuit for logical AND.
+ *
+ * If ToBoolean(`cond`) is false, jump to a 32-bit offset from the current
+ * instruction. The value remains on the stack.
+ *
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t forwardOffset
+ * Stack: cond => cond
+ */ \
+ MACRO(And, and_, NULL, 5, 1, 1, JOF_JUMP|JOF_IC) \
+ /*
+ * Short-circuit for logical OR.
+ *
+ * If ToBoolean(`cond`) is true, jump to a 32-bit offset from the current
+ * instruction. The value remains on the stack.
+ *
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t forwardOffset
+ * Stack: cond => cond
+ */ \
+ MACRO(Or, or_, NULL, 5, 1, 1, JOF_JUMP|JOF_IC) \
+ /*
+ * Short-circuit for nullish coalescing.
+ *
+ * If `val` is not null or undefined, jump to a 32-bit offset from the
+ * current instruction.
+ *
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t forwardOffset
+ * Stack: val => val
+ */ \
+ MACRO(Coalesce, coalesce, NULL, 5, 1, 1, JOF_JUMP) \
+ /*
+ * Like `JSOp::JumpIfTrue`, but if the branch is taken, pop and discard an
+ * additional stack value.
+ *
+ * This is used to implement `switch` statements when the
+ * `JSOp::TableSwitch` optimization is not possible. The switch statement
+ *
+ * switch (expr) {
+ * case A: stmt1;
+ * case B: stmt2;
+ * }
+ *
+ * compiles to this bytecode:
+ *
+ * # dispatch code - evaluate expr, check it against each `case`,
+ * # jump to the right place in the body or to the end.
+ * <expr>
+ * Dup; <A>; StrictEq; Case L1; JumpTarget
+ * Dup; <B>; StrictEq; Case L2; JumpTarget
+ * Default LE
+ *
+ * # body code
+ * L1: JumpTarget; <stmt1>
+ * L2: JumpTarget; <stmt2>
+ * LE: JumpTarget
+ *
+ * This opcode is weird: it's the only one whose ndefs varies depending on
+ * which way a conditional branch goes. We could implement switch
+ * statements using `JSOp::JumpIfTrue` and `JSOp::Pop`, but that would also
+ * be awkward--putting the `JSOp::Pop` inside the `switch` body would
+ * complicate fallthrough.
+ *
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t forwardOffset
+ * Stack: val, cond => val (if !cond)
+ */ \
+ MACRO(Case, case_, NULL, 5, 2, 1, JOF_JUMP) \
+ /*
+ * Like `JSOp::Goto`, but pop and discard an additional stack value.
+ *
+ * This appears after all cases for a non-optimized `switch` statement. If
+ * there's a `default:` label, it jumps to that point in the body;
+ * otherwise it jumps to the next statement.
+ *
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t forwardOffset
+ * Stack: lval =>
+ */ \
+ MACRO(Default, default_, NULL, 5, 1, 0, JOF_JUMP) \
+ /*
+ * Optimized switch-statement dispatch, used when all `case` labels are
+ * small integer constants.
+ *
+ * If `low <= i <= high`, jump to the instruction at the offset given by
+ * `script->resumeOffsets()[firstResumeIndex + i - low]`, in bytes from the
+ * start of the current script's bytecode. Otherwise, jump to the
+ * instruction at `defaultOffset` from the current instruction. All of
+ * these offsets must be in range for the current script and must point to
+ * `JSOp::JumpTarget` instructions.
+ *
+ * The following inequalities must hold: `low <= high` and
+ * `firstResumeIndex + high - low < resumeOffsets().size()`.
+ *
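+ * For example, a switch with dense integer labels such as
+ *
+ *     switch (i) { case 1: f(); break; case 2: g(); break; }
+ *
+ * can be dispatched with `TableSwitch` using `low == 1` and `high == 2`.
+ *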
+ * Category: Control flow
+ * Type: Jumps
+ * Operands: int32_t defaultOffset, int32_t low, int32_t high,
+ * uint24_t firstResumeIndex
+ * Stack: i =>
+ */ \
+ MACRO(TableSwitch, table_switch, NULL, 16, 1, 0, JOF_TABLESWITCH) \
+ /*
+ * Return `rval`.
+ *
+ * This must not be used in derived class constructors. Instead use
+ * `JSOp::SetRval`, `JSOp::CheckReturn`, and `JSOp::RetRval`.
+ *
+ * Category: Control flow
+ * Type: Return
+ * Operands:
+ * Stack: rval =>
+ */ \
+ MACRO(Return, return_, NULL, 1, 1, 0, JOF_BYTE) \
+ /*
+ * Push the current stack frame's `returnValue`. If no `JSOp::SetRval`
+ * instruction has been executed in this stack frame, this is `undefined`.
+ *
+ * Every stack frame has a `returnValue` slot, used by top-level scripts,
+ * generators, async functions, and derived class constructors. Plain
+ * functions usually use `JSOp::Return` instead.
+ *
+ * Category: Control flow
+ * Type: Return
+ * Operands:
+ * Stack: => rval
+ */ \
+ MACRO(GetRval, get_rval, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Store `rval` in the current stack frame's `returnValue` slot.
+ *
+ * This instruction must not be used in a toplevel script compiled with the
+ * `noScriptRval` option.
+ *
+ * Category: Control flow
+ * Type: Return
+ * Operands:
+ * Stack: rval =>
+ */ \
+ MACRO(SetRval, set_rval, NULL, 1, 1, 0, JOF_BYTE) \
+ /*
+ * Stop execution and return the current stack frame's `returnValue`. If no
+ * `JSOp::SetRval` instruction has been executed in this stack frame, this
+ * is `undefined`.
+ *
+ * Also emitted at end of every script so consumers don't need to worry
+ * about running off the end.
+ *
+ * If the current script is a derived class constructor, `returnValue` must
+ * be an object. The script can use `JSOp::CheckReturn` to ensure this.
+ *
+ * Category: Control flow
+ * Type: Return
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(RetRval, ret_rval, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * Check the return value in a derived class constructor.
+ *
+ * - If the current stack frame's `returnValue` is an object, push
+ * `returnValue` onto the stack.
+ *
+ * - Otherwise, if the `returnValue` is undefined and `thisval` is an
+ * object, push `thisval` onto the stack.
+ *
+ * - Otherwise, throw a TypeError.
+ *
+ * This is exactly what has to happen when a derived class constructor
+ * returns. `thisval` should be the current value of `this`, or
+ * `MagicValue(JS_UNINITIALIZED_LEXICAL)` if `this` is uninitialized.
+ *
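+ * For example (a sketch, assuming a base class `B`):
+ *
+ *     class D extends B {
+ *       constructor() { super(); return 0; }
+ *     }
+ *     new D();   // TypeError: the return value is neither an object
+ *                // nor undefined
+ *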
+ * Implements: [The [[Construct]] internal method of JS functions][1],
+ * steps 13 and 15.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-ecmascript-function-objects-construct-argumentslist-newtarget
+ *
+ * Category: Control flow
+ * Type: Return
+ * Operands:
+ * Stack: thisval => rval
+ */ \
+ MACRO(CheckReturn, check_return, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Throw `exc`. (ノಠ益ಠ)ノ彡┴──┴
+ *
+ * This sets the pending exception to `exc` and jumps to error-handling
+ * code. If we're in a `try` block, error handling adjusts the stack and
+ * environment chain and resumes execution at the top of the `catch` or
+ * `finally` block. Otherwise it starts unwinding the stack.
+ *
+ * Implements: [*ThrowStatement* Evaluation][1], step 3.
+ *
+ * This is also used in for-of loops. If the body of the loop throws an
+ * exception, we catch it, close the iterator, then use `JSOp::Throw` to
+ * rethrow.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-throw-statement-runtime-semantics-evaluation
+ *
+ * Category: Control flow
+ * Type: Exceptions
+ * Operands:
+ * Stack: exc =>
+ */ \
+ MACRO(Throw, throw_, NULL, 1, 1, 0, JOF_BYTE) \
+ /*
+ * Create and throw an Error object.
+ *
+ * Sometimes we know at emit time that an operation always throws. For
+ * example, `delete super.prop;` is allowed in methods, but always throws a
+ * ReferenceError.
+ *
+ * `msgNumber` determines the `.message` and [[Prototype]] of the new Error
+ * object. It must be an error number in js/public/friend/ErrorNumbers.msg.
+ * The number of arguments in the error message must be 0.
+ *
+ * Category: Control flow
+ * Type: Exceptions
+ * Operands: ThrowMsgKind msgNumber
+ * Stack: =>
+ */ \
+ MACRO(ThrowMsg, throw_msg, NULL, 2, 0, 0, JOF_UINT8) \
+ /*
+ * Throws a runtime TypeError for invalid assignment to a `const` binding.
+ *
+ * Category: Control flow
+ * Type: Exceptions
+ * Operands: uint32_t nameIndex
+ * Stack: =>
+ */ \
+ MACRO(ThrowSetConst, throw_set_const, NULL, 5, 0, 0, JOF_ATOM|JOF_NAME) \
+ /*
+ * No-op instruction that marks the top of the bytecode for a
+ * *TryStatement*.
+ *
+ * Location information for catch/finally blocks is stored in a side table,
+ * `script->trynotes()`.
+ *
+ * Category: Control flow
+ * Type: Exceptions
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(Try, try_, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * No-op instruction used by the exception unwinder to determine the
+ * correct environment to unwind to when performing IteratorClose due to
+ * destructuring.
+ *
+ * This instruction must appear immediately before each
+ * `JSTRY_DESTRUCTURING` span in a script's try notes.
+ *
+ * Category: Control flow
+ * Type: Exceptions
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(TryDestructuring, try_destructuring, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * Push and clear the pending exception. ┬──┬◡ノ(° -°ノ)
+ *
+ * This must be used only in the fixed sequence of instructions following a
+ * `JSTRY_CATCH` span (see "Bytecode Invariants" above), as that's the only
+ * way instructions would run with an exception pending.
+ *
+ * Used to implement catch-blocks, including the implicit ones generated as
+ * part of for-of iteration.
+ *
+ * Category: Control flow
+ * Type: Exceptions
+ * Operands:
+ * Stack: => exception
+ */ \
+ MACRO(Exception, exception, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * No-op instruction that marks the start of a `finally` block.
+ *
+ * Category: Control flow
+ * Type: Exceptions
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(Finally, finally, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * Push `MagicValue(JS_UNINITIALIZED_LEXICAL)`, a magic value used to mark
+ * a binding as uninitialized.
+ *
+ * This magic value must be used only by `JSOp::InitLexical` and
+ * `JSOp::InitAliasedLexical`.
+ *
+ * Category: Variables and scopes
+ * Type: Initialization
+ * Operands:
+ * Stack: => uninitialized
+ */ \
+ MACRO(Uninitialized, uninitialized, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Initialize an optimized local lexical binding; or mark it as
+ * uninitialized.
+ *
+ * This stores the value `v` in the fixed slot `localno` in the current
+ * stack frame. If `v` is the magic value produced by `JSOp::Uninitialized`,
+ * this marks the binding as uninitialized. Otherwise this initializes the
+ * binding with value `v`.
+ *
+ * Implements: [CreateMutableBinding][1] step 3, substep "record that it is
+ * uninitialized", and [InitializeBinding][2], for optimized locals. (Note:
+ * this is how `const` bindings are initialized.)
+ *
+ * [1]: https://tc39.es/ecma262/#sec-declarative-environment-records-createmutablebinding-n-d
+ * [2]: https://tc39.es/ecma262/#sec-declarative-environment-records-initializebinding-n-v
+ *
+ * Category: Variables and scopes
+ * Type: Initialization
+ * Operands: uint24_t localno
+ * Stack: v => v
+ */ \
+ MACRO(InitLexical, init_lexical, NULL, 4, 1, 1, JOF_LOCAL|JOF_NAME) \
+ /*
+ * Initialize a global lexical binding.
+ *
+ * The binding must already have been created by
+ * `GlobalOrEvalDeclInstantiation` and must be uninitialized.
+ *
+ * Like `JSOp::InitLexical` but for global lexicals. Unlike `InitLexical`
+ * this can't be used to mark a binding as uninitialized.
+ *
+ * Category: Variables and scopes
+ * Type: Initialization
+ * Operands: uint32_t nameIndex
+ * Stack: val => val
+ */ \
+ MACRO(InitGLexical, init_g_lexical, NULL, 5, 1, 1, JOF_ATOM|JOF_NAME|JOF_PROPINIT|JOF_GNAME|JOF_IC) \
+ /*
+ * Initialize an aliased lexical binding; or mark it as uninitialized.
+ *
+ * Like `JSOp::InitLexical` but for aliased bindings.
+ *
+ * Note: There is no even-less-optimized `InitName` instruction because JS
+ * doesn't need it. We always know statically which binding we're
+ * initializing.
+ *
+ * `hops` is usually 0, but in `function f(a=eval("var b;")) { }`, the
+ * argument `a` is initialized from inside a nested scope, so `hops == 1`.
+ *
+ * Category: Variables and scopes
+ * Type: Initialization
+ * Operands: uint8_t hops, uint24_t slot
+ * Stack: v => v
+ */ \
+ MACRO(InitAliasedLexical, init_aliased_lexical, NULL, 5, 1, 1, JOF_ENVCOORD|JOF_NAME|JOF_PROPINIT) \
+ /*
+ * Throw a ReferenceError if the value on top of the stack is uninitialized.
+ *
+ * Typically used after `JSOp::GetLocal` with the same `localno`.
+ *
+ * Implements: [GetBindingValue][1] step 3 and [SetMutableBinding][2] step
+ * 4 for declarative Environment Records.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-declarative-environment-records-getbindingvalue-n-s
+ * [2]: https://tc39.es/ecma262/#sec-declarative-environment-records-setmutablebinding-n-v-s
+ *
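+ * For example, the read below may compile to `GetLocal` followed by
+ * `CheckLexical`, which throws (a temporal dead zone check):
+ *
+ *     {
+ *       x;           // ReferenceError: `x` is uninitialized here
+ *       let x = 1;
+ *     }
+ *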
+ * Category: Variables and scopes
+ * Type: Initialization
+ * Operands: uint24_t localno
+ * Stack: v => v
+ */ \
+ MACRO(CheckLexical, check_lexical, NULL, 4, 1, 1, JOF_LOCAL|JOF_NAME) \
+ /*
+ * Like `JSOp::CheckLexical` but for aliased bindings.
+ *
+ * Typically used after `JSOp::GetAliasedVar` with the same hops/slot.
+ *
+ * Note: There are no `CheckName` or `CheckGName` instructions because
+ * they're unnecessary. `JSOp::{Get,Set}{Name,GName}` all check for
+ * uninitialized lexicals and throw if needed.
+ *
+ * Category: Variables and scopes
+ * Type: Initialization
+ * Operands: uint8_t hops, uint24_t slot
+ * Stack: v => v
+ */ \
+ MACRO(CheckAliasedLexical, check_aliased_lexical, NULL, 5, 1, 1, JOF_ENVCOORD|JOF_NAME) \
+ /*
+ * Throw a ReferenceError if the value on top of the stack is
+ * `MagicValue(JS_UNINITIALIZED_LEXICAL)`. Used in derived class
+ * constructors to check `this` (which needs to be initialized before use,
+ * by calling `super()`).
+ *
+ * Implements: [GetThisBinding][1] step 3.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-function-environment-records-getthisbinding
+ *
+ * Category: Variables and scopes
+ * Type: Initialization
+ * Operands:
+ * Stack: this => this
+ */ \
+ MACRO(CheckThis, check_this, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Look up a name on the global lexical environment's chain and push the
+ * environment which contains a binding for that name. If no such binding
+ * exists, push the global lexical environment.
+ *
+ * Category: Variables and scopes
+ * Type: Looking up bindings
+ * Operands: uint32_t nameIndex
+ * Stack: => global
+ */ \
+ MACRO(BindGName, bind_g_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_GNAME|JOF_IC) \
+ /*
+ * Look up a name on the environment chain and push the environment which
+ * contains a binding for that name. If no such binding exists, push the
+ * global lexical environment.
+ *
+ * Category: Variables and scopes
+ * Type: Looking up bindings
+ * Operands: uint32_t nameIndex
+ * Stack: => env
+ */ \
+ MACRO(BindName, bind_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_IC|JOF_USES_ENV) \
+ /*
+ * Find a binding on the environment chain and push its value.
+ *
+ * If the binding is an uninitialized lexical, throw a ReferenceError. If
+ * no such binding exists, throw a ReferenceError unless the next
+ * instruction is `JSOp::Typeof`, in which case push `undefined`.
+ *
+ * Implements: [ResolveBinding][1] followed by [GetValue][2]
+ * (adjusted hackily for `typeof`).
+ *
+ * This is the fallback `Get` instruction that handles all unoptimized
+ * cases. Optimized instructions follow.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-resolvebinding
+ * [2]: https://tc39.es/ecma262/#sec-getvalue
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: => val
+ */ \
+ MACRO(GetName, get_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_IC|JOF_USES_ENV) \
+ /*
+ * Find a global binding and push its value.
+ *
+ * This searches the global lexical environment and, failing that, the
+ * global object. (Unlike most declarative environments, the global lexical
+ * environment can gain more bindings after compilation, possibly shadowing
+ * global object properties.)
+ *
+ * This is an optimized version of `JSOp::GetName` that skips all local
+ * scopes, for use when the name doesn't refer to any local binding.
+ * `NonSyntacticVariablesObject`s break this optimization, so if the
+ * current script has a non-syntactic global scope, use `JSOp::GetName`
+ * instead.
+ *
+ * Like `JSOp::GetName`, this throws a ReferenceError if no such binding is
+ * found (unless the next instruction is `JSOp::Typeof`) or if the binding
+ * is an uninitialized lexical.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: => val
+ */ \
+ MACRO(GetGName, get_g_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_GNAME|JOF_IC) \
+ /*
+ * Push the value of an argument that is stored in the stack frame
+ * or in an `ArgumentsObject`.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint16_t argno
+ * Stack: => arguments[argno]
+ */ \
+ MACRO(GetArg, get_arg, NULL, 3, 0, 1, JOF_QARG|JOF_NAME) \
+ /*
+ * Push the value of an argument that is stored in the stack frame. Like
+ * `JSOp::GetArg`, but ignores the frame's `ArgumentsObject` and doesn't
+ * assert the argument is unaliased.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint16_t argno
+ * Stack: => arguments[argno]
+ */ \
+ MACRO(GetFrameArg, get_frame_arg, NULL, 3, 0, 1, JOF_QARG|JOF_NAME) \
+ /*
+ * Push the value of an optimized local variable.
+ *
+ * If the variable is an uninitialized lexical, push
+ * `MagicValue(JS_UNINITIALIZED_LEXICAL)`.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint24_t localno
+ * Stack: => val
+ */ \
+ MACRO(GetLocal, get_local, NULL, 4, 0, 1, JOF_LOCAL|JOF_NAME) \
+ /*
+ * Push the number of actual arguments as an Int32Value.
+ *
+ * This is emitted for the ArgumentsLength() intrinsic in self-hosted code.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands:
+ * Stack: => arguments.length
+ */ \
+ MACRO(ArgumentsLength, arguments_length, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Push the value of an argument that is stored in the stack frame. The
+ * value on top of the stack must be an Int32Value storing the index. The
+ * index must be less than the number of actual arguments.
+ *
+ * This is emitted for the GetArgument(i) intrinsic in self-hosted code.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands:
+ * Stack: index => arguments[index]
+ */ \
+ MACRO(GetActualArg, get_actual_arg, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Push the value of an aliased binding.
+ *
+ * Local bindings that aren't closed over or dynamically accessed are
+ * stored in stack slots. Global and `with` bindings are object properties.
+ * All other bindings are called "aliased" and stored in
+ * `EnvironmentObject`s.
+ *
+ * Where possible, `Aliased` instructions are used to access aliased
+ * bindings. (There's no difference in meaning between `AliasedVar` and
+ * `AliasedLexical`.) Each of these instructions has operands `hops` and
+ * `slot` that encode an [`EnvironmentCoordinate`][1], directions to the
+ * binding from the current environment object.
+ *
+ * `Aliased` instructions can't be used when there's a dynamic scope (due
+ * to non-strict `eval` or `with`) that might shadow the aliased binding.
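+ *
+ * For example, in `function f() { let x = 1; return () => x; }`, the
+ * arrow function closes over `x`, so it reads `x` with a
+ * `GetAliasedVar` whose hops/slot lead up the environment chain to
+ * `x`'s slot in `f`'s environment.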
+ *
+ * [1]: https://searchfox.org/mozilla-central/search?q=symbol:T_js%3A%3AEnvironmentCoordinate
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint8_t hops, uint24_t slot
+ * Stack: => aliasedVar
+ */ \
+ MACRO(GetAliasedVar, get_aliased_var, NULL, 5, 0, 1, JOF_ENVCOORD|JOF_NAME|JOF_USES_ENV) \
+ /*
+ * Push the value of an aliased binding, which may have to bypass a DebugEnvironmentProxy
+ * on the environment chain.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint8_t hops, uint24_t slot
+ * Stack: => aliasedVar
+ */ \
+ MACRO(GetAliasedDebugVar, get_aliased_debug_var, NULL, 5, 0, 1, JOF_DEBUGCOORD|JOF_NAME) \
+ /*
+ * Get the value of a module import by name and push it onto the stack.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: => val
+ */ \
+ MACRO(GetImport, get_import, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME) \
+ /*
+ * Get the value of a binding from the environment `env`. If the name is
+ * not bound in `env`, throw a ReferenceError.
+ *
+ * `env` must be an environment currently on the environment chain, pushed
+ * by `JSOp::BindName` or `JSOp::BindVar`.
+ *
+ * Note: `JSOp::BindName` and `JSOp::GetBoundName` are the two halves of the
+ * `JSOp::GetName` operation: finding and reading a variable. This
+ * decomposed version is needed to implement the compound assignment and
+ * increment/decrement operators, which get and then set a variable. The
+ * spec says the variable lookup is done only once. If we did the lookup
+ * twice, there would be observable bugs, thanks to dynamic scoping. We
+ * could set the wrong variable or call proxy traps incorrectly.
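+ *
+ * For example, `x++` inside `with (obj) { ... }` finds `x` once
+ * (`BindName`), reads it with `GetBoundName`, and writes the result
+ * back to the same environment it found.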
+ *
+ * Implements: [GetValue][1] steps 4 and 6.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-getvalue
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: env => v
+ */ \
+ MACRO(GetBoundName, get_bound_name, NULL, 5, 1, 1, JOF_ATOM|JOF_NAME|JOF_IC) \
+ /*
+ * Push the value of an intrinsic onto the stack.
+ *
+ * Non-standard. Intrinsics are slots in the intrinsics holder object (see
+ * `GlobalObject::getIntrinsicsHolder`), which is used in lieu of global
+ * bindings in self-hosting code.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: => intrinsic[name]
+ */ \
+ MACRO(GetIntrinsic, get_intrinsic, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_IC) \
+ /*
+ * Pushes the currently executing function onto the stack.
+ *
+ * The current script must be a function script.
+ *
+ * Used to implement `super`. It is also sometimes used as a minor
+ * optimization when a named function expression refers to itself by name:
+ *
+ * f = function fac(n) { ... fac(n - 1) ... };
+ *
+ * This lets us optimize away a lexical environment that contains only the
+ * binding for `fac`, unless it's otherwise observable (via `with`, `eval`,
+ * or a nested closure).
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands:
+ * Stack: => callee
+ */ \
+ MACRO(Callee, callee, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Load the callee stored in a CallObject on the environment chain. The
+ * `numHops` operand is the number of environment objects to skip on the
+ * environment chain. The environment chain element indicated by `numHops`
+ * must be a CallObject.
+ *
+ * Category: Variables and scopes
+ * Type: Getting binding values
+ * Operands: uint8_t numHops
+ * Stack: => callee
+ */ \
+ MACRO(EnvCallee, env_callee, NULL, 2, 0, 1, JOF_UINT8) \
+ /*
+ * Assign `val` to the binding in `env` with the name given by `nameIndex`.
+ * Throw a ReferenceError if the binding is an uninitialized lexical.
+ * This can call setters and/or proxy traps.
+ *
+ * `env` must be an environment currently on the environment chain,
+ * pushed by `JSOp::BindName` or `JSOp::BindVar`.
+ *
+ * This is the fallback `Set` instruction that handles all unoptimized
+ * cases. Optimized instructions follow.
+ *
+ * Implements: [PutValue][1] steps 5 and 7 for unoptimized bindings.
+ *
+ * Note: `JSOp::BindName` and `JSOp::SetName` are the two halves of simple
+ * assignment: finding and setting a variable. They are two separate
+ * instructions because, per spec, the "finding" part happens before
+ * evaluating the right-hand side of the assignment, and the "setting" part
+ * after. Optimized cases don't need a `Bind` instruction because the
+ * "finding" is done statically.
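+ *
+ * For example, in `with (obj) { x = f(); }`, the scope lookup for `x`
+ * (`BindName`) runs before `f()` is evaluated; `SetName` then assigns
+ * to whatever environment that earlier lookup found.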
+ *
+ * [1]: https://tc39.es/ecma262/#sec-putvalue
+ *
+ * Category: Variables and scopes
+ * Type: Setting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: env, val => val
+ */ \
+ MACRO(SetName, set_name, NULL, 5, 2, 1, JOF_ATOM|JOF_NAME|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC|JOF_USES_ENV) \
+ /*
+ * Like `JSOp::SetName`, but throw a TypeError if there is no binding for
+ * the specified name in `env`, or if the binding is immutable (a `const`
+ * or read-only property).
+ *
+ * Implements: [PutValue][1] steps 5 and 7 for strict mode code.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-putvalue
+ *
+ * Category: Variables and scopes
+ * Type: Setting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: env, val => val
+ */ \
+ MACRO(StrictSetName, strict_set_name, NULL, 5, 2, 1, JOF_ATOM|JOF_NAME|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC|JOF_USES_ENV) \
+ /*
+ * Like `JSOp::SetName`, but for assigning to globals. `env` must be an
+ * environment pushed by `JSOp::BindGName`.
+ *
+ * Category: Variables and scopes
+ * Type: Setting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: env, val => val
+ */ \
+ MACRO(SetGName, set_g_name, NULL, 5, 2, 1, JOF_ATOM|JOF_NAME|JOF_PROPSET|JOF_GNAME|JOF_CHECKSLOPPY|JOF_IC) \
+ /*
+ * Like `JSOp::StrictSetName`, but for assigning to globals. `env` must be
+ * an environment pushed by `JSOp::BindGName`.
+ *
+ * Category: Variables and scopes
+ * Type: Setting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: env, val => val
+ */ \
+ MACRO(StrictSetGName, strict_set_g_name, NULL, 5, 2, 1, JOF_ATOM|JOF_NAME|JOF_PROPSET|JOF_GNAME|JOF_CHECKSTRICT|JOF_IC) \
+ /*
+ * Assign `val` to an argument binding that's stored in the stack frame or
+ * in an `ArgumentsObject`.
+ *
+ * Category: Variables and scopes
+ * Type: Setting binding values
+ * Operands: uint16_t argno
+ * Stack: val => val
+ */ \
+ MACRO(SetArg, set_arg, NULL, 3, 1, 1, JOF_QARG|JOF_NAME) \
+ /*
+ * Assign to an optimized local binding.
+ *
+ * Category: Variables and scopes
+ * Type: Setting binding values
+ * Operands: uint24_t localno
+ * Stack: v => v
+ */ \
+ MACRO(SetLocal, set_local, NULL, 4, 1, 1, JOF_LOCAL|JOF_NAME) \
+ /*
+ * Assign to an aliased binding.
+ *
+ * Implements: [SetMutableBinding for declarative Environment Records][1],
+ * in certain cases where it's known that the binding exists, is mutable,
+ * and has been initialized.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-declarative-environment-records-setmutablebinding-n-v-s
+ *
+ * Category: Variables and scopes
+ * Type: Setting binding values
+ * Operands: uint8_t hops, uint24_t slot
+ * Stack: val => val
+ */ \
+ MACRO(SetAliasedVar, set_aliased_var, NULL, 5, 1, 1, JOF_ENVCOORD|JOF_NAME|JOF_PROPSET|JOF_USES_ENV) \
+ /*
+ * Assign to an intrinsic.
+ *
+ * Nonstandard. Intrinsics are used in lieu of global bindings in self-
+ * hosted code. The value is actually stored in the intrinsics holder
+ * object, `GlobalObject::getIntrinsicsHolder`. (Self-hosted code doesn't
+ * have many global `var`s, but it has many `function`s.)
+ *
+ * Category: Variables and scopes
+ * Type: Setting binding values
+ * Operands: uint32_t nameIndex
+ * Stack: val => val
+ */ \
+ MACRO(SetIntrinsic, set_intrinsic, NULL, 5, 1, 1, JOF_ATOM|JOF_NAME) \
+ /*
+ * Push a lexical environment onto the environment chain.
+ *
+ * The `LexicalScope` indicated by `lexicalScopeIndex` determines the shape
+ * of the new `BlockLexicalEnvironmentObject`. All bindings in the new
+ * environment are marked as uninitialized.
+ *
+ * Implements: [Evaluation of *Block*][1], steps 1-4.
+ *
+ * #### Fine print for environment chain instructions
+ *
+ * The following rules for `JSOp::{Push,Pop}LexicalEnv` also apply to
+ * `JSOp::PushClassBodyEnv`, `JSOp::PushVarEnv`, and
+ * `JSOp::{Enter,Leave}With`.
+ *
+ * Each `JSOp::PopLexicalEnv` instruction matches a particular
+ * `JSOp::PushLexicalEnv` instruction in the same script and must have the
+ * same scope and stack depth as the instruction immediately after that
+ * `PushLexicalEnv`.
+ *
+ * `JSOp::PushLexicalEnv` enters a scope that extends to some set of
+ * instructions in the script. Code must not jump into or out of this
+ * region: control can enter only by executing `PushLexicalEnv` and can
+ * exit only by executing a `PopLexicalEnv` or by exception unwinding. (A
+ * `JSOp::PopLexicalEnv` is always emitted at the end of the block, and
+ * extra copies are emitted on "exit slides", where a `break`, `continue`,
+ * or `return` statement exits the scope.)
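+ *
+ * For example:
+ *
+ *     a: { let x = g(); if (x) break a; h(x); }
+ *
+ * emits one `PopLexicalEnv` on the `break a` exit slide and another at
+ * the normal end of the block.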
+ *
+ * The script's `JSScript::scopeNotes()` must identify exactly which
+ * instructions begin executing in this scope. Typically this means a
+ * single entry marking the contiguous chunk of bytecode from the
+ * instruction after `JSOp::PushLexicalEnv` to `JSOp::PopLexicalEnv`
+ * (inclusive); but if that range contains any instructions on exit slides,
+ * after a `JSOp::PopLexicalEnv`, then those must be correctly noted as
+ * *outside* the scope.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-block-runtime-semantics-evaluation
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands: uint32_t lexicalScopeIndex
+ * Stack: =>
+ */ \
+ MACRO(PushLexicalEnv, push_lexical_env, NULL, 5, 0, 0, JOF_SCOPE|JOF_USES_ENV) \
+ /*
+ * Pop a lexical or class-body environment from the environment chain.
+ *
+ * See `JSOp::PushLexicalEnv` for the fine print.
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(PopLexicalEnv, pop_lexical_env, NULL, 1, 0, 0, JOF_BYTE|JOF_USES_ENV) \
+ /*
+ * No-op instruction that indicates leaving an optimized lexical scope.
+ *
+ * If all bindings in a lexical scope are optimized into stack slots, then
+ * the runtime environment objects for that scope are optimized away. No
+ * `JSOp::{Push,Pop}LexicalEnv` instructions are emitted. However, the
+ * debugger still needs to be notified when control exits a scope; that's
+ * what this instruction does.
+ *
+ * The last instruction in a lexical or class-body scope, as indicated by
+ * scope notes, must be either this instruction (if the scope is optimized)
+ * or `JSOp::PopLexicalEnv` (if not).
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(DebugLeaveLexicalEnv, debug_leave_lexical_env, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * Replace the current block on the environment chain with a fresh block
+ * with uninitialized bindings. This implements the behavior of inducing a
+ * fresh lexical environment for every iteration of a for-in/of loop whose
+ * loop-head declares lexical variables that may be captured.
+ *
+ * The current environment must be a BlockLexicalEnvironmentObject.
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands: uint32_t lexicalScopeIndex
+ * Stack: =>
+ */ \
+ MACRO(RecreateLexicalEnv, recreate_lexical_env, NULL, 5, 0, 0, JOF_SCOPE) \
+ /*
+ * Like `JSOp::RecreateLexicalEnv`, but the values of all the bindings are
+ * copied from the old block to the new one. This is used for C-style
+ * `for(let ...; ...; ...)` loops.
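+ *
+ * For example, each iteration of this loop gets a fresh `i` whose
+ * value is copied from the previous iteration, so each closure
+ * captures a distinct binding:
+ *
+ *     let fs = [];
+ *     for (let i = 0; i < 3; i++) { fs.push(() => i); }
+ *     fs.map(f => f());  // [0, 1, 2]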
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands: uint32_t lexicalScopeIndex
+ * Stack: =>
+ */ \
+ MACRO(FreshenLexicalEnv, freshen_lexical_env, NULL, 5, 0, 0, JOF_SCOPE) \
+ /*
+ * Push a ClassBody environment onto the environment chain.
+ *
+ * Like `JSOp::PushLexicalEnv`, but pushes a `ClassBodyEnvironmentObject`
+ * rather than a `BlockLexicalEnvironmentObject`. `JSOp::PopLexicalEnv` is
+ * used to pop class-body environments as well as lexical environments.
+ *
+ * See `JSOp::PushLexicalEnv` for the fine print.
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands: uint32_t lexicalScopeIndex
+ * Stack: =>
+ */ \
+ MACRO(PushClassBodyEnv, push_class_body_env, NULL, 5, 0, 0, JOF_SCOPE) \
+ /*
+ * Push a var environment onto the environment chain.
+ *
+ * Like `JSOp::PushLexicalEnv`, but pushes a `VarEnvironmentObject` rather
+ * than a `BlockLexicalEnvironmentObject`. The difference is that
+ * non-strict direct `eval` can add bindings to a var environment; see
+ * `VarScope` in Scope.h.
+ *
+ * See `JSOp::PushLexicalEnv` for the fine print.
+ *
+ * There is no corresponding `JSOp::PopVarEnv` operation, because a
+ * `VarEnvironmentObject` is never popped from the environment chain.
+ *
+ * Implements: Places in the spec where the VariableEnvironment is set:
+ *
+ * - The bit in [PerformEval][1] where, in strict direct eval, the new
+ * eval scope is taken as *varEnv* and becomes "*runningContext*'s
+ * VariableEnvironment".
+ *
+ * - The weird scoping rules for functions with default parameter
+ * expressions, as specified in [FunctionDeclarationInstantiation][2]
+ * step 28 ("NOTE: A separate Environment Record is needed...").
+ *
+ * Note: The spec also pushes a new VariableEnvironment on entry to every
+ * function, but the VM takes care of that as part of pushing the stack
+ * frame, before the function script starts to run, so `JSOp::PushVarEnv` is
+ * not needed.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-performeval
+ * [2]: https://tc39.es/ecma262/#sec-functiondeclarationinstantiation
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands: uint32_t scopeIndex
+ * Stack: =>
+ */ \
+ MACRO(PushVarEnv, push_var_env, NULL, 5, 0, 0, JOF_SCOPE|JOF_USES_ENV) \
+ /*
+ * Push a `WithEnvironmentObject` wrapping ToObject(`val`) onto the
+ * environment chain.
+ *
+ * Implements: [Evaluation of `with` statements][1], steps 2-6.
+ *
+ * Operations that may need to consult a WithEnvironment can't be correctly
+ * implemented using optimized instructions like `JSOp::GetLocal`. A script
+ * must use the deoptimized `JSOp::GetName`, `BindName`, `SetName`, and
+ * `DelName` instead. Since those instructions don't work correctly with
+ * optimized locals and arguments, all bindings in scopes enclosing a
+ * `with` statement are marked as "aliased" and deoptimized too.
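+ *
+ * For example, in `function f(x) { with (o) { return x; } }`, the read
+ * of `x` can't be a simple `GetArg`: at runtime `o` might have an `x`
+ * property that should shadow the parameter, so the deoptimized
+ * `GetName` is used instead.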
+ *
+ * See `JSOp::PushLexicalEnv` for the fine print.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-with-statement-runtime-semantics-evaluation
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands: uint32_t staticWithIndex
+ * Stack: val =>
+ */ \
+ MACRO(EnterWith, enter_with, NULL, 5, 1, 0, JOF_SCOPE) \
+ /*
+ * Pop a `WithEnvironmentObject` from the environment chain.
+ *
+ * See `JSOp::PushLexicalEnv` for the fine print.
+ *
+ * Implements: [Evaluation of `with` statements][1], step 8.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-with-statement-runtime-semantics-evaluation
+ *
+ * Category: Variables and scopes
+ * Type: Entering and leaving environments
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(LeaveWith, leave_with, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * Push the current VariableEnvironment (the environment on the environment
+ * chain designated to receive new variables).
+ *
+ * Implements: [Annex B.3.3.1, changes to FunctionDeclarationInstantiation
+ * for block-level functions][1], step 1.a.ii.3.a, and similar steps in
+ * other Annex B.3.3 algorithms, when setting the function's second binding
+ * can't be optimized.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-web-compat-functiondeclarationinstantiation
+ *
+ * Category: Variables and scopes
+ * Type: Creating and deleting bindings
+ * Operands:
+ * Stack: => env
+ */ \
+ MACRO(BindVar, bind_var, NULL, 1, 0, 1, JOF_BYTE|JOF_USES_ENV) \
+ /*
+ * Check for conflicting bindings and then initialize them in global or
+ * sloppy eval scripts. This is required for global scripts with any
+ * top-level bindings, or any sloppy-eval scripts with any non-lexical
+ * top-level bindings.
+ *
+ * Implements: [GlobalDeclarationInstantiation][1] and
+ * [EvalDeclarationInstantiation][2] (except step 12).
+ *
+ * The `lastFun` argument is a GCThingIndex of the last hoisted top-level
+ * function that is part of top-level script initialization. The gcthings
+ * from index `0` through `lastFun` contain only scopes and hoisted
+ * functions.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-globaldeclarationinstantiation
+ * [2]: https://tc39.es/ecma262/#sec-evaldeclarationinstantiation
+ *
+ * Category: Variables and scopes
+ * Type: Creating and deleting bindings
+ * Operands: uint32_t lastFun
+ * Stack: =>
+ */ \
+ MACRO(GlobalOrEvalDeclInstantiation, global_or_eval_decl_instantiation, NULL, 5, 0, 0, JOF_GCTHING|JOF_USES_ENV) \
+ /*
+ * Look up a variable on the environment chain and delete it. Push `true`
+ * on success (if a binding was deleted, or if no such binding existed in
+ * the first place), `false` otherwise (most kinds of bindings can't be
+ * deleted).
+ *
+ * Implements: [`delete` *Identifier*][1], which [is a SyntaxError][2] in
+ * strict mode code.
+ *
+ * [1]: https://tc39.es/ecma262/#sec-delete-operator-runtime-semantics-evaluation
+ * [2]: https://tc39.es/ecma262/#sec-delete-operator-static-semantics-early-errors
+ *
+ * Category: Variables and scopes
+ * Type: Creating and deleting bindings
+ * Operands: uint32_t nameIndex
+ * Stack: => succeeded
+ */ \
+ MACRO(DelName, del_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_CHECKSLOPPY|JOF_USES_ENV) \
+ /*
+ * Create and push the `arguments` object for the current function activation.
+ *
+ * When it exists, `arguments` is stored in an ordinary local variable.
+ * `JSOp::Arguments` is used in function preludes, to populate that variable
+ * before the function body runs, *not* each time `arguments` appears in a
+ * function.
+ *
+ * If a function clearly doesn't use `arguments`, we optimize it away when
+ * emitting bytecode. The function's script won't use `JSOp::Arguments` at
+ * all.
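+ *
+ * For example, `function f() { return arguments[0]; }` runs this op in
+ * its prologue and stores the result in a local, while a function that
+ * never mentions `arguments` gets no such prologue code.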
+ *
+ * The current script must be a function script. This instruction must
+ * execute at most once per function activation.
+ *
+ * Category: Variables and scopes
+ * Type: Function environment setup
+ * Operands:
+ * Stack: => arguments
+ */ \
+ MACRO(Arguments, arguments, NULL, 1, 0, 1, JOF_BYTE|JOF_USES_ENV) \
+ /*
+ * Create and push the rest parameter array for the current function call.
+ *
+ * This must appear only in a script for a function that has a rest
+ * parameter.
+ *
+ * Category: Variables and scopes
+ * Type: Function environment setup
+ * Operands:
+ * Stack: => rest
+ */ \
+ MACRO(Rest, rest, NULL, 1, 0, 1, JOF_BYTE|JOF_IC) \
+ /*
+ * Determine the `this` value for the current function frame and push it
+ * onto the stack.
+ *
+ * Functions that have a `this` binding store it in a local variable
+ * named `".this"`; this instruction is used in the function prologue to
+ * get the value that initializes that variable. (This doesn't apply to
+ * arrow functions, because they don't have a `this` binding; also,
+ * `this` is optimized away if it's unused.)
+ *
+ * In non-strict functions, `this` is always an object. Undefined/null
+ * `this` is converted into the global `this` value. Other primitive values
+ * are boxed. See `js::BoxNonStrictThis`.
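+ *
+ * For example, in sloppy mode:
+ *
+ *     function f() { return this; }
+ *     f.call(null) === globalThis;  // true
+ *     typeof f.call(7);             // "object" (a boxed Number)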
+ *
+ * Category: Variables and scopes
+ * Type: Function environment setup
+ * Operands:
+ * Stack: => this
+ */ \
+ MACRO(FunctionThis, function_this, NULL, 1, 0, 1, JOF_BYTE) \
+ /*
+ * Pop the top value from the stack and discard it.
+ *
+ * Category: Stack operations
+ * Operands:
+ * Stack: v =>
+ */ \
+ MACRO(Pop, pop, NULL, 1, 1, 0, JOF_BYTE) \
+ /*
+ * Pop the top `n` values from the stack. `n` must be <= the current stack
+ * depth.
+ *
+ * Category: Stack operations
+ * Operands: uint16_t n
+ * Stack: v[n-1], ..., v[1], v[0] =>
+ */ \
+ MACRO(PopN, pop_n, NULL, 3, -1, 0, JOF_UINT16) \
+ /*
+ * Push a copy of the top value on the stack.
+ *
+ * Category: Stack operations
+ * Operands:
+ * Stack: v => v, v
+ */ \
+ MACRO(Dup, dup, NULL, 1, 1, 2, JOF_BYTE) \
+ /*
+ * Duplicate the top two values on the stack.
+ *
+ * Category: Stack operations
+ * Operands:
+ * Stack: v1, v2 => v1, v2, v1, v2
+ */ \
+ MACRO(Dup2, dup2, NULL, 1, 2, 4, JOF_BYTE) \
+ /*
+ * Push a copy of the nth value from the top of the stack.
+ *
+ * `n` must be less than the current stack depth.
+ *
+ * Category: Stack operations
+ * Operands: uint24_t n
+ * Stack: v[n], v[n-1], ..., v[1], v[0] =>
+ * v[n], v[n-1], ..., v[1], v[0], v[n]
+ */ \
+ MACRO(DupAt, dup_at, NULL, 4, 0, 1, JOF_UINT24) \
+ /*
+ * Swap the top two values on the stack.
+ *
+ * Category: Stack operations
+ * Operands:
+ * Stack: v1, v2 => v2, v1
+ */ \
+ MACRO(Swap, swap, NULL, 1, 2, 2, JOF_BYTE) \
+ /*
+ * Pick the nth element from the stack and move it to the top of the stack.
+ *
+ * Category: Stack operations
+ * Operands: uint8_t n
+ * Stack: v[n], v[n-1], ..., v[1], v[0] => v[n-1], ..., v[1], v[0], v[n]
+ */ \
+ MACRO(Pick, pick, NULL, 2, 0, 0, JOF_UINT8) \
+ /*
+ * Move the value on top of the stack under the `n`th element of the
+ * stack. `n` must not be 0.
+ *
+ * Category: Stack operations
+ * Operands: uint8_t n
+ * Stack: v[n], v[n-1], ..., v[1], v[0] => v[0], v[n], v[n-1], ..., v[1]
+ */ \
+ MACRO(Unpick, unpick, NULL, 2, 0, 0, JOF_UINT8) \
+ /*
+ * Do nothing. This is used when we need distinct bytecode locations for
+ * various mechanisms.
+ *
+ * Category: Other
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(Nop, nop, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * No-op instruction emitted immediately after `JSOp::*Eval` so that direct
+ * eval does not have to do slow pc-to-line mapping.
+ *
+ * The `lineno` operand should agree with this script's source notes about
+ * the line number of the preceding `*Eval` instruction.
+ *
+ * Category: Other
+ * Operands: uint32_t lineno
+ * Stack: =>
+ */ \
+ MACRO(Lineno, lineno, NULL, 5, 0, 0, JOF_UINT32) \
+ /*
+ * No-op instruction to hint that the top stack value is uninteresting.
+ *
+ * This affects only debug output and some error messages.
+ * In array destructuring, we emit bytecode that is roughly equivalent to
+ * `result.done ? undefined : result.value`.
+ * `NopDestructuring` is emitted after the `undefined`, so that the
+ * expression decompiler and disassembler know to casually ignore the
+ * possibility of `undefined`, and render the result of the conditional
+ * expression simply as "`result.value`".
+ *
+ * Category: Other
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(NopDestructuring, nop_destructuring, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * No-op instruction only emitted in some self-hosted functions. Not
+ * handled by the JITs or Baseline Interpreter so the script always runs in
+ * the C++ interpreter.
+ *
+ * Category: Other
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(ForceInterpreter, force_interpreter, NULL, 1, 0, 0, JOF_BYTE) \
+ /*
+ * Examine the top stack value, asserting that it's either a self-hosted
+ * function or a self-hosted intrinsic. This does nothing in a non-debug
+ * build.
+ *
+ * Category: Other
+ * Operands:
+ * Stack: checkVal => checkVal
+ */ \
+ MACRO(DebugCheckSelfHosted, debug_check_self_hosted, NULL, 1, 1, 1, JOF_BYTE) \
+ /*
+ * Break in the debugger, if one is attached. Otherwise this is a no-op.
+ *
+ * The [`Debugger` API][1] offers a way to hook into this instruction.
+ *
+ * Implements: [Evaluation for *DebuggerStatement*][2].
+ *
+ * [1]: https://developer.mozilla.org/en-US/docs/Tools/Debugger-API/Debugger
+ * [2]: https://tc39.es/ecma262/#sec-debugger-statement-runtime-semantics-evaluation
+ *
+ * Category: Other
+ * Operands:
+ * Stack: =>
+ */ \
+ MACRO(Debugger, debugger, NULL, 1, 0, 0, JOF_BYTE)
+
+// clang-format on
+
+/*
+ * In certain circumstances it may be useful to "pad out" the opcode space to
+ * a power of two. Use this macro to do so.
+ */
+#define FOR_EACH_TRAILING_UNUSED_OPCODE(MACRO) \
+ IF_RECORD_TUPLE(/* empty */, MACRO(230)) \
+ IF_RECORD_TUPLE(/* empty */, MACRO(231)) \
+ IF_RECORD_TUPLE(/* empty */, MACRO(232)) \
+ IF_RECORD_TUPLE(/* empty */, MACRO(233)) \
+ IF_RECORD_TUPLE(/* empty */, MACRO(234)) \
+ IF_RECORD_TUPLE(/* empty */, MACRO(235)) \
+ IF_RECORD_TUPLE(/* empty */, MACRO(236)) \
+ MACRO(237) \
+ MACRO(238) \
+ MACRO(239) \
+ MACRO(240) \
+ MACRO(241) \
+ MACRO(242) \
+ MACRO(243) \
+ MACRO(244) \
+ MACRO(245) \
+ MACRO(246) \
+ MACRO(247) \
+ MACRO(248) \
+ MACRO(249) \
+ MACRO(250) \
+ MACRO(251) \
+ MACRO(252) \
+ MACRO(253) \
+ MACRO(254) \
+ MACRO(255)
+
+namespace js {
+
+// Sanity check that opcode values and trailing unused opcodes completely cover
+// the [0, 256) range. Avert your eyes! You don't want to know how the
+// sausage gets made.
+
+// clang-format off
+#define PLUS_ONE(...) \
+ + 1
+constexpr int JSOP_LIMIT = 0 FOR_EACH_OPCODE(PLUS_ONE);
+#undef PLUS_ONE
+
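+// TRAILING_VALUE_AND_VALUE_PLUS_ONE(val) expands to `val) && (val + 1 ==`,
+// so the static_assert below unrolls to
+// (JSOP_LIMIT == first) && (first + 1 == second) && ... && (last + 1 == 256),
+// checking that the trailing values are consecutive and end at 255.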
+#define TRAILING_VALUE_AND_VALUE_PLUS_ONE(val) \
+ val) && (val + 1 ==
+static_assert((JSOP_LIMIT ==
+ FOR_EACH_TRAILING_UNUSED_OPCODE(TRAILING_VALUE_AND_VALUE_PLUS_ONE)
+ 256),
+ "trailing unused opcode values monotonically increase "
+ "from JSOP_LIMIT to 255");
+#undef TRAILING_VALUE_AND_VALUE_PLUS_ONE
+// clang-format on
+
+// Define JSOpLength_* constants for all ops.
+#define DEFINE_LENGTH_CONSTANT(op, op_snake, image, len, ...) \
+ constexpr size_t JSOpLength_##op = len;
+FOR_EACH_OPCODE(DEFINE_LENGTH_CONSTANT)
+#undef DEFINE_LENGTH_CONSTANT
+
+} // namespace js
+
+/*
+ * JS operation bytecodes.
+ */
+enum class JSOp : uint8_t {
+#define ENUMERATE_OPCODE(op, ...) op,
+ FOR_EACH_OPCODE(ENUMERATE_OPCODE)
+#undef ENUMERATE_OPCODE
+};
+
+#endif // vm_Opcodes_h
diff --git a/js/src/vm/PIC.cpp b/js/src/vm/PIC.cpp
new file mode 100644
index 0000000000..819b526d80
--- /dev/null
+++ b/js/src/vm/PIC.cpp
@@ -0,0 +1,372 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/PIC.h"
+
+#include "gc/GCContext.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/Realm.h"
+#include "vm/SelfHosting.h"
+
+#include "gc/GCContext-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+
+template <typename Category>
+void PICChain<Category>::addStub(JSObject* obj, CatStub* stub) {
+ MOZ_ASSERT(stub);
+ MOZ_ASSERT(!stub->next());
+
+ AddCellMemory(obj, sizeof(CatStub), MemoryUse::ForOfPICStub);
+
+ if (!stubs_) {
+ stubs_ = stub;
+ return;
+ }
+
+ CatStub* cur = stubs_;
+ while (cur->next()) {
+ cur = cur->next();
+ }
+ cur->append(stub);
+}
+
+bool js::ForOfPIC::Chain::initialize(JSContext* cx) {
+ MOZ_ASSERT(!initialized_);
+
+ // Get the canonical Array.prototype
+ Rooted<NativeObject*> arrayProto(
+ cx, GlobalObject::getOrCreateArrayPrototype(cx, cx->global()));
+ if (!arrayProto) {
+ return false;
+ }
+
+ // Get the canonical ArrayIterator.prototype
+ Rooted<NativeObject*> arrayIteratorProto(
+ cx, GlobalObject::getOrCreateArrayIteratorPrototype(cx, cx->global()));
+ if (!arrayIteratorProto) {
+ return false;
+ }
+
+ // From this point on, we can't fail. Set initialized and fill the fields
+ // for the canonical Array.prototype and ArrayIterator.prototype objects.
+ initialized_ = true;
+ arrayProto_ = arrayProto;
+ arrayIteratorProto_ = arrayIteratorProto;
+
+  // The shortcut returns below mean Array for-of will never be optimizable,
+  // so set disabled_ now and clear it later when we succeed.
+ disabled_ = true;
+
+ // Look up Array.prototype[@@iterator], ensure it's a slotful shape.
+ mozilla::Maybe<PropertyInfo> iterProp = arrayProto->lookup(
+ cx, PropertyKey::Symbol(cx->wellKnownSymbols().iterator));
+ if (iterProp.isNothing() || !iterProp->isDataProperty()) {
+ return true;
+ }
+
+ // Get the referred value, and ensure it holds the canonical ArrayValues
+ // function.
+ Value iterator = arrayProto->getSlot(iterProp->slot());
+ JSFunction* iterFun;
+ if (!IsFunctionObject(iterator, &iterFun)) {
+ return true;
+ }
+ if (!IsSelfHostedFunctionWithName(iterFun, cx->names().ArrayValues)) {
+ return true;
+ }
+
+ // Look up the 'next' value on ArrayIterator.prototype
+ mozilla::Maybe<PropertyInfo> nextProp =
+ arrayIteratorProto->lookup(cx, cx->names().next);
+ if (nextProp.isNothing() || !nextProp->isDataProperty()) {
+ return true;
+ }
+
+ // Get the referred value, ensure it holds the canonical ArrayIteratorNext
+ // function.
+ Value next = arrayIteratorProto->getSlot(nextProp->slot());
+ JSFunction* nextFun;
+ if (!IsFunctionObject(next, &nextFun)) {
+ return true;
+ }
+ if (!IsSelfHostedFunctionWithName(nextFun, cx->names().ArrayIteratorNext)) {
+ return true;
+ }
+
+ disabled_ = false;
+ arrayProtoShape_ = arrayProto->shape();
+ arrayProtoIteratorSlot_ = iterProp->slot();
+ canonicalIteratorFunc_ = iterator;
+ arrayIteratorProtoShape_ = arrayIteratorProto->shape();
+ arrayIteratorProtoNextSlot_ = nextProp->slot();
+ canonicalNextFunc_ = next;
+ return true;
+}
+
+bool js::ForOfPIC::Chain::tryOptimizeArray(JSContext* cx,
+ Handle<ArrayObject*> array,
+ bool* optimized) {
+ MOZ_ASSERT(optimized);
+
+ *optimized = false;
+
+ if (!initialized_) {
+ // If PIC is not initialized, initialize it.
+ if (!initialize(cx)) {
+ return false;
+ }
+
+ } else if (!disabled_ && !isArrayStateStillSane()) {
+ // Otherwise, if array state is no longer sane, reinitialize.
+ reset(cx);
+
+ if (!initialize(cx)) {
+ return false;
+ }
+ }
+ MOZ_ASSERT(initialized_);
+
+ // If PIC is disabled, don't bother trying to optimize.
+ if (disabled_) {
+ return true;
+ }
+
+ // By the time we get here, we should have a sane array state to work with.
+ MOZ_ASSERT(isArrayStateStillSane());
+
+ // Ensure array's prototype is the actual Array.prototype
+ if (array->staticPrototype() != arrayProto_) {
+ return true;
+ }
+
+ // Check if stub already exists.
+ if (hasMatchingStub(array)) {
+ *optimized = true;
+ return true;
+ }
+
+ // Ensure array doesn't define @@iterator directly.
+ if (array->lookup(cx, PropertyKey::Symbol(cx->wellKnownSymbols().iterator))) {
+ return true;
+ }
+
+  // If the number of stubs is about to exceed the limit, throw away the
+  // entire existing cache before adding new stubs. We shouldn't really have
+  // heavy churn on these.
+ if (numStubs() >= MAX_STUBS) {
+ eraseChain(cx);
+ }
+
+ // Good to optimize now, create stub to add.
+ Rooted<Shape*> shape(cx, array->shape());
+ Stub* stub = cx->new_<Stub>(shape);
+ if (!stub) {
+ return false;
+ }
+
+ // Add the stub.
+ addStub(picObject_, stub);
+
+ *optimized = true;
+ return true;
+}
+
+bool js::ForOfPIC::Chain::tryOptimizeArrayIteratorNext(JSContext* cx,
+ bool* optimized) {
+ MOZ_ASSERT(optimized);
+
+ *optimized = false;
+
+ if (!initialized_) {
+ // If PIC is not initialized, initialize it.
+ if (!initialize(cx)) {
+ return false;
+ }
+ } else if (!disabled_ && !isArrayNextStillSane()) {
+ // Otherwise, if array iterator state is no longer sane, reinitialize.
+ reset(cx);
+
+ if (!initialize(cx)) {
+ return false;
+ }
+ }
+ MOZ_ASSERT(initialized_);
+
+ // If PIC is disabled, don't bother trying to optimize.
+ if (disabled_) {
+ return true;
+ }
+
+ // By the time we get here, we should have a sane iterator state to work with.
+ MOZ_ASSERT(isArrayNextStillSane());
+
+ *optimized = true;
+ return true;
+}
+
+bool js::ForOfPIC::Chain::hasMatchingStub(ArrayObject* obj) {
+ // Ensure PIC is initialized and not disabled.
+ MOZ_ASSERT(initialized_ && !disabled_);
+
+ // Check if there is a matching stub.
+ for (Stub* stub = stubs(); stub != nullptr; stub = stub->next()) {
+ if (stub->shape() == obj->shape()) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool js::ForOfPIC::Chain::isArrayStateStillSane() {
+ // Ensure that canonical Array.prototype has matching shape.
+ if (arrayProto_->shape() != arrayProtoShape_) {
+ return false;
+ }
+
+ // Ensure that Array.prototype[@@iterator] contains the
+ // canonical iterator function.
+ if (arrayProto_->getSlot(arrayProtoIteratorSlot_) != canonicalIteratorFunc_) {
+ return false;
+ }
+
+ // Chain to isArrayNextStillSane.
+ return isArrayNextStillSane();
+}
+
+void js::ForOfPIC::Chain::reset(JSContext* cx) {
+ // Should never reset a disabled_ stub.
+ MOZ_ASSERT(!disabled_);
+
+ // Erase the chain.
+ eraseChain(cx);
+
+ arrayProto_ = nullptr;
+ arrayIteratorProto_ = nullptr;
+
+ arrayProtoShape_ = nullptr;
+ arrayProtoIteratorSlot_ = -1;
+ canonicalIteratorFunc_ = UndefinedValue();
+
+ arrayIteratorProtoShape_ = nullptr;
+ arrayIteratorProtoNextSlot_ = -1;
+ canonicalNextFunc_ = UndefinedValue();
+
+ initialized_ = false;
+}
+
+void js::ForOfPIC::Chain::eraseChain(JSContext* cx) {
+ // Should never need to clear the chain of a disabled stub.
+ MOZ_ASSERT(!disabled_);
+ freeAllStubs(cx->gcContext());
+}
+
+// Trace the pointers stored directly on the stub.
+void js::ForOfPIC::Chain::trace(JSTracer* trc) {
+ TraceEdge(trc, &picObject_, "ForOfPIC object");
+
+ if (!initialized_ || disabled_) {
+ return;
+ }
+
+ TraceEdge(trc, &arrayProto_, "ForOfPIC Array.prototype.");
+ TraceEdge(trc, &arrayIteratorProto_, "ForOfPIC ArrayIterator.prototype.");
+
+ TraceEdge(trc, &arrayProtoShape_, "ForOfPIC Array.prototype shape.");
+ TraceEdge(trc, &arrayIteratorProtoShape_,
+ "ForOfPIC ArrayIterator.prototype shape.");
+
+ TraceEdge(trc, &canonicalIteratorFunc_, "ForOfPIC ArrayValues builtin.");
+ TraceEdge(trc, &canonicalNextFunc_,
+ "ForOfPIC ArrayIterator.prototype.next builtin.");
+
+ for (Stub* stub = stubs_; stub; stub = stub->next()) {
+ stub->trace(trc);
+ }
+}
+
+void js::ForOfPIC::Stub::trace(JSTracer* trc) {
+ TraceEdge(trc, &shape_, "ForOfPIC::Stub::shape_");
+}
+
+static void ForOfPIC_finalize(JS::GCContext* gcx, JSObject* obj) {
+ if (ForOfPIC::Chain* chain =
+ ForOfPIC::fromJSObject(&obj->as<NativeObject>())) {
+ chain->finalize(gcx, obj);
+ }
+}
+
+void js::ForOfPIC::Chain::finalize(JS::GCContext* gcx, JSObject* obj) {
+ freeAllStubs(gcx);
+ gcx->delete_(obj, this, MemoryUse::ForOfPIC);
+}
+
+void js::ForOfPIC::Chain::freeAllStubs(JS::GCContext* gcx) {
+ Stub* stub = stubs_;
+ while (stub) {
+ Stub* next = stub->next();
+ gcx->delete_(picObject_, stub, MemoryUse::ForOfPICStub);
+ stub = next;
+ }
+ stubs_ = nullptr;
+}
+
+static void ForOfPIC_traceObject(JSTracer* trc, JSObject* obj) {
+ if (ForOfPIC::Chain* chain =
+ ForOfPIC::fromJSObject(&obj->as<NativeObject>())) {
+ chain->trace(trc);
+ }
+}
+
+static const JSClassOps ForOfPICClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ ForOfPIC_finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ ForOfPIC_traceObject, // trace
+};
+
+const JSClass ForOfPICObject::class_ = {
+ "ForOfPIC",
+ JSCLASS_HAS_RESERVED_SLOTS(SlotCount) | JSCLASS_BACKGROUND_FINALIZE,
+ &ForOfPICClassOps};
+
+/* static */
+NativeObject* js::ForOfPIC::createForOfPICObject(JSContext* cx,
+ Handle<GlobalObject*> global) {
+ cx->check(global);
+ ForOfPICObject* obj =
+ NewTenuredObjectWithGivenProto<ForOfPICObject>(cx, nullptr);
+ if (!obj) {
+ return nullptr;
+ }
+ ForOfPIC::Chain* chain = cx->new_<ForOfPIC::Chain>(obj);
+ if (!chain) {
+ return nullptr;
+ }
+ InitReservedSlot(obj, ForOfPICObject::ChainSlot, chain, MemoryUse::ForOfPIC);
+ return obj;
+}
+
+/* static */ js::ForOfPIC::Chain* js::ForOfPIC::create(JSContext* cx) {
+ MOZ_ASSERT(!cx->global()->getForOfPICObject());
+ Rooted<GlobalObject*> global(cx, cx->global());
+ NativeObject* obj = GlobalObject::getOrCreateForOfPICObject(cx, global);
+ if (!obj) {
+ return nullptr;
+ }
+ return fromJSObject(obj);
+}
diff --git a/js/src/vm/PIC.h b/js/src/vm/PIC.h
new file mode 100644
index 0000000000..e2f8cb37a6
--- /dev/null
+++ b/js/src/vm/PIC.h
@@ -0,0 +1,246 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PIC_h
+#define vm_PIC_h
+
+#include "vm/GlobalObject.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class Shape;
+
+template <typename Category>
+class PICChain;
+
+/*
+ * The basic PICStub just has a pointer to the next stub.
+ */
+template <typename Category>
+class PICStub {
+ friend class PICChain<Category>;
+
+ private:
+ using CatStub = typename Category::Stub;
+ using CatChain = typename Category::Chain;
+
+ protected:
+ CatStub* next_;
+
+ PICStub() : next_(nullptr) {}
+ explicit PICStub(const CatStub* next) : next_(next) { MOZ_ASSERT(next_); }
+ explicit PICStub(const CatStub& other) : next_(other.next_) {}
+
+ public:
+ CatStub* next() const { return next_; }
+
+ protected:
+ void append(CatStub* stub) {
+ MOZ_ASSERT(!next_);
+ MOZ_ASSERT(!stub->next_);
+ next_ = stub;
+ }
+};
+
+/*
+ * The basic PIC just has a pointer to the list of stubs.
+ */
+template <typename Category>
+class PICChain {
+ private:
+ using CatStub = typename Category::Stub;
+ using CatChain = typename Category::Chain;
+
+ protected:
+ CatStub* stubs_;
+
+ PICChain() : stubs_(nullptr) {}
+ // PICs should never be copy constructed.
+ PICChain(const PICChain<Category>& other) = delete;
+
+ public:
+ CatStub* stubs() const { return stubs_; }
+
+ void addStub(JSObject* obj, CatStub* stub);
+
+ unsigned numStubs() const {
+ unsigned count = 0;
+ for (CatStub* stub = stubs_; stub; stub = stub->next()) {
+ count++;
+ }
+ return count;
+ }
+};
+
+// Class for the object that holds the ForOfPIC chain.
+class ForOfPICObject : public NativeObject {
+ public:
+ enum { ChainSlot, SlotCount };
+
+ static const JSClass class_;
+};
+
+/*
+ * ForOfPIC defines a PIC category for optimizing for-of operations.
+ */
+struct ForOfPIC {
+ /* Forward declarations so template-substitution works. */
+ class Stub;
+ class Chain;
+
+ ForOfPIC() = delete;
+ ForOfPIC(const ForOfPIC& other) = delete;
+
+ using BaseStub = PICStub<ForOfPIC>;
+ using BaseChain = PICChain<ForOfPIC>;
+
+ /*
+ * A ForOfPIC has only one kind of stub for now: one that holds the shape
+ * of an array object that does not override its @@iterator property.
+ */
+ class Stub : public BaseStub {
+ private:
+ // Shape of matching array object.
+ const HeapPtr<Shape*> shape_;
+
+ public:
+ explicit Stub(Shape* shape) : BaseStub(), shape_(shape) {
+ MOZ_ASSERT(shape_);
+ }
+
+ Shape* shape() { return shape_; }
+
+ void trace(JSTracer* trc);
+ };
+
+ /*
+ * A ForOfPIC chain holds the following:
+ *
+ * Array.prototype (arrayProto_)
+ * To ensure that the incoming array has the standard proto.
+ *
+ * Array.prototype's shape (arrayProtoShape_)
+ * To ensure that Array.prototype has not been modified.
+ *
+ * ArrayIterator.prototype
+ * ArrayIterator.prototype's shape
+ * (arrayIteratorProto_, arrayIteratorProtoShape_)
+ * To ensure that an ArrayIterator.prototype has not been modified.
+ *
+ * Array.prototype's slot number for @@iterator
+ * Array.prototype's canonical value for @@iterator
+ * (arrayProtoIteratorSlot_, canonicalIteratorFunc_)
+ * To quickly retrieve and ensure that the iterator constructor
+ * stored in the slot has not changed.
+ *
+ * ArrayIterator.prototype's slot number for 'next'
+ * ArrayIterator.prototype's canonical value for 'next'
+ * (arrayIteratorProtoNextSlot_, canonicalNextFunc_)
+ * To quickly retrieve and ensure that the 'next' method for
+ * ArrayIterator objects has not changed.
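+ *
+ * If any of these checks fails later, the chain is reset. For example,
+ * running `Array.prototype[Symbol.iterator] = function () {}` changes
+ * the value stored in the @@iterator slot, so isArrayStateStillSane()
+ * returns false and the next tryOptimizeArray() call reinitializes
+ * (and, here, disables) the PIC.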
+ */
+ class Chain : public BaseChain {
+ private:
+ // Pointer to owning JSObject for memory accounting purposes.
+ const GCPtr<JSObject*> picObject_;
+
+ // Pointer to canonical Array.prototype and ArrayIterator.prototype
+ GCPtr<NativeObject*> arrayProto_;
+ GCPtr<NativeObject*> arrayIteratorProto_;
+
+ // Shape of matching Array.prototype object, and slot containing
+ // the @@iterator for it, and the canonical value.
+ GCPtr<Shape*> arrayProtoShape_;
+ uint32_t arrayProtoIteratorSlot_;
+ GCPtr<Value> canonicalIteratorFunc_;
+
+ // Shape of matching ArrayIteratorProto, and slot containing
+ // the 'next' property, and the canonical value.
+ GCPtr<Shape*> arrayIteratorProtoShape_;
+ uint32_t arrayIteratorProtoNextSlot_;
+ GCPtr<Value> canonicalNextFunc_;
+
+ // Initialization flag marking lazy initialization of above fields.
+ bool initialized_;
+
+ // Disabled flag is set when we don't want to try optimizing anymore
+ // because core objects were changed.
+ bool disabled_;
+
+ static const unsigned MAX_STUBS = 10;
+
+ public:
+ explicit Chain(JSObject* picObject)
+ : BaseChain(),
+ picObject_(picObject),
+ arrayProto_(nullptr),
+ arrayIteratorProto_(nullptr),
+ arrayProtoShape_(nullptr),
+ arrayProtoIteratorSlot_(-1),
+ canonicalIteratorFunc_(UndefinedValue()),
+ arrayIteratorProtoShape_(nullptr),
+ arrayIteratorProtoNextSlot_(-1),
+ initialized_(false),
+ disabled_(false) {}
+
+ // Initialize the canonical iterator function.
+ bool initialize(JSContext* cx);
+
+ // Try to optimize this chain for an object.
+ bool tryOptimizeArray(JSContext* cx, Handle<ArrayObject*> array,
+ bool* optimized);
+
+ // Check if %ArrayIteratorPrototype% still uses the default "next" method.
+ bool tryOptimizeArrayIteratorNext(JSContext* cx, bool* optimized);
+
+ void trace(JSTracer* trc);
+ void finalize(JS::GCContext* gcx, JSObject* obj);
+
+ void freeAllStubs(JS::GCContext* gcx);
+
+ private:
+ // Check if the global array-related objects have not been messed with
+ // in a way that would disable this PIC.
+ bool isArrayStateStillSane();
+
+ // Check if ArrayIterator.next is still optimizable.
+ inline bool isArrayNextStillSane() {
+ return (arrayIteratorProto_->shape() == arrayIteratorProtoShape_) &&
+ (arrayIteratorProto_->getSlot(arrayIteratorProtoNextSlot_) ==
+ canonicalNextFunc_);
+ }
+
+ // Check if a matching optimized stub for the given object exists.
+ bool hasMatchingStub(ArrayObject* obj);
+
+ // Reset the PIC and all info associated with it.
+ void reset(JSContext* cx);
+
+ // Erase the stub chain.
+ void eraseChain(JSContext* cx);
+ };
+
+ static NativeObject* createForOfPICObject(JSContext* cx,
+ Handle<GlobalObject*> global);
+
+ static inline Chain* fromJSObject(NativeObject* obj) {
+ MOZ_ASSERT(obj->is<ForOfPICObject>());
+ return obj->maybePtrFromReservedSlot<Chain>(ForOfPICObject::ChainSlot);
+ }
+ static inline Chain* getOrCreate(JSContext* cx) {
+ NativeObject* obj = cx->global()->getForOfPICObject();
+ if (obj) {
+ return fromJSObject(obj);
+ }
+ return create(cx);
+ }
+ static Chain* create(JSContext* cx);
+};
+
+} // namespace js
+
+#endif /* vm_PIC_h */
diff --git a/js/src/vm/PlainObject-inl.h b/js/src/vm/PlainObject-inl.h
new file mode 100644
index 0000000000..2986577d5e
--- /dev/null
+++ b/js/src/vm/PlainObject-inl.h
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PlainObject_inl_h
+#define vm_PlainObject_inl_h
+
+#include "vm/PlainObject.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF
+#include "mozilla/Attributes.h" // MOZ_ALWAYS_INLINE
+
+#include "gc/AllocKind.h" // js::gc::Heap
+#include "js/RootingAPI.h" // JS::Handle, JS::Rooted, JS::MutableHandle
+#include "js/Value.h" // JS::Value, JS_IS_CONSTRUCTING
+#include "vm/JSFunction.h" // JSFunction
+#include "vm/JSObject.h" // js::GenericObject, js::NewObjectKind
+#include "vm/NativeObject.h" // js::NativeObject::create
+#include "vm/Shape.h" // js::Shape
+
+#include "gc/ObjectKind-inl.h" // js::gc::GetGCObjectKind
+#include "vm/JSObject-inl.h" // js::GetInitialHeap, js::NewBuiltinClassInstance
+#include "vm/NativeObject-inl.h" // js::NativeObject::{create,setLastProperty}
+
+/* static */ inline js::PlainObject* js::PlainObject::createWithShape(
+ JSContext* cx, JS::Handle<SharedShape*> shape, gc::AllocKind kind,
+ NewObjectKind newKind) {
+ MOZ_ASSERT(shape->getObjectClass() == &PlainObject::class_);
+ gc::Heap heap = GetInitialHeap(newKind, &PlainObject::class_);
+
+ MOZ_ASSERT(gc::CanChangeToBackgroundAllocKind(kind, &PlainObject::class_));
+ kind = gc::ForegroundToBackgroundAllocKind(kind);
+
+ NativeObject* obj = NativeObject::create(cx, kind, heap, shape);
+ if (!obj) {
+ return nullptr;
+ }
+
+ return &obj->as<PlainObject>();
+}
+
+/* static */ inline js::PlainObject* js::PlainObject::createWithShape(
+ JSContext* cx, JS::Handle<SharedShape*> shape, NewObjectKind newKind) {
+ gc::AllocKind kind = gc::GetGCObjectKind(shape->numFixedSlots());
+ return createWithShape(cx, shape, kind, newKind);
+}
+
+/* static */ inline js::PlainObject* js::PlainObject::createWithTemplate(
+ JSContext* cx, JS::Handle<PlainObject*> templateObject) {
+ JS::Rooted<SharedShape*> shape(cx, templateObject->sharedShape());
+ return createWithShape(cx, shape);
+}
+
+inline js::gc::AllocKind js::PlainObject::allocKindForTenure() const {
+ gc::AllocKind kind = gc::GetGCObjectFixedSlotsKind(numFixedSlots());
+ MOZ_ASSERT(!gc::IsBackgroundFinalized(kind));
+ MOZ_ASSERT(gc::CanChangeToBackgroundAllocKind(kind, getClass()));
+ return gc::ForegroundToBackgroundAllocKind(kind);
+}
+
+namespace js {
+
+static MOZ_ALWAYS_INLINE bool CreateThis(JSContext* cx,
+ JS::Handle<JSFunction*> callee,
+ JS::Handle<JSObject*> newTarget,
+ NewObjectKind newKind,
+ JS::MutableHandle<JS::Value> thisv) {
+ if (callee->constructorNeedsUninitializedThis()) {
+ thisv.setMagic(JS_UNINITIALIZED_LEXICAL);
+ return true;
+ }
+
+ MOZ_ASSERT(thisv.isMagic(JS_IS_CONSTRUCTING));
+
+ Rooted<SharedShape*> shape(cx, ThisShapeForFunction(cx, callee, newTarget));
+ if (!shape) {
+ return false;
+ }
+
+ PlainObject* obj = PlainObject::createWithShape(cx, shape, newKind);
+ if (!obj) {
+ return false;
+ }
+
+ MOZ_ASSERT(obj->nonCCWRealm() == callee->realm());
+ thisv.setObject(*obj);
+ return true;
+}
+
+} // namespace js
+
+#endif // vm_PlainObject_inl_h
diff --git a/js/src/vm/PlainObject.cpp b/js/src/vm/PlainObject.cpp
new file mode 100644
index 0000000000..e238203e27
--- /dev/null
+++ b/js/src/vm/PlainObject.cpp
@@ -0,0 +1,334 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * JS object implementation.
+ */
+
+#include "vm/PlainObject-inl.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include "jspubtd.h" // JSProto_Object
+
+#include "ds/IdValuePair.h" // js::IdValuePair
+#include "gc/AllocKind.h" // js::gc::AllocKind
+#include "vm/JSContext.h" // JSContext
+#include "vm/JSFunction.h" // JSFunction
+#include "vm/JSObject.h" // JSObject, js::GetPrototypeFromConstructor
+#include "vm/TaggedProto.h" // js::TaggedProto
+
+#include "vm/JSFunction-inl.h"
+#include "vm/JSObject-inl.h" // js::NewObjectWithGroup, js::NewObjectGCKind
+
+using namespace js;
+
+using JS::Handle;
+using JS::Rooted;
+
+static MOZ_ALWAYS_INLINE SharedShape* GetPlainObjectShapeWithProto(
+ JSContext* cx, JSObject* proto, gc::AllocKind kind) {
+ MOZ_ASSERT(JSCLASS_RESERVED_SLOTS(&PlainObject::class_) == 0,
+ "all slots can be used for properties");
+ uint32_t nfixed = GetGCKindSlots(kind);
+ return SharedShape::getInitialShape(cx, &PlainObject::class_, cx->realm(),
+ TaggedProto(proto), nfixed);
+}
+
+SharedShape* js::ThisShapeForFunction(JSContext* cx, Handle<JSFunction*> callee,
+ Handle<JSObject*> newTarget) {
+ MOZ_ASSERT(cx->realm() == callee->realm());
+ MOZ_ASSERT(!callee->constructorNeedsUninitializedThis());
+
+ Rooted<JSObject*> proto(cx);
+ if (!GetPrototypeFromConstructor(cx, newTarget, JSProto_Object, &proto)) {
+ return nullptr;
+ }
+
+ js::gc::AllocKind allocKind = NewObjectGCKind();
+ if (!JSFunction::getAllocKindForThis(cx, callee, allocKind)) {
+ return nullptr;
+ }
+
+ SharedShape* res;
+ if (proto && proto != cx->global()->maybeGetPrototype(JSProto_Object)) {
+ res = GetPlainObjectShapeWithProto(cx, proto, allocKind);
+ } else {
+ res = GlobalObject::getPlainObjectShapeWithDefaultProto(cx, allocKind);
+ }
+
+ MOZ_ASSERT_IF(res, res->realm() == callee->realm());
+
+ return res;
+}
+
+#ifdef DEBUG
+void PlainObject::assertHasNoNonWritableOrAccessorPropExclProto() const {
+ // Check the most recent MaxCount properties to not slow down debug builds too
+ // much.
+ static constexpr size_t MaxCount = 8;
+
+ size_t count = 0;
+ PropertyName* protoName = runtimeFromMainThread()->commonNames->proto;
+
+ for (ShapePropertyIter<NoGC> iter(shape()); !iter.done(); iter++) {
+ // __proto__ is always allowed.
+ if (iter->key().isAtom(protoName)) {
+ continue;
+ }
+
+ MOZ_ASSERT(iter->isDataProperty());
+ MOZ_ASSERT(iter->writable());
+
+ count++;
+ if (count > MaxCount) {
+ return;
+ }
+ }
+}
+#endif
+
+// static
+PlainObject* PlainObject::createWithTemplateFromDifferentRealm(
+ JSContext* cx, Handle<PlainObject*> templateObject) {
+ MOZ_ASSERT(cx->realm() != templateObject->realm(),
+ "Use createWithTemplate() for same-realm objects");
+
+ // Currently only implemented for null-proto.
+ MOZ_ASSERT(templateObject->staticPrototype() == nullptr);
+
+ // The object mustn't be in dictionary mode.
+ MOZ_ASSERT(!templateObject->shape()->isDictionary());
+
+ TaggedProto proto = TaggedProto(nullptr);
+ SharedShape* templateShape = templateObject->sharedShape();
+ Rooted<SharedPropMap*> map(cx, templateShape->propMap());
+
+ Rooted<SharedShape*> shape(
+ cx, SharedShape::getInitialOrPropMapShape(
+ cx, &PlainObject::class_, cx->realm(), proto,
+ templateShape->numFixedSlots(), map,
+ templateShape->propMapLength(), templateShape->objectFlags()));
+ if (!shape) {
+ return nullptr;
+ }
+ return createWithShape(cx, shape);
+}
+
+// static
+SharedShape* GlobalObject::createPlainObjectShapeWithDefaultProto(
+ JSContext* cx, gc::AllocKind kind) {
+ PlainObjectSlotsKind slotsKind = PlainObjectSlotsKindFromAllocKind(kind);
+ HeapPtr<SharedShape*>& shapeRef =
+ cx->global()->data().plainObjectShapesWithDefaultProto[slotsKind];
+ MOZ_ASSERT(!shapeRef);
+
+ JSObject* proto = &cx->global()->getObjectPrototype();
+ SharedShape* shape = GetPlainObjectShapeWithProto(cx, proto, kind);
+ if (!shape) {
+ return nullptr;
+ }
+
+ shapeRef.init(shape);
+ return shape;
+}
+
+PlainObject* js::NewPlainObject(JSContext* cx, NewObjectKind newKind) {
+ constexpr gc::AllocKind allocKind = gc::AllocKind::OBJECT0;
+ MOZ_ASSERT(gc::GetGCObjectKind(&PlainObject::class_) == allocKind);
+
+ Rooted<SharedShape*> shape(
+ cx, GlobalObject::getPlainObjectShapeWithDefaultProto(cx, allocKind));
+ if (!shape) {
+ return nullptr;
+ }
+
+ return PlainObject::createWithShape(cx, shape, allocKind, newKind);
+}
+
+PlainObject* js::NewPlainObjectWithAllocKind(JSContext* cx,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind) {
+ Rooted<SharedShape*> shape(
+ cx, GlobalObject::getPlainObjectShapeWithDefaultProto(cx, allocKind));
+ if (!shape) {
+ return nullptr;
+ }
+
+ return PlainObject::createWithShape(cx, shape, allocKind, newKind);
+}
+
+PlainObject* js::NewPlainObjectWithProto(JSContext* cx, HandleObject proto,
+ NewObjectKind newKind) {
+ // Use a faster path if |proto| is %Object.prototype% (the common case).
+ if (proto && proto == cx->global()->maybeGetPrototype(JSProto_Object)) {
+ return NewPlainObject(cx, newKind);
+ }
+
+ constexpr gc::AllocKind allocKind = gc::AllocKind::OBJECT0;
+ MOZ_ASSERT(gc::GetGCObjectKind(&PlainObject::class_) == allocKind);
+
+ Rooted<SharedShape*> shape(
+ cx, GetPlainObjectShapeWithProto(cx, proto, allocKind));
+ if (!shape) {
+ return nullptr;
+ }
+
+ return PlainObject::createWithShape(cx, shape, allocKind, newKind);
+}
+
+PlainObject* js::NewPlainObjectWithProtoAndAllocKind(JSContext* cx,
+ HandleObject proto,
+ gc::AllocKind allocKind,
+ NewObjectKind newKind) {
+ // Use a faster path if |proto| is %Object.prototype% (the common case).
+ if (proto && proto == cx->global()->maybeGetPrototype(JSProto_Object)) {
+ return NewPlainObjectWithAllocKind(cx, allocKind, newKind);
+ }
+
+ Rooted<SharedShape*> shape(
+ cx, GetPlainObjectShapeWithProto(cx, proto, allocKind));
+ if (!shape) {
+ return nullptr;
+ }
+
+ return PlainObject::createWithShape(cx, shape, allocKind, newKind);
+}
+
+void js::NewPlainObjectWithPropsCache::add(SharedShape* shape) {
+ MOZ_ASSERT(shape);
+ MOZ_ASSERT(shape->slotSpan() > 0);
+ for (size_t i = NumEntries - 1; i > 0; i--) {
+ entries_[i] = entries_[i - 1];
+ }
+ entries_[0] = shape;
+}
+
+static bool ShapeMatches(IdValuePair* properties, size_t nproperties,
+ SharedShape* shape) {
+ if (shape->slotSpan() != nproperties) {
+ return false;
+ }
+ SharedShapePropertyIter<NoGC> iter(shape);
+ for (size_t i = nproperties; i > 0; i--) {
+ MOZ_ASSERT(iter->isDataProperty());
+ MOZ_ASSERT(iter->flags() == PropertyFlags::defaultDataPropFlags);
+ if (properties[i - 1].id != iter->key()) {
+ return false;
+ }
+ iter++;
+ }
+ MOZ_ASSERT(iter.done());
+ return true;
+}
+
+SharedShape* js::NewPlainObjectWithPropsCache::lookup(
+ IdValuePair* properties, size_t nproperties) const {
+ for (size_t i = 0; i < NumEntries; i++) {
+ SharedShape* shape = entries_[i];
+ if (shape && ShapeMatches(properties, nproperties, shape)) {
+ return shape;
+ }
+ }
+ return nullptr;
+}
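+
+// Illustrative sketch (not part of the source): add() maintains a small
+// most-recently-used array, so creating several objects with the same
+// property list in a row makes the next lookup() hit entries_[0]. The
+// property names and shapes below are hypothetical.
+//
+//   IdValuePair props[2] = {/* {x: 1, y: 2} */};
+//   cache.add(shapeForXY);                      // entries_[0] == shapeForXY
+//   SharedShape* hit = cache.lookup(props, 2);  // returns shapeForXY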
+
+enum class KeysKind { UniqueNames, Unknown };
+
+template <KeysKind Kind>
+static PlainObject* NewPlainObjectWithProperties(JSContext* cx,
+ IdValuePair* properties,
+ size_t nproperties) {
+ auto& cache = cx->realm()->newPlainObjectWithPropsCache;
+
+ // If we recently created an object with these properties, we can use that
+ // Shape directly.
+ if (SharedShape* shape = cache.lookup(properties, nproperties)) {
+ Rooted<SharedShape*> shapeRoot(cx, shape);
+ PlainObject* obj = PlainObject::createWithShape(cx, shapeRoot);
+ if (!obj) {
+ return nullptr;
+ }
+ MOZ_ASSERT(obj->slotSpan() == nproperties);
+ for (size_t i = 0; i < nproperties; i++) {
+ obj->initSlot(i, properties[i].value);
+ }
+ return obj;
+ }
+
+ gc::AllocKind allocKind = gc::GetGCObjectKind(nproperties);
+ Rooted<PlainObject*> obj(cx, NewPlainObjectWithAllocKind(cx, allocKind));
+ if (!obj) {
+ return nullptr;
+ }
+
+ if (nproperties == 0) {
+ return obj;
+ }
+
+ Rooted<PropertyKey> key(cx);
+ Rooted<Value> value(cx);
+ bool canCache = true;
+
+ for (size_t i = 0; i < nproperties; i++) {
+ key = properties[i].id;
+ value = properties[i].value;
+
+    // Integer keys may need to be stored in dense elements. This is uncommon,
+    // so just fall back to NativeDefineDataProperty.
+ if constexpr (Kind == KeysKind::Unknown) {
+ if (MOZ_UNLIKELY(key.isInt())) {
+ canCache = false;
+ if (!NativeDefineDataProperty(cx, obj, key, value, JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ continue;
+ }
+ }
+
+ MOZ_ASSERT(key.isAtom() || key.isSymbol());
+
+    // Check for duplicate keys. If the key is already present, we must
+    // overwrite the earlier property value.
+ if constexpr (Kind == KeysKind::UniqueNames) {
+ MOZ_ASSERT(!obj->containsPure(key));
+ } else {
+ mozilla::Maybe<PropertyInfo> prop = obj->lookup(cx, key);
+ if (MOZ_UNLIKELY(prop)) {
+ canCache = false;
+ MOZ_ASSERT(prop->isDataProperty());
+ obj->setSlot(prop->slot(), value);
+ continue;
+ }
+ }
+
+ if (!AddDataPropertyToPlainObject(cx, obj, key, value)) {
+ return nullptr;
+ }
+ }
+
+ if (canCache && !obj->inDictionaryMode()) {
+ MOZ_ASSERT(obj->getDenseInitializedLength() == 0);
+ MOZ_ASSERT(obj->slotSpan() == nproperties);
+ cache.add(obj->sharedShape());
+ }
+
+ return obj;
+}
+
+PlainObject* js::NewPlainObjectWithUniqueNames(JSContext* cx,
+ IdValuePair* properties,
+ size_t nproperties) {
+ return NewPlainObjectWithProperties<KeysKind::UniqueNames>(cx, properties,
+ nproperties);
+}
+
+PlainObject* js::NewPlainObjectWithMaybeDuplicateKeys(JSContext* cx,
+ IdValuePair* properties,
+ size_t nproperties) {
+ return NewPlainObjectWithProperties<KeysKind::Unknown>(cx, properties,
+ nproperties);
+}
diff --git a/js/src/vm/PlainObject.h b/js/src/vm/PlainObject.h
new file mode 100644
index 0000000000..86e086e954
--- /dev/null
+++ b/js/src/vm/PlainObject.h
@@ -0,0 +1,111 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PlainObject_h
+#define vm_PlainObject_h
+
+#include "gc/AllocKind.h" // js::gc::AllocKind
+#include "js/Class.h" // JSClass
+#include "js/RootingAPI.h" // JS::Handle
+#include "vm/JSObject.h" // js::NewObjectKind
+#include "vm/NativeObject.h" // js::NativeObject
+
+struct JS_PUBLIC_API JSContext;
+class JS_PUBLIC_API JSFunction;
+class JS_PUBLIC_API JSObject;
+
+namespace js {
+
+struct IdValuePair;
+
+// Object class for plain native objects created using '{}' object literals,
+// 'new Object()', 'Object.create', etc.
+class PlainObject : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ private:
+#ifdef DEBUG
+ void assertHasNoNonWritableOrAccessorPropExclProto() const;
+#endif
+
+ public:
+ static inline js::PlainObject* createWithShape(JSContext* cx,
+ JS::Handle<SharedShape*> shape,
+ gc::AllocKind kind,
+ NewObjectKind newKind);
+
+ static inline js::PlainObject* createWithShape(
+ JSContext* cx, JS::Handle<SharedShape*> shape,
+ NewObjectKind newKind = GenericObject);
+
+ static inline PlainObject* createWithTemplate(
+ JSContext* cx, JS::Handle<PlainObject*> templateObject);
+
+ static js::PlainObject* createWithTemplateFromDifferentRealm(
+ JSContext* cx, JS::Handle<PlainObject*> templateObject);
+
+ /* Return the allocKind we would use if we were to tenure this object. */
+ inline gc::AllocKind allocKindForTenure() const;
+
+ bool hasNonWritableOrAccessorPropExclProto() const {
+ if (hasFlag(ObjectFlag::HasNonWritableOrAccessorPropExclProto)) {
+ return true;
+ }
+#ifdef DEBUG
+ assertHasNoNonWritableOrAccessorPropExclProto();
+#endif
+ return false;
+ }
+};
+
+// Specializations of 7.3.23 CopyDataProperties(...) for NativeObjects.
+extern bool CopyDataPropertiesNative(JSContext* cx,
+ JS::Handle<PlainObject*> target,
+ JS::Handle<NativeObject*> from,
+ JS::Handle<PlainObject*> excludedItems,
+ bool* optimized);
+
+// Specialized call to get the shape to use when creating |this| for a known
+// function callee.
+extern SharedShape* ThisShapeForFunction(JSContext* cx,
+ JS::Handle<JSFunction*> callee,
+ JS::Handle<JSObject*> newTarget);
+
+// Create a new PlainObject with %Object.prototype% as prototype.
+extern PlainObject* NewPlainObject(JSContext* cx,
+ NewObjectKind newKind = GenericObject);
+
+// Like NewPlainObject, but uses the given AllocKind. This allows creating an
+// object with fixed slots available for properties.
+extern PlainObject* NewPlainObjectWithAllocKind(
+ JSContext* cx, gc::AllocKind allocKind,
+ NewObjectKind newKind = GenericObject);
+
+// Create a new PlainObject with the given |proto| as prototype.
+extern PlainObject* NewPlainObjectWithProto(
+ JSContext* cx, HandleObject proto, NewObjectKind newKind = GenericObject);
+
+// Like NewPlainObjectWithProto, but uses the given AllocKind. This allows
+// creating an object with fixed slots available for properties.
+extern PlainObject* NewPlainObjectWithProtoAndAllocKind(
+ JSContext* cx, HandleObject proto, gc::AllocKind allocKind,
+ NewObjectKind newKind = GenericObject);
+
+// Create a plain object with the given properties. The list must not contain
+// duplicate keys or integer keys.
+extern PlainObject* NewPlainObjectWithUniqueNames(JSContext* cx,
+ IdValuePair* properties,
+ size_t nproperties);
+
+// Create a plain object with the given properties. The list may contain integer
+// keys or duplicate keys.
+extern PlainObject* NewPlainObjectWithMaybeDuplicateKeys(
+ JSContext* cx, IdValuePair* properties, size_t nproperties);
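+
+// Usage sketch (not part of the source; the property name and value are
+// hypothetical, and IdValuePair's two-argument constructor is assumed):
+//
+//   IdValuePair props[] = {
+//       IdValuePair(NameToId(cx->names().length), JS::Int32Value(0))};
+//   PlainObject* obj = NewPlainObjectWithUniqueNames(cx, props, 1);
+//   if (!obj) { /* OOM already reported */ }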
+
+} // namespace js
+
+#endif // vm_PlainObject_h
diff --git a/js/src/vm/Printer.cpp b/js/src/vm/Printer.cpp
new file mode 100644
index 0000000000..1cfdca01a4
--- /dev/null
+++ b/js/src/vm/Printer.cpp
@@ -0,0 +1,559 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/Printer.h"
+
+#include "mozilla/PodOperations.h"
+#include "mozilla/Printf.h"
+#include "mozilla/RangedPtr.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "ds/LifoAlloc.h"
+#include "js/CharacterEncoding.h"
+#include "util/Memory.h"
+#include "util/Text.h"
+#include "util/WindowsWrapper.h"
+#include "vm/StringType.h"
+
+using mozilla::PodCopy;
+
+namespace {
+
+class GenericPrinterPrintfTarget : public mozilla::PrintfTarget {
+ public:
+ explicit GenericPrinterPrintfTarget(js::GenericPrinter& p) : printer(p) {}
+
+ bool append(const char* sp, size_t len) override {
+ return printer.put(sp, len);
+ }
+
+ private:
+ js::GenericPrinter& printer;
+};
+
+} // namespace
+
+namespace js {
+
+void GenericPrinter::reportOutOfMemory() {
+ if (hadOOM_) {
+ return;
+ }
+ hadOOM_ = true;
+}
+
+bool GenericPrinter::hadOutOfMemory() const { return hadOOM_; }
+
+bool GenericPrinter::printf(const char* fmt, ...) {
+ va_list va;
+ va_start(va, fmt);
+ bool r = vprintf(fmt, va);
+ va_end(va);
+ return r;
+}
+
+bool GenericPrinter::vprintf(const char* fmt, va_list ap) {
+ // Simple shortcut to avoid allocating strings.
+ if (strchr(fmt, '%') == nullptr) {
+ return put(fmt);
+ }
+
+ GenericPrinterPrintfTarget printer(*this);
+ if (!printer.vprint(fmt, ap)) {
+ reportOutOfMemory();
+ return false;
+ }
+ return true;
+}
+
+const size_t Sprinter::DefaultSize = 64;
+
+bool Sprinter::realloc_(size_t newSize) {
+ MOZ_ASSERT(newSize > (size_t)offset);
+ char* newBuf = (char*)js_realloc(base, newSize);
+ if (!newBuf) {
+ reportOutOfMemory();
+ return false;
+ }
+ base = newBuf;
+ size = newSize;
+ base[size - 1] = '\0';
+ return true;
+}
+
+Sprinter::Sprinter(JSContext* maybeCx, bool shouldReportOOM)
+ : maybeCx(maybeCx),
+#ifdef DEBUG
+ initialized(false),
+#endif
+ shouldReportOOM(maybeCx && shouldReportOOM),
+ base(nullptr),
+ size(0),
+ offset(0) {
+}
+
+Sprinter::~Sprinter() {
+#ifdef DEBUG
+ if (initialized) {
+ checkInvariants();
+ }
+#endif
+ js_free(base);
+}
+
+bool Sprinter::init() {
+ MOZ_ASSERT(!initialized);
+ base = js_pod_malloc<char>(DefaultSize);
+ if (!base) {
+ reportOutOfMemory();
+ return false;
+ }
+#ifdef DEBUG
+ initialized = true;
+#endif
+ *base = '\0';
+ size = DefaultSize;
+ base[size - 1] = '\0';
+ return true;
+}
+
+void Sprinter::checkInvariants() const {
+ MOZ_ASSERT(initialized);
+ MOZ_ASSERT((size_t)offset < size);
+ MOZ_ASSERT(base[size - 1] == '\0');
+}
+
+UniqueChars Sprinter::release() {
+ checkInvariants();
+ if (hadOOM_) {
+ return nullptr;
+ }
+
+ char* str = base;
+ base = nullptr;
+ offset = size = 0;
+#ifdef DEBUG
+ initialized = false;
+#endif
+ return UniqueChars(str);
+}
+
+char* Sprinter::stringAt(ptrdiff_t off) const {
+ MOZ_ASSERT(off >= 0 && (size_t)off < size);
+ return base + off;
+}
+
+char& Sprinter::operator[](size_t off) {
+ MOZ_ASSERT(off < size);
+ return *(base + off);
+}
+
+char* Sprinter::reserve(size_t len) {
+ InvariantChecker ic(this);
+
+ while (len + 1 > size - offset) { /* Include trailing \0 */
+ if (!realloc_(size * 2)) {
+ return nullptr;
+ }
+ }
+
+ char* sb = base + offset;
+ offset += len;
+ return sb;
+}
+
+bool Sprinter::put(const char* s, size_t len) {
+ InvariantChecker ic(this);
+
+ const char* oldBase = base;
+ const char* oldEnd = base + size;
+
+ char* bp = reserve(len);
+ if (!bp) {
+ return false;
+ }
+
+ /* s is within the buffer already */
+ if (s >= oldBase && s < oldEnd) {
+ /* buffer was realloc'ed */
+ if (base != oldBase) {
+ s = stringAt(s - oldBase); /* this is where it lives now */
+ }
+ memmove(bp, s, len);
+ } else {
+ js_memcpy(bp, s, len);
+ }
+
+ bp[len] = '\0';
+ return true;
+}
+
+bool Sprinter::putString(JSString* s) {
+ MOZ_ASSERT(maybeCx);
+ InvariantChecker ic(this);
+
+ JSLinearString* linear = s->ensureLinear(maybeCx);
+ if (!linear) {
+ return false;
+ }
+
+ size_t length = JS::GetDeflatedUTF8StringLength(linear);
+
+ char* buffer = reserve(length);
+ if (!buffer) {
+ return false;
+ }
+
+ mozilla::DebugOnly<size_t> written =
+ JS::DeflateStringToUTF8Buffer(linear, mozilla::Span(buffer, length));
+ MOZ_ASSERT(written == length);
+
+ buffer[length] = '\0';
+ return true;
+}
+
+ptrdiff_t Sprinter::getOffset() const { return offset; }
+
+void Sprinter::reportOutOfMemory() {
+ if (hadOOM_) {
+ return;
+ }
+ if (maybeCx && shouldReportOOM) {
+ ReportOutOfMemory(maybeCx);
+ }
+ hadOOM_ = true;
+}
+
+bool Sprinter::jsprintf(const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+
+ bool r = vprintf(format, ap);
+ va_end(ap);
+
+ return r;
+}
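+
+// Typical use, as a sketch (assumes the constructor's default arguments): the
+// buffer grows on demand, and release() hands ownership to the caller,
+// returning null if any earlier operation hit OOM.
+//
+//   Sprinter sp(cx);
+//   if (!sp.init()) { return false; }
+//   if (!sp.jsprintf("%s = %d", "answer", 42)) { return false; }
+//   UniqueChars out = sp.release();  // "answer = 42"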
+
+const char js_EscapeMap[] = {
+ // clang-format off
+ '\b', 'b',
+ '\f', 'f',
+ '\n', 'n',
+ '\r', 'r',
+ '\t', 't',
+ '\v', 'v',
+ '"', '"',
+ '\'', '\'',
+ '\\', '\\',
+ '\0'
+ // clang-format on
+};
+
+static const char JSONEscapeMap[] = {
+ // clang-format off
+ '\b', 'b',
+ '\f', 'f',
+ '\n', 'n',
+ '\r', 'r',
+ '\t', 't',
+ '"', '"',
+ '\\', '\\',
+ '\0'
+ // clang-format on
+};
+
+template <QuoteTarget target, typename CharT>
+JS_PUBLIC_API bool QuoteString(Sprinter* sp,
+ const mozilla::Range<const CharT> chars,
+ char quote) {
+ MOZ_ASSERT_IF(target == QuoteTarget::JSON, quote == '\0');
+
+ using CharPtr = mozilla::RangedPtr<const CharT>;
+
+ const char* escapeMap =
+ (target == QuoteTarget::String) ? js_EscapeMap : JSONEscapeMap;
+
+ if (quote) {
+ if (!sp->putChar(quote)) {
+ return false;
+ }
+ }
+
+ const CharPtr end = chars.end();
+
+ /* Loop control variables: end points at end of string sentinel. */
+ for (CharPtr t = chars.begin(); t < end; ++t) {
+ /* Move t forward from s past un-quote-worthy characters. */
+ const CharPtr s = t;
+ char16_t c = *t;
+ while (c < 127 && c != '\\') {
+ if (target == QuoteTarget::String) {
+ if (!IsAsciiPrintable(c) || c == quote || c == '\t') {
+ break;
+ }
+ } else {
+ if (c < ' ' || c == '"') {
+ break;
+ }
+ }
+
+ ++t;
+ if (t == end) {
+ break;
+ }
+ c = *t;
+ }
+
+ {
+ ptrdiff_t len = t - s;
+ ptrdiff_t base = sp->getOffset();
+ if (!sp->reserve(len)) {
+ return false;
+ }
+
+ for (ptrdiff_t i = 0; i < len; ++i) {
+ (*sp)[base + i] = char(s[i]);
+ }
+ (*sp)[base + len] = '\0';
+ }
+
+ if (t == end) {
+ break;
+ }
+
+ /* Use escapeMap, \u, or \x only if necessary. */
+ const char* escape;
+ if (!(c >> 8) && c != 0 &&
+ (escape = strchr(escapeMap, int(c))) != nullptr) {
+ if (!sp->jsprintf("\\%c", escape[1])) {
+ return false;
+ }
+ } else {
+ /*
+ * Use \x only if the high byte is 0 and we're in a quoted string,
+ * because ECMA-262 allows only \u, not \x, in Unicode identifiers
+ * (see bug 621814).
+ */
+ if (!sp->jsprintf((quote && !(c >> 8)) ? "\\x%02X" : "\\u%04X", c)) {
+ return false;
+ }
+ }
+ }
+
+ /* Sprint the closing quote and return the quoted string. */
+ if (quote) {
+ if (!sp->putChar(quote)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template JS_PUBLIC_API bool QuoteString<QuoteTarget::String, Latin1Char>(
+ Sprinter* sp, const mozilla::Range<const Latin1Char> chars, char quote);
+
+template JS_PUBLIC_API bool QuoteString<QuoteTarget::String, char16_t>(
+ Sprinter* sp, const mozilla::Range<const char16_t> chars, char quote);
+
+template JS_PUBLIC_API bool QuoteString<QuoteTarget::JSON, Latin1Char>(
+ Sprinter* sp, const mozilla::Range<const Latin1Char> chars, char quote);
+
+template JS_PUBLIC_API bool QuoteString<QuoteTarget::JSON, char16_t>(
+ Sprinter* sp, const mozilla::Range<const char16_t> chars, char quote);
+
+JS_PUBLIC_API bool QuoteString(Sprinter* sp, JSString* str,
+ char quote /*= '\0' */) {
+ MOZ_ASSERT(sp->maybeCx);
+ JSLinearString* linear = str->ensureLinear(sp->maybeCx);
+ if (!linear) {
+ return false;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ return linear->hasLatin1Chars() ? QuoteString<QuoteTarget::String>(
+ sp, linear->latin1Range(nogc), quote)
+ : QuoteString<QuoteTarget::String>(
+ sp, linear->twoByteRange(nogc), quote);
+}
+
+JS_PUBLIC_API UniqueChars QuoteString(JSContext* cx, JSString* str,
+ char quote /* = '\0' */) {
+ Sprinter sprinter(cx);
+ if (!sprinter.init()) {
+ return nullptr;
+ }
+ if (!QuoteString(&sprinter, str, quote)) {
+ return nullptr;
+ }
+ return sprinter.release();
+}
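+
+// Worked example, as a sketch: quoting the three-character string
+// {'a', '\t', U+00FF} with quote '"' produces the nine-character result
+// "a\t\xFF" (including the surrounding quotes). '\t' is found in
+// js_EscapeMap; U+00FF has a zero high byte and no escape-map entry, so the
+// quoted-string path emits the \x form.
+//
+//   UniqueChars quoted = QuoteString(cx, str, '"');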
+
+JS_PUBLIC_API bool JSONQuoteString(Sprinter* sp, JSString* str) {
+ MOZ_ASSERT(sp->maybeCx);
+ JSLinearString* linear = str->ensureLinear(sp->maybeCx);
+ if (!linear) {
+ return false;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ return linear->hasLatin1Chars() ? QuoteString<QuoteTarget::JSON>(
+ sp, linear->latin1Range(nogc), '\0')
+ : QuoteString<QuoteTarget::JSON>(
+ sp, linear->twoByteRange(nogc), '\0');
+}
+
+Fprinter::Fprinter(FILE* fp) : file_(nullptr), init_(false) { init(fp); }
+
+#ifdef DEBUG
+Fprinter::~Fprinter() { MOZ_ASSERT_IF(init_, !file_); }
+#endif
+
+bool Fprinter::init(const char* path) {
+ MOZ_ASSERT(!file_);
+ file_ = fopen(path, "w");
+ if (!file_) {
+ return false;
+ }
+ init_ = true;
+ return true;
+}
+
+void Fprinter::init(FILE* fp) {
+ MOZ_ASSERT(!file_);
+ file_ = fp;
+ init_ = false;
+}
+
+void Fprinter::flush() {
+ MOZ_ASSERT(file_);
+ fflush(file_);
+}
+
+void Fprinter::finish() {
+ MOZ_ASSERT(file_);
+ if (init_) {
+ fclose(file_);
+ }
+ file_ = nullptr;
+}
+
+bool Fprinter::put(const char* s, size_t len) {
+ MOZ_ASSERT(file_);
+ int i = fwrite(s, /*size=*/1, /*nitems=*/len, file_);
+ if (size_t(i) != len) {
+ reportOutOfMemory();
+ return false;
+ }
+#ifdef XP_WIN
+ if ((file_ == stderr) && (IsDebuggerPresent())) {
+ UniqueChars buf = DuplicateString(s, len);
+ if (!buf) {
+ reportOutOfMemory();
+ return false;
+ }
+ OutputDebugStringA(buf.get());
+ }
+#endif
+ return true;
+}
+
+LSprinter::LSprinter(LifoAlloc* lifoAlloc)
+ : alloc_(lifoAlloc), head_(nullptr), tail_(nullptr), unused_(0) {}
+
+LSprinter::~LSprinter() {
+ // This LSprinter might be allocated as part of the same LifoAlloc, so we
+ // should not expect the destructor to be called.
+}
+
+void LSprinter::exportInto(GenericPrinter& out) const {
+ if (!head_) {
+ return;
+ }
+
+ for (Chunk* it = head_; it != tail_; it = it->next) {
+ out.put(it->chars(), it->length);
+ }
+ out.put(tail_->chars(), tail_->length - unused_);
+}
+
+void LSprinter::clear() {
+ head_ = nullptr;
+ tail_ = nullptr;
+ unused_ = 0;
+ hadOOM_ = false;
+}
+
+bool LSprinter::put(const char* s, size_t len) {
+ // Compute how much data will fit in the current chunk.
+ size_t existingSpaceWrite = 0;
+ size_t overflow = len;
+ if (unused_ > 0 && tail_) {
+ existingSpaceWrite = std::min(unused_, len);
+ overflow = len - existingSpaceWrite;
+ }
+
+ // If necessary, allocate a new chunk for overflow data.
+ size_t allocLength = 0;
+ Chunk* last = nullptr;
+ if (overflow > 0) {
+ allocLength =
+ AlignBytes(sizeof(Chunk) + overflow, js::detail::LIFO_ALLOC_ALIGN);
+
+ LifoAlloc::AutoFallibleScope fallibleAllocator(alloc_);
+ last = reinterpret_cast<Chunk*>(alloc_->alloc(allocLength));
+ if (!last) {
+ reportOutOfMemory();
+ return false;
+ }
+ }
+
+ // All fallible operations complete: now fill up existing space, then
+ // overflow space in any new chunk.
+ MOZ_ASSERT(existingSpaceWrite + overflow == len);
+
+ if (existingSpaceWrite > 0) {
+ PodCopy(tail_->end() - unused_, s, existingSpaceWrite);
+ unused_ -= existingSpaceWrite;
+ s += existingSpaceWrite;
+ }
+
+ if (overflow > 0) {
+ if (tail_ && reinterpret_cast<char*>(last) == tail_->end()) {
+ // tail_ and last are consecutive in memory. LifoAlloc has no
+ // metadata and is just a bump allocator, so we can cheat by
+ // appending the newly-allocated space to tail_.
+ unused_ = allocLength;
+ tail_->length += allocLength;
+ } else {
+ // Remove the size of the header from the allocated length.
+ size_t availableSpace = allocLength - sizeof(Chunk);
+ last->next = nullptr;
+ last->length = availableSpace;
+
+ unused_ = availableSpace;
+ if (!head_) {
+ head_ = last;
+ } else {
+ tail_->next = last;
+ }
+
+ tail_ = last;
+ }
+
+ PodCopy(tail_->end() - unused_, s, overflow);
+
+ MOZ_ASSERT(unused_ >= overflow);
+ unused_ -= overflow;
+ }
+
+ MOZ_ASSERT(len <= INT_MAX);
+ return true;
+}
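+
+// Sketch of the intended flow (not part of the source): accumulate text into
+// LifoAlloc-backed chunks, then replay it into a concrete printer. printf()
+// is inherited from GenericPrinter.
+//
+//   LSprinter ls(&lifoAlloc);
+//   ls.printf("pass %d\n", passIndex);  // |passIndex| is hypothetical
+//   Fprinter file(stderr);
+//   ls.exportInto(file);                // writes head_..tail_ into |file|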
+
+} // namespace js
diff --git a/js/src/vm/Probes-inl.h b/js/src/vm/Probes-inl.h
new file mode 100644
index 0000000000..d0ad1e39fc
--- /dev/null
+++ b/js/src/vm/Probes-inl.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Probes_inl_h
+#define vm_Probes_inl_h
+
+#include "vm/Probes.h"
+
+#include "vm/JSContext.h"
+#include "vm/JSScript.h"
+
+namespace js {
+
+/*
+ * Many probe handlers are implemented inline for minimal performance impact,
+ * especially important when no backends are enabled.
+ */
+
+inline bool probes::CallTrackingActive(JSContext* cx) {
+#ifdef INCLUDE_MOZILLA_DTRACE
+ if (JAVASCRIPT_FUNCTION_ENTRY_ENABLED() ||
+ JAVASCRIPT_FUNCTION_RETURN_ENABLED()) {
+ return true;
+ }
+#endif
+ return false;
+}
+
+inline bool probes::EnterScript(JSContext* cx, JSScript* script,
+ JSFunction* maybeFun, InterpreterFrame* fp) {
+#ifdef INCLUDE_MOZILLA_DTRACE
+ if (JAVASCRIPT_FUNCTION_ENTRY_ENABLED()) {
+ DTraceEnterJSFun(cx, maybeFun, script);
+ }
+#endif
+
+ JSRuntime* rt = cx->runtime();
+ if (rt->geckoProfiler().enabled()) {
+ if (!cx->geckoProfiler().enter(cx, script)) {
+ return false;
+ }
+ MOZ_ASSERT(!fp->hasPushedGeckoProfilerFrame());
+ fp->setPushedGeckoProfilerFrame();
+ }
+
+ return true;
+}
+
+inline void probes::ExitScript(JSContext* cx, JSScript* script,
+ JSFunction* maybeFun, bool popProfilerFrame) {
+#ifdef INCLUDE_MOZILLA_DTRACE
+ if (JAVASCRIPT_FUNCTION_RETURN_ENABLED()) {
+ DTraceExitJSFun(cx, maybeFun, script);
+ }
+#endif
+
+ if (popProfilerFrame) {
+ cx->geckoProfiler().exit(cx, script);
+ }
+}
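+
+// The engine pairs these roughly as in the following sketch (simplified; the
+// real call sites live in the interpreter):
+//
+//   if (!probes::EnterScript(cx, script, script->function(), fp)) {
+//     return false;
+//   }
+//   /* ... execute the script ... */
+//   probes::ExitScript(cx, script, script->function(),
+//                      fp->hasPushedGeckoProfilerFrame());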
+
+inline bool probes::StartExecution(JSScript* script) {
+ bool ok = true;
+
+#ifdef INCLUDE_MOZILLA_DTRACE
+ if (JAVASCRIPT_EXECUTE_START_ENABLED()) {
+ JAVASCRIPT_EXECUTE_START(
+ (script->filename() ? (char*)script->filename() : nullName),
+ script->lineno());
+ }
+#endif
+
+ return ok;
+}
+
+inline bool probes::StopExecution(JSScript* script) {
+ bool ok = true;
+
+#ifdef INCLUDE_MOZILLA_DTRACE
+ if (JAVASCRIPT_EXECUTE_DONE_ENABLED()) {
+ JAVASCRIPT_EXECUTE_DONE(
+ (script->filename() ? (char*)script->filename() : nullName),
+ script->lineno());
+ }
+#endif
+
+ return ok;
+}
+
+} /* namespace js */
+
+#endif /* vm_Probes_inl_h */
diff --git a/js/src/vm/Probes.cpp b/js/src/vm/Probes.cpp
new file mode 100644
index 0000000000..485f7cb9a4
--- /dev/null
+++ b/js/src/vm/Probes.cpp
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Probes-inl.h"
+
+#ifdef INCLUDE_MOZILLA_DTRACE
+# include "vm/JSScript-inl.h"
+#endif
+
+using namespace js;
+
+const char probes::nullName[] = "(null)";
+const char probes::anonymousName[] = "(anonymous)";
+
+bool probes::ProfilingActive = true;
+
+#ifdef INCLUDE_MOZILLA_DTRACE
+static const char* ScriptFilename(const JSScript* script) {
+ if (!script) {
+ return probes::nullName;
+ }
+ if (!script->filename()) {
+ return probes::anonymousName;
+ }
+ return script->filename();
+}
+
+static const char* FunctionName(JSContext* cx, JSFunction* fun,
+ UniqueChars* bytes) {
+ if (!fun) {
+ return probes::nullName;
+ }
+ if (!fun->displayAtom()) {
+ return probes::anonymousName;
+ }
+ // TODO: Should be JS_EncodeStringToUTF8, but that'd introduce a rooting
+ // hazard, because JS_EncodeStringToUTF8 can GC.
+ *bytes = JS_EncodeStringToLatin1(cx, fun->displayAtom());
+ return *bytes ? bytes->get() : probes::nullName;
+}
+
+/*
+ * These functions call the DTrace macros for the JavaScript USDT probes.
+ * Originally this code was inlined in the JavaScript code; however, since a
+ * number of operations are called, it has been moved into these functions to
+ * reduce the negative compiler-optimization effect that a number of usually
+ * unused inline lines of code would cause.
+ */
+void probes::DTraceEnterJSFun(JSContext* cx, JSFunction* fun,
+ JSScript* script) {
+ UniqueChars funNameBytes;
+ JAVASCRIPT_FUNCTION_ENTRY(ScriptFilename(script), probes::nullName,
+ FunctionName(cx, fun, &funNameBytes));
+}
+
+void probes::DTraceExitJSFun(JSContext* cx, JSFunction* fun, JSScript* script) {
+ UniqueChars funNameBytes;
+ JAVASCRIPT_FUNCTION_RETURN(ScriptFilename(script), probes::nullName,
+ FunctionName(cx, fun, &funNameBytes));
+}
+#endif
diff --git a/js/src/vm/Probes.h b/js/src/vm/Probes.h
new file mode 100644
index 0000000000..83c984e55c
--- /dev/null
+++ b/js/src/vm/Probes.h
@@ -0,0 +1,144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Probes_h
+#define vm_Probes_h
+
+#ifdef INCLUDE_MOZILLA_DTRACE
+# include "javascript-trace.h"
+#endif
+
+#include "vm/JSObject.h"
+
+namespace js {
+
+class InterpreterFrame;
+
+namespace probes {
+
+/*
+ * Static probes
+ *
+ * The probe points defined in this file are scattered around the SpiderMonkey
+ * source tree. The presence of probes::SomeEvent() means that someEvent is
+ * about to happen or has happened. To the extent possible, probes should be
+ * inserted in all paths associated with a given event, regardless of the
+ * active runmode (interpreter/traceJIT/methodJIT/ionJIT).
+ *
+ * When a probe fires, it is handled by any probe handling backends that have
+ * been compiled in. By default, most probes do nothing or at least do nothing
+ * expensive, so the presence of the probe should have negligible effect on
+ * running time. (Probes in slow paths may do something by default, as long as
+ * there is no noticeable slowdown.)
+ *
+ * For some probes, the mere existence of the probe is too expensive even if it
+ * does nothing when called. For example, just having consistent information
+ * available for a function call entry/exit probe causes the JITs to
+ * de-optimize function calls. In those cases, the JITs may query at compile
+ * time whether a probe is desired, and omit the probe invocation if not. If a
+ * probe is runtime-disabled at compilation time, it is not guaranteed to fire
+ * within a compiled function if it is later enabled.
+ *
+ * Not all backends handle all of the probes listed here.
+ */
+
+/*
+ * Internal use only: remember whether "profiling", whatever that means, is
+ * currently active. Used for state management.
+ */
+extern bool ProfilingActive;
+
+extern const char nullName[];
+extern const char anonymousName[];
+
+/*
+ * Test whether we are tracking JS function call enter/exit. The JITs use this
+ * to decide whether they can optimize in a way that would prevent probes from
+ * firing.
+ */
+bool CallTrackingActive(JSContext*);
+
+/* Entering a JS function */
+bool EnterScript(JSContext*, JSScript*, JSFunction*, InterpreterFrame*);
+
+/* About to leave a JS function */
+void ExitScript(JSContext*, JSScript*, JSFunction*, bool popProfilerFrame);
+
+/* Executing a script */
+bool StartExecution(JSScript* script);
+
+/* Script has completed execution */
+bool StopExecution(JSScript* script);
+
+/*
+ * Object has been created. |obj| must exist (its class and size are read)
+ */
+bool CreateObject(JSContext* cx, JSObject* obj);
+
+/*
+ * Object is about to be finalized. |obj| must still exist (its class is
+ * read)
+ */
+bool FinalizeObject(JSObject* obj);
+
+/*
+ * Internal: DTrace-specific functions to be called during probes::EnterScript
+ * and probes::ExitScript. These will not be inlined, but the argument
+ * marshalling required for these probe points is expensive enough that it
+ * shouldn't really matter.
+ */
+void DTraceEnterJSFun(JSContext* cx, JSFunction* fun, JSScript* script);
+void DTraceExitJSFun(JSContext* cx, JSFunction* fun, JSScript* script);
+
+} // namespace probes
+
+#ifdef INCLUDE_MOZILLA_DTRACE
+static const char* ObjectClassname(JSObject* obj) {
+ if (!obj) {
+ return "(null object)";
+ }
+ const JSClass* clasp = obj->getClass();
+ if (!clasp) {
+ return "(null)";
+ }
+ const char* class_name = clasp->name;
+ if (!class_name) {
+ return "(null class name)";
+ }
+ return class_name;
+}
+#endif
+
+inline bool probes::CreateObject(JSContext* cx, JSObject* obj) {
+ bool ok = true;
+
+#ifdef INCLUDE_MOZILLA_DTRACE
+ if (JAVASCRIPT_OBJECT_CREATE_ENABLED()) {
+ JAVASCRIPT_OBJECT_CREATE(ObjectClassname(obj), (uintptr_t)obj);
+ }
+#endif
+
+ return ok;
+}
+
+inline bool probes::FinalizeObject(JSObject* obj) {
+ bool ok = true;
+
+#ifdef INCLUDE_MOZILLA_DTRACE
+ if (JAVASCRIPT_OBJECT_FINALIZE_ENABLED()) {
+ const JSClass* clasp = obj->getClass();
+
+ /* the first arg is nullptr - reserved for future use (filename?) */
+ JAVASCRIPT_OBJECT_FINALIZE(nullptr, (char*)clasp->name, (uintptr_t)obj);
+ }
+#endif
+
+ return ok;
+}
+
+} /* namespace js */
+
+#endif /* vm_Probes_h */
diff --git a/js/src/vm/ProfilingStack.cpp b/js/src/vm/ProfilingStack.cpp
new file mode 100644
index 0000000000..289bdd9d83
--- /dev/null
+++ b/js/src/vm/ProfilingStack.cpp
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/ProfilingStack.h"
+
+#include "mozilla/IntegerRange.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+using namespace js;
+
+ProfilingStack::~ProfilingStack() {
+ // The label macros keep a reference to the ProfilingStack to avoid a TLS
+  // access. If these are somehow not all cleared, we will get a
+  // use-after-free, so it is better to crash now.
+ MOZ_RELEASE_ASSERT(stackPointer == 0);
+
+ delete[] frames;
+}
+
+void ProfilingStack::ensureCapacitySlow() {
+ MOZ_ASSERT(stackPointer >= capacity);
+ const uint32_t kInitialCapacity = 4096 / sizeof(ProfilingStackFrame);
+
+ uint32_t sp = stackPointer;
+
+ uint32_t newCapacity;
+ if (!capacity) {
+ newCapacity = kInitialCapacity;
+ } else {
+ size_t memoryGoal =
+ mozilla::RoundUpPow2(capacity * 2 * sizeof(ProfilingStackFrame));
+ newCapacity = memoryGoal / sizeof(ProfilingStackFrame);
+ }
+ newCapacity = std::max(sp + 1, newCapacity);
+
+ auto* newFrames = new js::ProfilingStackFrame[newCapacity];
+
+ // It's important that `frames` / `capacity` / `stackPointer` remain
+ // consistent here at all times.
+ for (auto i : mozilla::IntegerRange(capacity)) {
+ newFrames[i] = frames[i];
+ }
+
+ js::ProfilingStackFrame* oldFrames = frames;
+ frames = newFrames;
+ capacity = newCapacity;
+ delete[] oldFrames;
+}
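+
+// Growth arithmetic, as a sketch: assuming (hypothetically) that
+// sizeof(ProfilingStackFrame) == 32, the initial capacity is 4096 / 32 = 128
+// frames, and the first grow requests RoundUpPow2(128 * 2 * 32) = 8192 bytes,
+// i.e. 256 frames. The buffer thus roughly doubles while staying a
+// power-of-two number of bytes.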
diff --git a/js/src/vm/PromiseLookup.cpp b/js/src/vm/PromiseLookup.cpp
new file mode 100644
index 0000000000..cd41716cc3
--- /dev/null
+++ b/js/src/vm/PromiseLookup.cpp
@@ -0,0 +1,273 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/PromiseLookup.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include "jspubtd.h" // JSProto_*
+
+#include "builtin/Promise.h" // js::Promise_then, js::Promise_static_resolve, js::Promise_static_species
+#include "js/HeapAPI.h" // js::gc::IsInsideNursery
+#include "js/Id.h" // SYMBOL_TO_JSID
+#include "js/Value.h" // JS::Value, JS::ObjectValue
+#include "util/Poison.h" // js::AlwaysPoison, JS_RESET_VALUE_PATTERN, MemCheckKind
+#include "vm/GlobalObject.h" // js::GlobalObject
+#include "vm/JSContext.h" // JSContext
+#include "vm/JSFunction.h" // JSFunction
+#include "vm/JSObject.h" // JSObject
+#include "vm/NativeObject.h" // js::NativeObject
+#include "vm/Runtime.h" // js::WellKnownSymbols
+#include "vm/Shape.h" // js::Shape
+
+#include "vm/JSObject-inl.h" // js::IsFunctionObject, js::IsNativeFunction
+
+using JS::ObjectValue;
+using JS::Value;
+
+using js::NativeObject;
+
+JSFunction* js::PromiseLookup::getPromiseConstructor(JSContext* cx) {
+ JSObject* obj = cx->global()->maybeGetConstructor(JSProto_Promise);
+ return obj ? &obj->as<JSFunction>() : nullptr;
+}
+
+NativeObject* js::PromiseLookup::getPromisePrototype(JSContext* cx) {
+ JSObject* obj = cx->global()->maybeGetPrototype(JSProto_Promise);
+ return obj ? &obj->as<NativeObject>() : nullptr;
+}
+
+bool js::PromiseLookup::isDataPropertyNative(JSContext* cx, NativeObject* obj,
+ uint32_t slot, JSNative native) {
+ JSFunction* fun;
+ if (!IsFunctionObject(obj->getSlot(slot), &fun)) {
+ return false;
+ }
+ return fun->maybeNative() == native && fun->realm() == cx->realm();
+}
+
+bool js::PromiseLookup::isAccessorPropertyNative(JSContext* cx,
+ NativeObject* holder,
+ uint32_t getterSlot,
+ JSNative native) {
+ JSObject* getter = holder->getGetter(getterSlot);
+ return getter && IsNativeFunction(getter, native) &&
+ getter->as<JSFunction>().realm() == cx->realm();
+}
+
+void js::PromiseLookup::initialize(JSContext* cx) {
+ MOZ_ASSERT(state_ == State::Uninitialized);
+
+ // Get the canonical Promise.prototype.
+ NativeObject* promiseProto = getPromisePrototype(cx);
+
+ // Check condition 1:
+ // Leave the cache uninitialized if the Promise class itself is not yet
+ // initialized.
+ if (!promiseProto) {
+ return;
+ }
+
+ // Get the canonical Promise constructor.
+ JSFunction* promiseCtor = getPromiseConstructor(cx);
+ MOZ_ASSERT(promiseCtor,
+ "The Promise constructor is initialized iff Promise.prototype is "
+ "initialized");
+
+  // A shortcut return below means Promise[@@species] will never be
+  // optimizable, so mark the cache Disabled now; it is set to Initialized
+  // only once every check below succeeds.
+ state_ = State::Disabled;
+
+ // Check condition 2:
+ // Look up Promise.prototype.constructor and ensure it's a data property.
+ mozilla::Maybe<PropertyInfo> ctorProp =
+ promiseProto->lookup(cx, cx->names().constructor);
+ if (ctorProp.isNothing() || !ctorProp->isDataProperty()) {
+ return;
+ }
+
+ // Get the referred value, and ensure it holds the canonical Promise
+ // constructor.
+ JSFunction* ctorFun;
+ if (!IsFunctionObject(promiseProto->getSlot(ctorProp->slot()), &ctorFun)) {
+ return;
+ }
+ if (ctorFun != promiseCtor) {
+ return;
+ }
+
+ // Check condition 3:
+ // Look up Promise.prototype.then and ensure it's a data property.
+ mozilla::Maybe<PropertyInfo> thenProp =
+ promiseProto->lookup(cx, cx->names().then);
+ if (thenProp.isNothing() || !thenProp->isDataProperty()) {
+ return;
+ }
+
+ // Get the referred value, and ensure it holds the canonical "then"
+ // function.
+ if (!isDataPropertyNative(cx, promiseProto, thenProp->slot(), Promise_then)) {
+ return;
+ }
+
+ // Check condition 4:
+ // Look up the '@@species' value on Promise.
+ mozilla::Maybe<PropertyInfo> speciesProp = promiseCtor->lookup(
+ cx, PropertyKey::Symbol(cx->wellKnownSymbols().species));
+ if (speciesProp.isNothing() || !promiseCtor->hasGetter(*speciesProp)) {
+ return;
+ }
+
+ // Get the referred value, ensure it holds the canonical Promise[@@species]
+ // function.
+ uint32_t speciesGetterSlot = speciesProp->slot();
+ if (!isAccessorPropertyNative(cx, promiseCtor, speciesGetterSlot,
+ Promise_static_species)) {
+ return;
+ }
+
+ // Check condition 5:
+ // Look up Promise.resolve and ensure it's a data property.
+ mozilla::Maybe<PropertyInfo> resolveProp =
+ promiseCtor->lookup(cx, cx->names().resolve);
+ if (resolveProp.isNothing() || !resolveProp->isDataProperty()) {
+ return;
+ }
+
+ // Get the referred value, and ensure it holds the canonical "resolve"
+ // function.
+ if (!isDataPropertyNative(cx, promiseCtor, resolveProp->slot(),
+ Promise_static_resolve)) {
+ return;
+ }
+
+ // Store raw pointers below. This is okay to do here, because all objects
+ // are in the tenured heap.
+ MOZ_ASSERT(!gc::IsInsideNursery(promiseCtor->shape()));
+ MOZ_ASSERT(!gc::IsInsideNursery(promiseProto->shape()));
+
+ state_ = State::Initialized;
+ promiseConstructorShape_ = promiseCtor->shape();
+ promiseProtoShape_ = promiseProto->shape();
+ promiseSpeciesGetterSlot_ = speciesGetterSlot;
+ promiseResolveSlot_ = resolveProp->slot();
+ promiseProtoConstructorSlot_ = ctorProp->slot();
+ promiseProtoThenSlot_ = thenProp->slot();
+}
+
+void js::PromiseLookup::reset() {
+ AlwaysPoison(this, JS_RESET_VALUE_PATTERN, sizeof(*this),
+ MemCheckKind::MakeUndefined);
+ state_ = State::Uninitialized;
+}
+
+bool js::PromiseLookup::isPromiseStateStillSane(JSContext* cx) {
+ MOZ_ASSERT(state_ == State::Initialized);
+
+ NativeObject* promiseProto = getPromisePrototype(cx);
+ MOZ_ASSERT(promiseProto);
+
+ NativeObject* promiseCtor = getPromiseConstructor(cx);
+ MOZ_ASSERT(promiseCtor);
+
+ // Ensure that Promise.prototype still has the expected shape.
+ if (promiseProto->shape() != promiseProtoShape_) {
+ return false;
+ }
+
+ // Ensure that Promise still has the expected shape.
+ if (promiseCtor->shape() != promiseConstructorShape_) {
+ return false;
+ }
+
+ // Ensure that Promise.prototype.constructor is the canonical constructor.
+ if (promiseProto->getSlot(promiseProtoConstructorSlot_) !=
+ ObjectValue(*promiseCtor)) {
+ return false;
+ }
+
+ // Ensure that Promise.prototype.then is the canonical "then" function.
+ if (!isDataPropertyNative(cx, promiseProto, promiseProtoThenSlot_,
+ Promise_then)) {
+ return false;
+ }
+
+ // Ensure the species getter contains the canonical @@species function.
+ if (!isAccessorPropertyNative(cx, promiseCtor, promiseSpeciesGetterSlot_,
+ Promise_static_species)) {
+ return false;
+ }
+
+ // Ensure that Promise.resolve is the canonical "resolve" function.
+ if (!isDataPropertyNative(cx, promiseCtor, promiseResolveSlot_,
+ Promise_static_resolve)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool js::PromiseLookup::ensureInitialized(JSContext* cx,
+ Reinitialize reinitialize) {
+ if (state_ == State::Uninitialized) {
+ // If the cache is not initialized, initialize it.
+ initialize(cx);
+ } else if (state_ == State::Initialized) {
+ if (reinitialize == Reinitialize::Allowed) {
+ if (!isPromiseStateStillSane(cx)) {
+ // If the promise state is no longer sane, reinitialize.
+ reset();
+ initialize(cx);
+ }
+ } else {
+ // When we're not allowed to reinitialize, the promise state must
+ // still be sane if the cache is already initialized.
+ MOZ_ASSERT(isPromiseStateStillSane(cx));
+ }
+ }
+
+ // If the cache is disabled or still uninitialized, don't bother trying to
+ // optimize.
+ if (state_ != State::Initialized) {
+ return false;
+ }
+
+ // By the time we get here, we should have a sane promise state.
+ MOZ_ASSERT(isPromiseStateStillSane(cx));
+
+ return true;
+}
+
+bool js::PromiseLookup::isDefaultPromiseState(JSContext* cx) {
+ // Promise and Promise.prototype are in their default states iff the
+ // lookup cache was successfully initialized.
+ return ensureInitialized(cx, Reinitialize::Allowed);
+}
+
+bool js::PromiseLookup::hasDefaultProtoAndNoShadowedProperties(
+ JSContext* cx, PromiseObject* promise) {
+ // Ensure |promise|'s prototype is the actual Promise.prototype.
+ if (promise->staticPrototype() != getPromisePrototype(cx)) {
+ return false;
+ }
+
+ // Ensure |promise| doesn't define any own properties. This serves as a
+ // quick check to make sure |promise| doesn't define an own "constructor"
+ // or "then" property which may shadow Promise.prototype.constructor or
+ // Promise.prototype.then.
+ return promise->empty();
+}
+
+bool js::PromiseLookup::isDefaultInstance(JSContext* cx, PromiseObject* promise,
+ Reinitialize reinitialize) {
+ // Promise and Promise.prototype must be in their default states.
+ if (!ensureInitialized(cx, reinitialize)) {
+ return false;
+ }
+
+ // The object uses the default properties from Promise.prototype.
+ return hasDefaultProtoAndNoShadowedProperties(cx, promise);
+}
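+
+// Illustrative caller, as a sketch (assumes the per-realm cache member that
+// the Promise fast paths consult):
+//
+//   js::PromiseLookup& lookup = cx->realm()->promiseLookup;
+//   if (lookup.isDefaultInstance(cx, promise)) {
+//     // Safe to skip the generic @@species/then lookups.
+//   }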
diff --git a/js/src/vm/PromiseLookup.h b/js/src/vm/PromiseLookup.h
new file mode 100644
index 0000000000..b88e1a95a8
--- /dev/null
+++ b/js/src/vm/PromiseLookup.h
@@ -0,0 +1,163 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PromiseLookup_h
+#define vm_PromiseLookup_h
+
+#include "mozilla/Attributes.h" // MOZ_NON_TEMPORARY_CLASS, MOZ_INIT_OUTSIDE_CTOR
+
+#include <stdint.h> // uint8_t, uint32_t
+
+#include "js/CallArgs.h" // JSNative
+
+struct JS_PUBLIC_API JSContext;
+
+class JSFunction;
+
+namespace js {
+
+class NativeObject;
+class PromiseObject;
+class Shape;
+
+class MOZ_NON_TEMPORARY_CLASS PromiseLookup final {
+ // clang-format off
+ /*
+ * A PromiseLookup holds the following:
+ *
+ * Promise's shape (promiseConstructorShape_)
+ * To ensure that Promise has not been modified.
+ *
+ * Promise.prototype's shape (promiseProtoShape_)
+ * To ensure that Promise.prototype has not been modified.
+ *
+ * Promise's slot number for the @@species getter
+ * (promiseSpeciesGetterSlot_)
+ * To quickly retrieve the @@species getter for Promise.
+ *
+ * Promise's slot number for resolve (promiseResolveSlot_)
+ * To quickly retrieve the Promise.resolve function.
+ *
+ * Promise.prototype's slot number for constructor (promiseProtoConstructorSlot_)
+ * To quickly retrieve the Promise.prototype.constructor property.
+ *
+ * Promise.prototype's slot number for then (promiseProtoThenSlot_)
+ * To quickly retrieve the Promise.prototype.then function.
+ *
+ * MOZ_INIT_OUTSIDE_CTOR fields below are set in |initialize()|. The
+ * constructor only initializes a |state_| field, that defines whether the
+ * other fields are accessible.
+ */
+ // clang-format on
+
+ // Shape of matching Promise object.
+ MOZ_INIT_OUTSIDE_CTOR Shape* promiseConstructorShape_;
+
+ // Shape of matching Promise.prototype object.
+ MOZ_INIT_OUTSIDE_CTOR Shape* promiseProtoShape_;
+
+ // Slot number for the @@species property on the Promise constructor.
+ MOZ_INIT_OUTSIDE_CTOR uint32_t promiseSpeciesGetterSlot_;
+
+ // Slots Promise.resolve, Promise.prototype.constructor, and
+ // Promise.prototype.then.
+ MOZ_INIT_OUTSIDE_CTOR uint32_t promiseResolveSlot_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t promiseProtoConstructorSlot_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t promiseProtoThenSlot_;
+
+ enum class State : uint8_t {
+ // Flags marking the lazy initialization of the above fields.
+ Uninitialized,
+ Initialized,
+
+ // The disabled flag is set when we don't want to try optimizing
+ // anymore because core objects were changed.
+ Disabled
+ };
+
+ State state_ = State::Uninitialized;
+
+ // Initialize the internal fields.
+ //
+ // The cache is successfully initialized iff
+ // 1. Promise and Promise.prototype classes are initialized.
+ // 2. Promise.prototype.constructor is equal to Promise.
+ // 3. Promise.prototype.then is the original `then` function.
+ // 4. Promise[@@species] is the original @@species getter.
+ // 5. Promise.resolve is the original `resolve` function.
+ void initialize(JSContext* cx);
+
+ // Reset the cache.
+ void reset();
+
+ // Check if the global promise-related objects have not been messed with
+ // in a way that would disable this cache.
+ bool isPromiseStateStillSane(JSContext* cx);
+
+ // Flags to control whether or not ensureInitialized() is allowed to
+ // reinitialize the cache when the Promise state is no longer sane.
+ enum class Reinitialize : bool { Allowed, Disallowed };
+
+ // Return true if the lookup cache is properly initialized for usage.
+ bool ensureInitialized(JSContext* cx, Reinitialize reinitialize);
+
+ // Return true if the prototype of the given Promise object is
+ // Promise.prototype and the object doesn't shadow properties from
+ // Promise.prototype.
+ bool hasDefaultProtoAndNoShadowedProperties(JSContext* cx,
+ PromiseObject* promise);
+
+ // Return true if the given Promise object uses the default @@species,
+ // "constructor", and "then" properties.
+ bool isDefaultInstance(JSContext* cx, PromiseObject* promise,
+ Reinitialize reinitialize);
+
+ // Return the built-in Promise constructor or null if not yet initialized.
+ static JSFunction* getPromiseConstructor(JSContext* cx);
+
+ // Return the built-in Promise prototype or null if not yet initialized.
+ static NativeObject* getPromisePrototype(JSContext* cx);
+
+ // Return true if the slot contains the given native.
+ static bool isDataPropertyNative(JSContext* cx, NativeObject* obj,
+ uint32_t slot, JSNative native);
+
+ // Return true if the accessor shape contains the given native.
+ static bool isAccessorPropertyNative(JSContext* cx, NativeObject* holder,
+ uint32_t getterSlot, JSNative native);
+
+ public:
+  /** Construct a |PromiseLookup| in the uninitialized state. */
+ PromiseLookup() { reset(); }
+
+ // Return true if the Promise constructor and Promise.prototype still use
+ // the default built-in functions.
+ bool isDefaultPromiseState(JSContext* cx);
+
+ // Return true if the given Promise object uses the default @@species,
+ // "constructor", and "then" properties.
+ bool isDefaultInstance(JSContext* cx, PromiseObject* promise) {
+ return isDefaultInstance(cx, promise, Reinitialize::Allowed);
+ }
+
+ // Return true if the given Promise object uses the default @@species,
+ // "constructor", and "then" properties.
+ bool isDefaultInstanceWhenPromiseStateIsSane(JSContext* cx,
+ PromiseObject* promise) {
+ return isDefaultInstance(cx, promise, Reinitialize::Disallowed);
+ }
+
+ // Purge the cache and all info associated with it.
+ void purge() {
+ if (state_ == State::Initialized) {
+ reset();
+ }
+ }
+};
+
+} // namespace js
+
+#endif // vm_PromiseLookup_h
diff --git a/js/src/vm/PromiseObject.h b/js/src/vm/PromiseObject.h
new file mode 100644
index 0000000000..7294724510
--- /dev/null
+++ b/js/src/vm/PromiseObject.h
@@ -0,0 +1,250 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PromiseObject_h
+#define vm_PromiseObject_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include <stdint.h> // int32_t, uint64_t
+
+#include "js/Class.h" // JSClass
+#include "js/Promise.h" // JS::PromiseState
+#include "js/RootingAPI.h" // JS::{,Mutable}Handle
+#include "js/Value.h" // JS::Value, JS::Int32Value, JS::UndefinedHandleValue
+#include "vm/NativeObject.h" // js::NativeObject
+
+class JS_PUBLIC_API JSObject;
+
+namespace js {
+
+class SavedFrame;
+
+enum PromiseSlots {
+ // Int32 value with PROMISE_FLAG_* flags below.
+ PromiseSlot_Flags = 0,
+
+ // * if this promise is pending, reaction objects
+ // * undefined if there's no reaction
+  // * maybe-wrapped PromiseReactionRecord if there's only one reaction
+  // * dense array if there are two or more reactions
+ // * if this promise is fulfilled, the resolution value
+ // * if this promise is rejected, the reason for the rejection
+ PromiseSlot_ReactionsOrResult,
+
+ // * if this promise is pending, resolve/reject functions.
+ // This slot holds only the reject function. The resolve function is
+ // reachable from the reject function's extended slot.
+ // * if this promise is either fulfilled or rejected, undefined
+ PromiseSlot_RejectFunction,
+
+ // Promise object's debug info, which is created on demand.
+ // * if this promise has no debug info, undefined
+ // * if this promise contains only its process-unique ID, the ID's number
+ // value
+ // * otherwise a PromiseDebugInfo object
+ PromiseSlot_DebugInfo,
+
+ PromiseSlots,
+};
+
+// This promise is either fulfilled or rejected.
+// If this flag is not set, this promise is pending.
+#define PROMISE_FLAG_RESOLVED 0x1
+
+// If this flag and PROMISE_FLAG_RESOLVED are set, this promise is fulfilled.
+// If only PROMISE_FLAG_RESOLVED is set, this promise is rejected.
+#define PROMISE_FLAG_FULFILLED 0x2
+
+// Indicates the promise has ever had a fulfillment or rejection handler;
+// used in unhandled rejection tracking.
+#define PROMISE_FLAG_HANDLED 0x4
+
+// This promise uses the default resolving functions.
+// The PromiseSlot_RejectFunction slot is not used.
+#define PROMISE_FLAG_DEFAULT_RESOLVING_FUNCTIONS 0x08
+
+// This promise's Promise Resolve Function's [[AlreadyResolved]].[[Value]] is
+// set to true.
+//
+// Valid only for promises with PROMISE_FLAG_DEFAULT_RESOLVING_FUNCTIONS.
+// For promises without PROMISE_FLAG_DEFAULT_RESOLVING_FUNCTIONS, Promise
+// Resolve/Reject Function's "Promise" slot represents the value.
+#define PROMISE_FLAG_DEFAULT_RESOLVING_FUNCTIONS_ALREADY_RESOLVED 0x10
+
+// This promise is either the return value of an async function invocation or
+// an async generator's method.
+#define PROMISE_FLAG_ASYNC 0x20
+
+// This promise knows how to propagate information required to keep track of
+// whether an activation behavior was in progress when the original promise in
+// the promise chain was created. This is a concept defined in the HTML spec:
+// https://html.spec.whatwg.org/multipage/interaction.html#triggered-by-user-activation
+// It is used by the embedder in order to request SpiderMonkey to keep track of
+// this information in a Promise, and also to propagate it to newly created
+// promises while processing Promise#then.
+#define PROMISE_FLAG_REQUIRES_USER_INTERACTION_HANDLING 0x40
+
+// This flag indicates whether an activation behavior was in progress when the
+// original promise in the promise chain was created. Activation behavior is a
+// concept defined by the HTML spec:
+// https://html.spec.whatwg.org/multipage/interaction.html#triggered-by-user-activation
+// This flag is only effective when the
+// PROMISE_FLAG_REQUIRES_USER_INTERACTION_HANDLING is set.
+#define PROMISE_FLAG_HAD_USER_INTERACTION_UPON_CREATION 0x80
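+
+// How the two state flags combine (this mirrors PromiseObject::state()):
+//
+//   RESOLVED | FULFILLED | state
+//   ---------+-----------+----------
+//       0    |     0     | pending
+//       1    |     1     | fulfilled
+//       1    |     0     | rejected
+//
+// (FULFILLED set without RESOLVED is asserted to be impossible.)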
+
+struct PromiseReactionRecordBuilder;
+
+class PromiseObject : public NativeObject {
+ public:
+ static const unsigned RESERVED_SLOTS = PromiseSlots;
+ static const JSClass class_;
+ static const JSClass protoClass_;
+ static PromiseObject* create(JSContext* cx, JS::Handle<JSObject*> executor,
+ JS::Handle<JSObject*> proto = nullptr,
+ bool needsWrapping = false);
+
+ static PromiseObject* createSkippingExecutor(JSContext* cx);
+
+ // Create an instance of the original Promise binding, rejected with the given
+ // value.
+ static PromiseObject* unforgeableReject(JSContext* cx,
+ JS::Handle<JS::Value> value);
+
+ // Create an instance of the original Promise binding, resolved with the given
+ // value.
+ //
+ // However, if |value| is itself a promise -- including from another realm --
+ // |value| itself will in some circumstances be returned. This sadly means
+ // this function must return |JSObject*| and can't return |PromiseObject*|.
+ static JSObject* unforgeableResolve(JSContext* cx,
+ JS::Handle<JS::Value> value);
+
+ // Create an instance of the original Promise binding, resolved with the given
+ // value *that is not a promise* -- from this realm/compartment or from any
+ // other.
+ //
+ // If you don't know for certain that your value will never be a promise, use
+ // |PromiseObject::unforgeableResolve| instead.
+ //
+ // Use |PromiseResolvedWithUndefined| (defined below) if your value is always
+ // |undefined|.
+ static PromiseObject* unforgeableResolveWithNonPromise(
+ JSContext* cx, JS::Handle<JS::Value> value);
+
+ int32_t flags() { return getFixedSlot(PromiseSlot_Flags).toInt32(); }
+
+ void setHandled() {
+ setFixedSlot(PromiseSlot_Flags,
+ JS::Int32Value(flags() | PROMISE_FLAG_HANDLED));
+ }
+
+ JS::PromiseState state() {
+ int32_t flags = this->flags();
+ if (!(flags & PROMISE_FLAG_RESOLVED)) {
+ MOZ_ASSERT(!(flags & PROMISE_FLAG_FULFILLED));
+ return JS::PromiseState::Pending;
+ }
+ if (flags & PROMISE_FLAG_FULFILLED) {
+ return JS::PromiseState::Fulfilled;
+ }
+ return JS::PromiseState::Rejected;
+ }
+
+ JS::Value reactions() {
+ MOZ_ASSERT(state() == JS::PromiseState::Pending);
+ return getFixedSlot(PromiseSlot_ReactionsOrResult);
+ }
+
+ JS::Value value() {
+ MOZ_ASSERT(state() == JS::PromiseState::Fulfilled);
+ return getFixedSlot(PromiseSlot_ReactionsOrResult);
+ }
+
+ JS::Value reason() {
+ MOZ_ASSERT(state() == JS::PromiseState::Rejected);
+ return getFixedSlot(PromiseSlot_ReactionsOrResult);
+ }
+
+ JS::Value valueOrReason() {
+ MOZ_ASSERT(state() != JS::PromiseState::Pending);
+ return getFixedSlot(PromiseSlot_ReactionsOrResult);
+ }
+
+ [[nodiscard]] static bool resolve(JSContext* cx,
+ JS::Handle<PromiseObject*> promise,
+ JS::Handle<JS::Value> resolutionValue);
+ [[nodiscard]] static bool reject(JSContext* cx,
+ JS::Handle<PromiseObject*> promise,
+ JS::Handle<JS::Value> rejectionValue);
+
+ static void onSettled(JSContext* cx, JS::Handle<PromiseObject*> promise,
+ JS::Handle<js::SavedFrame*> rejectionStack);
+
+ double allocationTime();
+ double resolutionTime();
+ JSObject* allocationSite();
+ JSObject* resolutionSite();
+ double lifetime();
+ double timeToResolution() {
+ MOZ_ASSERT(state() != JS::PromiseState::Pending);
+ return resolutionTime() - allocationTime();
+ }
+
+ [[nodiscard]] bool dependentPromises(
+ JSContext* cx, JS::MutableHandle<GCVector<Value>> values);
+
+ // Return the process-unique ID of this promise. Only used by the debugger.
+ uint64_t getID();
+
+ // Apply 'builder' to each reaction record in this promise's list. Used only
+ // by the Debugger API.
+ //
+ // The context cx need not be same-compartment with this promise. (In typical
+ // use, cx is in a debugger compartment, and this promise is in a debuggee
+ // compartment.) This function presents data to builder exactly as it appears
+ // in the reaction records, so the values passed to builder methods could
+ // potentially be cross-compartment with both cx and this promise.
+ //
+ // If this function encounters an error, it will report it to 'cx' and return
+ // false. If a builder call returns false, iteration stops, and this function
+  // returns false; the builder should set an error on 'cx' as appropriate.
+ // Otherwise, this function returns true.
+ [[nodiscard]] bool forEachReactionRecord(
+ JSContext* cx, PromiseReactionRecordBuilder& builder);
+
+ bool isUnhandled() {
+ MOZ_ASSERT(state() == JS::PromiseState::Rejected);
+ return !(flags() & PROMISE_FLAG_HANDLED);
+ }
+
+ bool requiresUserInteractionHandling() {
+ return (flags() & PROMISE_FLAG_REQUIRES_USER_INTERACTION_HANDLING);
+ }
+
+ void setRequiresUserInteractionHandling(bool state);
+
+ bool hadUserInteractionUponCreation() {
+ return (flags() & PROMISE_FLAG_HAD_USER_INTERACTION_UPON_CREATION);
+ }
+
+ void setHadUserInteractionUponCreation(bool state);
+
+ void copyUserInteractionFlagsFrom(PromiseObject& rhs);
+};
+
+/**
+ * Create an instance of the original Promise binding, resolved with the value
+ * |undefined|.
+ */
+inline PromiseObject* PromiseResolvedWithUndefined(JSContext* cx) {
+ return PromiseObject::unforgeableResolveWithNonPromise(
+ cx, JS::UndefinedHandleValue);
+}
+
+} // namespace js
+
+#endif // vm_PromiseObject_h
diff --git a/js/src/vm/PropMap-inl.h b/js/src/vm/PropMap-inl.h
new file mode 100644
index 0000000000..f45e1abb4e
--- /dev/null
+++ b/js/src/vm/PropMap-inl.h
@@ -0,0 +1,251 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PropMap_inl_h
+#define vm_PropMap_inl_h
+
+#include "vm/PropMap.h"
+
+#include "gc/Cell.h"
+#include "gc/Zone.h"
+#include "vm/JSContext.h"
+
+#include "gc/GCContext-inl.h"
+
+namespace js {
+
+inline AutoKeepPropMapTables::AutoKeepPropMapTables(JSContext* cx)
+ : cx_(cx), prev_(cx->zone()->keepPropMapTables()) {
+ cx->zone()->setKeepPropMapTables(true);
+}
+
+inline AutoKeepPropMapTables::~AutoKeepPropMapTables() {
+ cx_->zone()->setKeepPropMapTables(prev_);
+}
+
+// static
+MOZ_ALWAYS_INLINE PropMap* PropMap::lookupLinear(uint32_t mapLength,
+ PropertyKey key,
+ uint32_t* index) {
+ MOZ_ASSERT(mapLength > 0);
+ MOZ_ASSERT(mapLength <= Capacity);
+
+ // This function is very hot, so we use a macro to manually unroll the lookups
+ // below. Some compilers are able to unroll the equivalent loops, but they're
+ // not very consistent about this. The code below results in reasonable code
+ // with all compilers we tested.
+
+ static_assert(PropMap::Capacity == 8,
+ "Code below needs to change when capacity changes");
+
+#define LOOKUP_KEY(idx) \
+ if (mapLength > idx && getKey(idx) == key) { \
+ *index = idx; \
+ return this; \
+ }
+ LOOKUP_KEY(0);
+ LOOKUP_KEY(1);
+ LOOKUP_KEY(2);
+ LOOKUP_KEY(3);
+ LOOKUP_KEY(4);
+ LOOKUP_KEY(5);
+ LOOKUP_KEY(6);
+ LOOKUP_KEY(7);
+#undef LOOKUP_KEY
+
+ PropMap* map = this;
+ while (map->hasPrevious()) {
+ map = map->asLinked()->previous();
+#define LOOKUP_KEY(idx) \
+ if (map->getKey(idx) == key) { \
+ *index = idx; \
+ return map; \
+ }
+ LOOKUP_KEY(0);
+ LOOKUP_KEY(1);
+ LOOKUP_KEY(2);
+ LOOKUP_KEY(3);
+ LOOKUP_KEY(4);
+ LOOKUP_KEY(5);
+ LOOKUP_KEY(6);
+ LOOKUP_KEY(7);
+#undef LOOKUP_KEY
+ }
+
+ return nullptr;
+}
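+
+// For reference, the unrolled lookups above are equivalent to this simple
+// loop (shown only for clarity; the macro form produced better code on the
+// compilers tested):
+//
+//   for (uint32_t i = 0; i < mapLength; i++) {
+//     if (getKey(i) == key) {
+//       *index = i;
+//       return this;
+//     }
+//   }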
+
+MOZ_ALWAYS_INLINE PropMap* PropMapTable::lookup(PropMap* map,
+ uint32_t mapLength,
+ PropertyKey key,
+ uint32_t* index) {
+ JS::AutoCheckCannotGC nogc;
+ MOZ_ASSERT(map->asLinked()->maybeTable(nogc) == this);
+
+ PropMapAndIndex entry;
+ if (lookupInCache(key, &entry)) {
+ if (entry.isNone()) {
+ return nullptr;
+ }
+ } else {
+ auto p = lookupRaw(key);
+ addToCache(key, p);
+ if (!p) {
+ return nullptr;
+ }
+ entry = *p;
+ }
+
+ // For the last map, only properties in [0, mapLength) are part of the object.
+ if (entry.map() == map && entry.index() >= mapLength) {
+ return nullptr;
+ }
+
+ *index = entry.index();
+ return entry.map();
+}
+
+// static
+MOZ_ALWAYS_INLINE PropMap* PropMap::lookupPure(uint32_t mapLength,
+ PropertyKey key,
+ uint32_t* index) {
+ if (canHaveTable()) {
+ JS::AutoCheckCannotGC nogc;
+ if (PropMapTable* table = asLinked()->maybeTable(nogc)) {
+ return table->lookup(this, mapLength, key, index);
+ }
+ }
+
+ return lookupLinear(mapLength, key, index);
+}
+
+// static
+MOZ_ALWAYS_INLINE PropMap* PropMap::lookup(JSContext* cx, uint32_t mapLength,
+ PropertyKey key, uint32_t* index) {
+ if (canHaveTable()) {
+ JS::AutoCheckCannotGC nogc;
+ if (PropMapTable* table = asLinked()->ensureTable(cx, nogc);
+ MOZ_LIKELY(table)) {
+ return table->lookup(this, mapLength, key, index);
+ }
+ // OOM. Do a linear lookup.
+ cx->recoverFromOutOfMemory();
+ }
+
+ return lookupLinear(mapLength, key, index);
+}
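+
+// Illustrative caller sketch (hypothetical; |map| and |mapLength| would come
+// from the object's shape):
+//
+//   uint32_t index;
+//   if (PropMap* found = map->lookup(cx, mapLength, key, &index)) {
+//     PropertyInfo prop = found->getPropertyInfo(index);
+//     // ... use prop.slot(), prop.flags(), ...
+//   }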
+
+// static
+inline void SharedPropMap::getPrevious(MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength) {
+ // Update the map/mapLength pointers to "remove" the last property. In most
+ // cases we can simply decrement *mapLength, but if *mapLength is 1 we have to
+ // either start at the previous map or set map/mapLength to nullptr/zero
+ // (if there is just one property).
+
+ MOZ_ASSERT(map);
+ MOZ_ASSERT(*mapLength > 0);
+
+ if (*mapLength > 1) {
+ *mapLength -= 1;
+ return;
+ }
+
+ if (map->hasPrevious()) {
+ map.set(map->asNormal()->previous());
+ *mapLength = PropMap::Capacity;
+ return;
+ }
+
+ map.set(nullptr);
+ *mapLength = 0;
+}
+
+// static
+inline bool PropMap::lookupForRemove(JSContext* cx, PropMap* map,
+ uint32_t mapLength, PropertyKey key,
+ const AutoKeepPropMapTables& keep,
+ PropMap** propMap, uint32_t* propIndex,
+ PropMapTable** table,
+ PropMapTable::Ptr* ptr) {
+ if (map->isDictionary()) {
+ *table = map->asLinked()->ensureTable(cx, keep);
+ if (!*table) {
+ return false;
+ }
+ *ptr = (*table)->lookupRaw(key);
+ *propMap = *ptr ? (*ptr)->map() : nullptr;
+ *propIndex = *ptr ? (*ptr)->index() : 0;
+ return true;
+ }
+
+ *table = nullptr;
+ *propMap = map->lookup(cx, mapLength, key, propIndex);
+ return true;
+}
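+
+// Hypothetical sketch of a removal path driving lookupForRemove; the
+// AutoKeepPropMapTables guard keeps |table| and |ptr| valid across the call:
+//
+//   AutoKeepPropMapTables keep(cx);
+//   PropMap* propMap;
+//   uint32_t propIndex;
+//   PropMapTable* table;
+//   PropMapTable::Ptr ptr;
+//   if (!PropMap::lookupForRemove(cx, map, mapLength, key, keep, &propMap,
+//                                 &propIndex, &table, &ptr)) {
+//     return false;  // OOM was reported to cx
+//   }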
+
+MOZ_ALWAYS_INLINE bool SharedPropMap::shouldConvertToDictionaryForAdd() const {
+ if (MOZ_LIKELY(numPreviousMaps() < NumPrevMapsConsiderDictionary)) {
+ return false;
+ }
+ if (numPreviousMaps() >= NumPrevMapsAlwaysDictionary) {
+ return true;
+ }
+
+ // More heuristics: if one of the last two maps has had a dictionary
+ // conversion before, or is branchy (indicated by parent != previous), convert
+ // to dictionary.
+ const SharedPropMap* curMap = this;
+ for (size_t i = 0; i < 2; i++) {
+ if (curMap->hadDictionaryConversion()) {
+ return true;
+ }
+ if (curMap->treeDataRef().parent.map() != curMap->asNormal()->previous()) {
+ return true;
+ }
+ curMap = curMap->asNormal()->previous();
+ }
+ return false;
+}
+
+inline void SharedPropMap::sweep(JS::GCContext* gcx) {
+ // We detach the child from the parent if the parent is reachable.
+ //
+ // This test depends on PropMap arenas not being freed until after we finish
+ // incrementally sweeping them. If that were not the case the parent pointer
+ // could point to a marked cell that had been deallocated and then
+ // reallocated, since allocating a cell in a zone that is being marked will
+ // set the mark bit for that cell.
+
+ MOZ_ASSERT(zone()->isGCSweeping());
+ MOZ_ASSERT_IF(hasPrevious(), asLinked()->previous()->zone() == zone());
+
+ SharedPropMapAndIndex parent = treeDataRef().parent;
+ if (!parent.isNone() && TenuredThingIsMarkedAny(parent.map())) {
+ parent.map()->removeChild(gcx, this);
+ }
+}
+
+inline void SharedPropMap::finalize(JS::GCContext* gcx) {
+ if (canHaveTable() && asLinked()->hasTable()) {
+ asLinked()->purgeTable(gcx);
+ }
+ if (hasChildrenSet()) {
+ SharedChildrenPtr& childrenRef = treeDataRef().children;
+ gcx->delete_(this, childrenRef.toChildrenSet(), MemoryUse::PropMapChildren);
+ childrenRef.setNone();
+ }
+}
+
+inline void DictionaryPropMap::finalize(JS::GCContext* gcx) {
+ if (asLinked()->hasTable()) {
+ asLinked()->purgeTable(gcx);
+ }
+}
+
+} // namespace js
+
+#endif /* vm_PropMap_inl_h */
diff --git a/js/src/vm/PropMap.cpp b/js/src/vm/PropMap.cpp
new file mode 100644
index 0000000000..851880f3e6
--- /dev/null
+++ b/js/src/vm/PropMap.cpp
@@ -0,0 +1,1233 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/PropMap-inl.h"
+
+#include "gc/Allocator.h"
+#include "gc/HashUtil.h"
+#include "js/GCVector.h"
+#include "vm/JSObject.h"
+
+#include "gc/GCContext-inl.h"
+#include "gc/Marking-inl.h"
+#include "vm/ObjectFlags-inl.h"
+
+using namespace js;
+
+void PropMap::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* children, size_t* tables) const {
+ if (isShared() && asShared()->hasChildrenSet()) {
+ auto* set = asShared()->treeDataRef().children.toChildrenSet();
+ *children += set->shallowSizeOfIncludingThis(mallocSizeOf);
+ }
+ if (canHaveTable() && asLinked()->hasTable()) {
+ *tables += asLinked()->data_.table->sizeOfIncludingThis(mallocSizeOf);
+ }
+}
+
+// static
+SharedPropMap* SharedPropMap::create(JSContext* cx, Handle<SharedPropMap*> prev,
+ HandleId id, PropertyInfo prop) {
+ // If the first property has a slot number <= MaxFirstSlot (defined below),
+ // every property added later will have a slot number <=
+ // CompactPropertyInfo::MaxSlotNumber, so we can use a CompactPropMap.
+ static constexpr size_t MaxFirstSlot =
+ CompactPropertyInfo::MaxSlotNumber - (PropMap::Capacity - 1);
+
+ if (!prev && prop.maybeSlot() <= MaxFirstSlot) {
+ return cx->newCell<CompactPropMap>(id, prop);
+ }
+
+ return cx->newCell<NormalPropMap>(prev, id, prop);
+}
+
+// static
+SharedPropMap* SharedPropMap::createInitial(JSContext* cx, HandleId id,
+ PropertyInfo prop) {
+ // Lookup or create a shared map based on the first property.
+
+ using Lookup = InitialPropMapHasher::Lookup;
+
+ auto& table = cx->zone()->shapeZone().initialPropMaps;
+
+ auto p = MakeDependentAddPtr(cx, table, Lookup(id, prop));
+ if (p) {
+ return *p;
+ }
+
+ SharedPropMap* result = create(cx, /* prev = */ nullptr, id, prop);
+ if (!result) {
+ return nullptr;
+ }
+
+ Lookup lookup(id, prop);
+ if (!p.add(cx, table, lookup, result)) {
+ return nullptr;
+ }
+
+ return result;
+}
+
+// static
+SharedPropMap* SharedPropMap::clone(JSContext* cx, Handle<SharedPropMap*> map,
+ uint32_t length) {
+ MOZ_ASSERT(length > 0);
+
+ if (map->isCompact()) {
+ Rooted<CompactPropMap*> prev(cx, map->asCompact());
+ return cx->newCell<CompactPropMap>(prev, length);
+ }
+
+ Rooted<NormalPropMap*> prev(cx, map->asNormal());
+ return cx->newCell<NormalPropMap>(prev, length);
+}
+
+// static
+DictionaryPropMap* SharedPropMap::toDictionaryMap(JSContext* cx,
+ Handle<SharedPropMap*> map,
+ uint32_t length) {
+ // Starting at the last map, clone each shared map to a new dictionary map.
+
+ Rooted<DictionaryPropMap*> lastDictMap(cx);
+ Rooted<DictionaryPropMap*> nextDictMap(cx);
+
+ Rooted<SharedPropMap*> sharedMap(cx, map);
+ uint32_t sharedLength = length;
+ while (true) {
+ sharedMap->setHadDictionaryConversion();
+
+ DictionaryPropMap* dictMap;
+ if (sharedMap->isCompact()) {
+ Rooted<CompactPropMap*> prev(cx, sharedMap->asCompact());
+ dictMap = cx->newCell<DictionaryPropMap>(prev, sharedLength);
+ } else {
+ Rooted<NormalPropMap*> prev(cx, sharedMap->asNormal());
+ dictMap = cx->newCell<DictionaryPropMap>(prev, sharedLength);
+ }
+ if (!dictMap) {
+ return nullptr;
+ }
+
+ if (!lastDictMap) {
+ lastDictMap = dictMap;
+ }
+
+ if (nextDictMap) {
+ nextDictMap->initPrevious(dictMap);
+ }
+ nextDictMap = dictMap;
+
+ if (!sharedMap->hasPrevious()) {
+ break;
+ }
+ sharedMap = sharedMap->asNormal()->previous();
+ sharedLength = PropMap::Capacity;
+ }
+
+ return lastDictMap;
+}
+
+static MOZ_ALWAYS_INLINE SharedPropMap* PropMapChildReadBarrier(
+ SharedPropMap* parent, SharedPropMap* child) {
+ JS::Zone* zone = child->zone();
+ if (zone->needsIncrementalBarrier()) {
+ // We need a read barrier for the map tree, since these are weak
+ // pointers.
+ ReadBarrier(child);
+ return child;
+ }
+
+ if (MOZ_UNLIKELY(zone->isGCSweeping() &&
+ IsAboutToBeFinalizedUnbarriered(child))) {
+ // The map we've found is unreachable and due to be finalized, so
+ // remove our weak reference to it and don't use it.
+ MOZ_ASSERT(parent->isMarkedAny());
+ parent->removeChild(zone->runtimeFromMainThread()->gcContext(), child);
+ return nullptr;
+ }
+
+ // We don't yield to the mutator when the zone is in this state so we don't
+ // need to account for it here.
+ MOZ_ASSERT(!zone->isGCCompacting());
+
+ return child;
+}
+
+SharedPropMap* SharedPropMap::lookupChild(uint32_t length, HandleId id,
+ PropertyInfo prop) {
+ MOZ_ASSERT(length > 0);
+
+ SharedChildrenPtr children = treeDataRef().children;
+ if (children.isNone()) {
+ return nullptr;
+ }
+
+ if (!hasChildrenSet()) {
+ SharedPropMapAndIndex prevChild = children.toSingleChild();
+ if (prevChild.index() == length - 1) {
+ SharedPropMap* child = prevChild.map();
+ uint32_t newPropIndex = indexOfNextProperty(length - 1);
+ if (child->matchProperty(newPropIndex, id, prop)) {
+ return PropMapChildReadBarrier(this, child);
+ }
+ }
+ return nullptr;
+ }
+
+ SharedChildrenSet* set = children.toChildrenSet();
+ SharedChildrenHasher::Lookup lookup(id, prop, length - 1);
+ if (auto p = set->lookup(lookup)) {
+ MOZ_ASSERT(p->index() == length - 1);
+ SharedPropMap* child = p->map();
+ return PropMapChildReadBarrier(this, child);
+ }
+ return nullptr;
+}
+
+bool SharedPropMap::addChild(JSContext* cx, SharedPropMapAndIndex child,
+ HandleId id, PropertyInfo prop) {
+ SharedPropMap* childMap = child.map();
+
+#ifdef DEBUG
+ // If the parent map was full, the child map must have the parent as
+ // |previous| map. Else, the parent and child have the same |previous| map.
+ if (childMap->hasPrevious()) {
+ if (child.index() == PropMap::Capacity - 1) {
+ MOZ_ASSERT(childMap->asLinked()->previous() == this);
+ } else {
+ MOZ_ASSERT(childMap->asLinked()->previous() == asLinked()->previous());
+ }
+ } else {
+ MOZ_ASSERT(!hasPrevious());
+ }
+#endif
+
+ SharedChildrenPtr& childrenRef = treeDataRef().children;
+
+ if (childrenRef.isNone()) {
+ childrenRef.setSingleChild(child);
+ childMap->treeDataRef().setParent(this, child.index());
+ return true;
+ }
+
+ SharedChildrenHasher::Lookup lookup(id, prop, child.index());
+
+ if (hasChildrenSet()) {
+ if (!childrenRef.toChildrenSet()->putNew(lookup, child)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ } else {
+ auto hash = MakeUnique<SharedChildrenSet>();
+ if (!hash || !hash->reserve(2)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ SharedPropMapAndIndex firstChild = childrenRef.toSingleChild();
+ SharedPropMap* firstChildMap = firstChild.map();
+ uint32_t firstChildIndex = indexOfNextProperty(firstChild.index());
+ SharedChildrenHasher::Lookup lookupFirst(
+ firstChildMap->getPropertyInfoWithKey(firstChildIndex),
+ firstChild.index());
+ hash->putNewInfallible(lookupFirst, firstChild);
+ hash->putNewInfallible(lookup, child);
+
+ childrenRef.setChildrenSet(hash.release());
+ setHasChildrenSet();
+ AddCellMemory(this, sizeof(SharedChildrenSet), MemoryUse::PropMapChildren);
+ }
+
+ childMap->treeDataRef().setParent(this, child.index());
+ return true;
+}
+
+// static
+bool SharedPropMap::addProperty(JSContext* cx, const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyFlags flags, ObjectFlags* objectFlags,
+ uint32_t* slot) {
+ MOZ_ASSERT(!flags.isCustomDataProperty());
+
+ *slot = SharedPropMap::slotSpan(clasp, map, *mapLength);
+
+ if (MOZ_UNLIKELY(*slot > SHAPE_MAXIMUM_SLOT)) {
+ ReportAllocationOverflow(cx);
+ return false;
+ }
+
+ *objectFlags =
+ GetObjectFlagsForNewProperty(clasp, *objectFlags, id, flags, cx);
+
+ PropertyInfo prop = PropertyInfo(flags, *slot);
+ return addPropertyInternal(cx, map, mapLength, id, prop);
+}
+
+// static
+bool SharedPropMap::addPropertyInReservedSlot(
+ JSContext* cx, const JSClass* clasp, MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id, PropertyFlags flags, uint32_t slot,
+ ObjectFlags* objectFlags) {
+ MOZ_ASSERT(!flags.isCustomDataProperty());
+
+ MOZ_ASSERT(slot < JSCLASS_RESERVED_SLOTS(clasp));
+ MOZ_ASSERT_IF(map, map->lastUsedSlot(*mapLength) < slot);
+
+ *objectFlags =
+ GetObjectFlagsForNewProperty(clasp, *objectFlags, id, flags, cx);
+
+ PropertyInfo prop = PropertyInfo(flags, slot);
+ return addPropertyInternal(cx, map, mapLength, id, prop);
+}
+
+// static
+bool SharedPropMap::addPropertyWithKnownSlot(JSContext* cx,
+ const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyFlags flags, uint32_t slot,
+ ObjectFlags* objectFlags) {
+ MOZ_ASSERT(!flags.isCustomDataProperty());
+
+ if (MOZ_UNLIKELY(slot < JSCLASS_RESERVED_SLOTS(clasp))) {
+ return addPropertyInReservedSlot(cx, clasp, map, mapLength, id, flags, slot,
+ objectFlags);
+ }
+
+ MOZ_ASSERT(slot == SharedPropMap::slotSpan(clasp, map, *mapLength));
+ MOZ_RELEASE_ASSERT(slot <= SHAPE_MAXIMUM_SLOT);
+
+ *objectFlags =
+ GetObjectFlagsForNewProperty(clasp, *objectFlags, id, flags, cx);
+
+ PropertyInfo prop = PropertyInfo(flags, slot);
+ return addPropertyInternal(cx, map, mapLength, id, prop);
+}
+
+// static
+bool SharedPropMap::addCustomDataProperty(JSContext* cx, const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyFlags flags,
+ ObjectFlags* objectFlags) {
+ MOZ_ASSERT(flags.isCustomDataProperty());
+
+ // Custom data properties don't have a slot. Copy the last property's slot
+ // number to simplify the implementation of SharedPropMap::slotSpan.
+ uint32_t slot = map ? map->lastUsedSlot(*mapLength) : SHAPE_INVALID_SLOT;
+
+ *objectFlags =
+ GetObjectFlagsForNewProperty(clasp, *objectFlags, id, flags, cx);
+
+ PropertyInfo prop = PropertyInfo(flags, slot);
+ return addPropertyInternal(cx, map, mapLength, id, prop);
+}
+
+// static
+bool SharedPropMap::addPropertyInternal(JSContext* cx,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyInfo prop) {
+ if (!map) {
+ // Adding the first property.
+ MOZ_ASSERT(*mapLength == 0);
+ map.set(SharedPropMap::createInitial(cx, id, prop));
+ if (!map) {
+ return false;
+ }
+ *mapLength = 1;
+ return true;
+ }
+
+ MOZ_ASSERT(*mapLength > 0);
+
+ if (*mapLength < PropMap::Capacity) {
+ // Use the next map entry if possible.
+ if (!map->hasKey(*mapLength)) {
+ if (map->canHaveTable()) {
+ JS::AutoCheckCannotGC nogc;
+ if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
+ if (!table->add(cx, id, PropMapAndIndex(map, *mapLength))) {
+ return false;
+ }
+ }
+ }
+ map->initProperty(*mapLength, id, prop);
+ *mapLength += 1;
+ return true;
+ }
+ if (map->matchProperty(*mapLength, id, prop)) {
+ *mapLength += 1;
+ return true;
+ }
+
+ // The next entry can't be used so look up or create a child map. The child
+ // map is a clone of this map up to mapLength, with the new property stored
+ // as the next entry.
+
+ if (SharedPropMap* child = map->lookupChild(*mapLength, id, prop)) {
+ map.set(child);
+ *mapLength += 1;
+ return true;
+ }
+
+ SharedPropMap* child = SharedPropMap::clone(cx, map, *mapLength);
+ if (!child) {
+ return false;
+ }
+ child->initProperty(*mapLength, id, prop);
+
+ SharedPropMapAndIndex childEntry(child, *mapLength - 1);
+ if (!map->addChild(cx, childEntry, id, prop)) {
+ return false;
+ }
+
+ map.set(child);
+ *mapLength += 1;
+ return true;
+ }
+
+ // This map is full so look up or create a child map.
+ MOZ_ASSERT(*mapLength == PropMap::Capacity);
+
+ if (SharedPropMap* child = map->lookupChild(*mapLength, id, prop)) {
+ map.set(child);
+ *mapLength = 1;
+ return true;
+ }
+
+ SharedPropMap* child = SharedPropMap::create(cx, map, id, prop);
+ if (!child) {
+ return false;
+ }
+
+ SharedPropMapAndIndex childEntry(child, PropMap::Capacity - 1);
+ if (!map->addChild(cx, childEntry, id, prop)) {
+ return false;
+ }
+
+ // As an optimization, pass the table to the new child map, unless adding the
+ // property to it OOMs. Measurements indicate this gets rid of a large number
+ // of PropMapTable allocations because we don't need to create a second table
+ // when the parent map won't be used again as the last map.
+ if (map->canHaveTable()) {
+ JS::AutoCheckCannotGC nogc;
+ if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
+ // Trigger a pre-barrier on the parent map to appease the pre-barrier
+ // verifier, because edges from the table are disappearing (even though
+ // these edges are strictly redundant with the |previous| maps).
+ gc::PreWriteBarrier(map.get());
+ if (table->add(cx, id, PropMapAndIndex(child, 0))) {
+ map->asLinked()->handOffTableTo(child->asLinked());
+ } else {
+ cx->recoverFromOutOfMemory();
+ }
+ }
+ }
+
+ map.set(child);
+ *mapLength = 1;
+ return true;
+}
+
+static PropertyFlags ComputeFlagsForSealOrFreeze(PropertyKey key,
+ PropertyFlags flags,
+ IntegrityLevel level) {
+ // Private fields are not visible to SetIntegrityLevel.
+ if (key.isSymbol() && key.toSymbol()->isPrivateName()) {
+ return flags;
+ }
+
+ // Make all properties non-configurable; if freezing, make data properties
+ // read-only.
+ flags.clearFlag(PropertyFlag::Configurable);
+ if (level == IntegrityLevel::Frozen && flags.isDataDescriptor()) {
+ flags.clearFlag(PropertyFlag::Writable);
+ }
+
+ return flags;
+}
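+
+// For example, a default data property (writable, enumerable, configurable)
+// becomes (writable, enumerable) when sealing and just (enumerable) when
+// freezing; both integrity levels clear Configurable.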
+
+// static
+bool SharedPropMap::freezeOrSealProperties(JSContext* cx, IntegrityLevel level,
+ const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t mapLength,
+ ObjectFlags* objectFlags) {
+ // Add all maps to a Vector so we can iterate over them in reverse order
+ // (property definition order).
+ JS::RootedVector<SharedPropMap*> maps(cx);
+ {
+ SharedPropMap* curMap = map;
+ while (true) {
+ if (!maps.append(curMap)) {
+ return false;
+ }
+ if (!curMap->hasPrevious()) {
+ break;
+ }
+ curMap = curMap->asNormal()->previous();
+ }
+ }
+
+ // Build a new SharedPropMap by adding each property with the changed
+ // attributes.
+ Rooted<SharedPropMap*> newMap(cx);
+ uint32_t newMapLength = 0;
+
+ Rooted<PropertyKey> key(cx);
+ Rooted<SharedPropMap*> curMap(cx);
+
+ for (size_t i = maps.length(); i > 0; i--) {
+ curMap = maps[i - 1];
+ uint32_t len = (i == 1) ? mapLength : PropMap::Capacity;
+
+ for (uint32_t j = 0; j < len; j++) {
+ key = curMap->getKey(j);
+ PropertyInfo prop = curMap->getPropertyInfo(j);
+ PropertyFlags flags =
+ ComputeFlagsForSealOrFreeze(key, prop.flags(), level);
+
+ if (prop.isCustomDataProperty()) {
+ if (!addCustomDataProperty(cx, clasp, &newMap, &newMapLength, key,
+ flags, objectFlags)) {
+ return false;
+ }
+ } else {
+ if (!addPropertyWithKnownSlot(cx, clasp, &newMap, &newMapLength, key,
+ flags, prop.slot(), objectFlags)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ map.set(newMap);
+ MOZ_ASSERT(newMapLength == mapLength);
+ return true;
+}
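+
+// For example (illustrative): Object.freeze({x: 1, y: 2}) ends up rebuilding
+// both properties with Configurable and Writable cleared, producing (or
+// reusing) the shared map for that frozen layout.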
+
+void LinkedPropMap::handOffTableTo(LinkedPropMap* next) {
+ MOZ_ASSERT(hasTable());
+ MOZ_ASSERT(!next->hasTable());
+
+ next->data_.table = data_.table;
+ data_.table = nullptr;
+
+ // Note: for tables, currently only sizeof(PropMapTable) is tracked.
+ RemoveCellMemory(this, sizeof(PropMapTable), MemoryUse::PropMapTable);
+ AddCellMemory(next, sizeof(PropMapTable), MemoryUse::PropMapTable);
+}
+
+void DictionaryPropMap::handOffLastMapStateTo(DictionaryPropMap* newLast) {
+ // A dictionary object's last map contains the table, slot freeList, and
+ // holeCount. These fields always have their initial values for non-last maps.
+
+ MOZ_ASSERT(this != newLast);
+
+ if (asLinked()->hasTable()) {
+ asLinked()->handOffTableTo(newLast->asLinked());
+ }
+
+ MOZ_ASSERT(newLast->freeList_ == SHAPE_INVALID_SLOT);
+ newLast->freeList_ = freeList_;
+ freeList_ = SHAPE_INVALID_SLOT;
+
+ MOZ_ASSERT(newLast->holeCount_ == 0);
+ newLast->holeCount_ = holeCount_;
+ holeCount_ = 0;
+}
+
+// static
+bool DictionaryPropMap::addProperty(JSContext* cx, const JSClass* clasp,
+ MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyFlags flags, uint32_t slot,
+ ObjectFlags* objectFlags) {
+ MOZ_ASSERT(map);
+
+ *objectFlags =
+ GetObjectFlagsForNewProperty(clasp, *objectFlags, id, flags, cx);
+ PropertyInfo prop = PropertyInfo(flags, slot);
+
+ if (*mapLength < PropMap::Capacity) {
+ JS::AutoCheckCannotGC nogc;
+ if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
+ if (!table->add(cx, id, PropMapAndIndex(map, *mapLength))) {
+ return false;
+ }
+ }
+ map->initProperty(*mapLength, id, prop);
+ *mapLength += 1;
+ return true;
+ }
+
+ DictionaryPropMap* newMap = cx->newCell<DictionaryPropMap>(map, id, prop);
+ if (!newMap) {
+ return false;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
+ if (!table->add(cx, id, PropMapAndIndex(newMap, 0))) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(newMap->previous() == map);
+ map->handOffLastMapStateTo(newMap);
+
+ map.set(newMap);
+ *mapLength = 1;
+ return true;
+}
+
+void DictionaryPropMap::changeProperty(JSContext* cx, const JSClass* clasp,
+ uint32_t index, PropertyFlags flags,
+ uint32_t slot,
+ ObjectFlags* objectFlags) {
+ MOZ_ASSERT(hasKey(index));
+ *objectFlags = GetObjectFlagsForNewProperty(clasp, *objectFlags,
+ getKey(index), flags, cx);
+ linkedData_.propInfos[index] = PropertyInfo(flags, slot);
+}
+
+void DictionaryPropMap::freezeOrSealProperties(JSContext* cx,
+ IntegrityLevel level,
+ const JSClass* clasp,
+ uint32_t mapLength,
+ ObjectFlags* objectFlags) {
+ DictionaryPropMap* curMap = this;
+ do {
+ for (uint32_t i = 0; i < mapLength; i++) {
+ if (!curMap->hasKey(i)) {
+ continue;
+ }
+ PropertyKey key = curMap->getKey(i);
+ PropertyFlags flags = curMap->getPropertyInfo(i).flags();
+ flags = ComputeFlagsForSealOrFreeze(key, flags, level);
+ curMap->changePropertyFlags(cx, clasp, i, flags, objectFlags);
+ }
+ curMap = curMap->previous();
+ mapLength = PropMap::Capacity;
+ } while (curMap);
+}
+
+// static
+void DictionaryPropMap::skipTrailingHoles(MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength) {
+ // After removing a property, rewind map/mapLength so that the last property
+ // is not a hole. This ensures accessing the last property of a map can always
+ // be done without checking for holes.
+
+ while (true) {
+ MOZ_ASSERT(*mapLength > 0);
+ do {
+ if (map->hasKey(*mapLength - 1)) {
+ return;
+ }
+ map->decHoleCount();
+ *mapLength -= 1;
+ } while (*mapLength > 0);
+
+ if (!map->previous()) {
+ // The dictionary map is empty; return the initial map with mapLength 0.
+ MOZ_ASSERT(*mapLength == 0);
+ MOZ_ASSERT(map->holeCount_ == 0);
+ return;
+ }
+
+ map->handOffLastMapStateTo(map->previous());
+ map.set(map->previous());
+ *mapLength = PropMap::Capacity;
+ }
+}
+
+// static
+void DictionaryPropMap::removeProperty(JSContext* cx,
+ MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength, PropMapTable* table,
+ PropMapTable::Ptr& ptr) {
+ MOZ_ASSERT(map);
+ MOZ_ASSERT(*mapLength > 0);
+
+ JS::AutoCheckCannotGC nogc;
+ MOZ_ASSERT(map->asLinked()->maybeTable(nogc) == table);
+
+ bool removingLast = (map == ptr->map() && *mapLength - 1 == ptr->index());
+ ptr->map()->asDictionary()->clearProperty(ptr->index());
+ map->incHoleCount();
+ table->remove(ptr);
+
+ if (removingLast) {
+ skipTrailingHoles(map, mapLength);
+ }
+ maybeCompact(cx, map, mapLength);
+}
+
+// static
+void DictionaryPropMap::densifyElements(JSContext* cx,
+ MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength,
+ NativeObject* obj) {
+ MOZ_ASSERT(map);
+ MOZ_ASSERT(*mapLength > 0);
+
+ JS::AutoCheckCannotGC nogc;
+ PropMapTable* table = map->asLinked()->maybeTable(nogc);
+
+ DictionaryPropMap* currentMap = map;
+ uint32_t currentLen = *mapLength;
+ do {
+ for (uint32_t i = 0; i < currentLen; i++) {
+ PropertyKey key = currentMap->getKey(i);
+ uint32_t index;
+ if (!IdIsIndex(key, &index)) {
+ continue;
+ }
+
+ // The caller must have checked all sparse elements are plain data
+ // properties.
+ PropertyInfo prop = currentMap->getPropertyInfo(i);
+ MOZ_ASSERT(prop.flags() == PropertyFlags::defaultDataPropFlags);
+
+ uint32_t slot = prop.slot();
+ Value value = obj->getSlot(slot);
+ obj->setDenseElement(index, value);
+ obj->freeDictionarySlot(slot);
+
+ if (table) {
+ PropMapTable::Ptr p = table->lookupRaw(key);
+ MOZ_ASSERT(p);
+ table->remove(p);
+ }
+
+ currentMap->clearProperty(i);
+ map->incHoleCount();
+ }
+ currentMap = currentMap->previous();
+ currentLen = PropMap::Capacity;
+ } while (currentMap);
+
+ skipTrailingHoles(map, mapLength);
+ maybeCompact(cx, map, mapLength);
+}
+
+void DictionaryPropMap::maybeCompact(JSContext* cx,
+ MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength) {
+ // If there are no holes, there's nothing to compact.
+ if (map->holeCount_ == 0) {
+ return;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ PropMapTable* table = map->asLinked()->ensureTable(cx, nogc);
+ if (!table) {
+ // Compacting is optional so just return.
+ cx->recoverFromOutOfMemory();
+ return;
+ }
+
+ // Heuristic: only compact if the number of holes >= the number of (non-hole)
+ // entries.
+ if (map->holeCount_ < table->entryCount()) {
+ return;
+ }
+
+ // Add all dictionary maps to a Vector so that we can iterate over them in
+ // reverse order (property definition order). If appending to the Vector OOMs,
+ // just return because compacting is optional.
+ Vector<DictionaryPropMap*, 32, SystemAllocPolicy> maps;
+ for (DictionaryPropMap* curMap = map; curMap; curMap = curMap->previous()) {
+ if (!maps.append(curMap)) {
+ return;
+ }
+ }
+
+ // Use two cursors: readMapCursor/readIndexCursor iterates over all properties
+ // starting at the first one, to search for the next non-hole entry.
+ // writeMapCursor/writeIndexCursor is used to write all non-hole keys.
+ //
+ // At the start of the loop, these cursors point to the next property slot to
+ // read/write.
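+ //
+ // For example (illustrative): with entries [a, -, b, -, c] the read cursor
+ // visits every slot, the write cursor packs the keys to [a, b, c], and the
+ // table entries for the moved keys are updated to their new positions.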
+
+ size_t readMapCursorVectorIndex = maps.length() - 1;
+ DictionaryPropMap* readMapCursor = maps[readMapCursorVectorIndex];
+ uint32_t readIndexCursor = 0;
+
+ size_t writeMapCursorVectorIndex = readMapCursorVectorIndex;
+ DictionaryPropMap* writeMapCursor = readMapCursor;
+ uint32_t writeIndexCursor = 0;
+
+ mozilla::DebugOnly<uint32_t> numHoles = 0;
+
+ while (true) {
+ if (readMapCursor->hasKey(readIndexCursor)) {
+ // Found a non-hole entry, copy it to its new position and update the
+ // PropMapTable to point to this new entry. Only do this if the read and
+ // write positions are different from each other.
+ if (readMapCursor != writeMapCursor ||
+ readIndexCursor != writeIndexCursor) {
+ PropertyKey key = readMapCursor->getKey(readIndexCursor);
+ auto p = table->lookupRaw(key);
+ MOZ_ASSERT(p);
+ MOZ_ASSERT(p->map() == readMapCursor);
+ MOZ_ASSERT(p->index() == readIndexCursor);
+
+ writeMapCursor->setKey(writeIndexCursor, key);
+ writeMapCursor->linkedData_.propInfos[writeIndexCursor] =
+ readMapCursor->linkedData_.propInfos[readIndexCursor];
+
+ PropMapAndIndex newEntry(writeMapCursor, writeIndexCursor);
+ table->replaceEntry(p, key, newEntry);
+ }
+ // Advance the write cursor.
+ writeIndexCursor++;
+ if (writeIndexCursor == PropMap::Capacity) {
+ MOZ_ASSERT(writeMapCursorVectorIndex > 0);
+ writeMapCursorVectorIndex--;
+ writeMapCursor = maps[writeMapCursorVectorIndex];
+ writeIndexCursor = 0;
+ }
+ } else {
+ numHoles++;
+ }
+ // Advance the read cursor. If there are no more maps to read from, we're
+ // done.
+ readIndexCursor++;
+ if (readIndexCursor == PropMap::Capacity) {
+ if (readMapCursorVectorIndex == 0) {
+ break;
+ }
+ readMapCursorVectorIndex--;
+ readMapCursor = maps[readMapCursorVectorIndex];
+ readIndexCursor = 0;
+ }
+ }
+
+ // Sanity check: the read cursor skipped holes between properties and holes
+ // at the end of the last map (these are not included in holeCount_).
+ MOZ_ASSERT(map->holeCount_ + (PropMap::Capacity - *mapLength) == numHoles);
+
+ // The write cursor points to the next available slot. If this is at the start
+ // of a new map, use the previous map (which must be full) instead.
+ if (writeIndexCursor == 0 && writeMapCursor->previous()) {
+ writeMapCursor = writeMapCursor->previous();
+ *mapLength = PropMap::Capacity;
+ } else {
+ *mapLength = writeIndexCursor;
+ }
+
+ // Ensure the last map does not have any keys in [mapLength, Capacity).
+ for (uint32_t i = *mapLength; i < PropMap::Capacity; i++) {
+ writeMapCursor->clearProperty(i);
+ }
+
+ if (writeMapCursor != map) {
+ map->handOffLastMapStateTo(writeMapCursor);
+ map.set(writeMapCursor);
+ }
+ map->holeCount_ = 0;
+
+ MOZ_ASSERT(*mapLength <= PropMap::Capacity);
+ MOZ_ASSERT_IF(*mapLength == 0, !map->previous());
+ MOZ_ASSERT_IF(!map->previous(), table->entryCount() == *mapLength);
+}
+
+void SharedPropMap::fixupAfterMovingGC() {
+ SharedChildrenPtr& childrenRef = treeDataRef().children;
+ if (childrenRef.isNone()) {
+ return;
+ }
+
+ if (!hasChildrenSet()) {
+ SharedPropMapAndIndex child = childrenRef.toSingleChild();
+ if (gc::IsForwarded(child.map())) {
+ child = SharedPropMapAndIndex(gc::Forwarded(child.map()), child.index());
+ childrenRef.setSingleChild(child);
+ }
+ return;
+ }
+
+ SharedChildrenSet* set = childrenRef.toChildrenSet();
+ for (SharedChildrenSet::Enum e(*set); !e.empty(); e.popFront()) {
+ SharedPropMapAndIndex child = e.front();
+ if (IsForwarded(child.map())) {
+ child = SharedPropMapAndIndex(Forwarded(child.map()), child.index());
+ e.mutableFront() = child;
+ }
+ }
+}
+
+void SharedPropMap::removeChild(JS::GCContext* gcx, SharedPropMap* child) {
+ SharedPropMapAndIndex& parentRef = child->treeDataRef().parent;
+ MOZ_ASSERT(parentRef.map() == this);
+
+ uint32_t index = parentRef.index();
+ parentRef.setNone();
+
+ SharedChildrenPtr& childrenRef = treeDataRef().children;
+ MOZ_ASSERT(!childrenRef.isNone());
+
+ if (!hasChildrenSet()) {
+ MOZ_ASSERT(childrenRef.toSingleChild().map() == child);
+ MOZ_ASSERT(childrenRef.toSingleChild().index() == index);
+ childrenRef.setNone();
+ return;
+ }
+
+ SharedChildrenSet* set = childrenRef.toChildrenSet();
+ {
+ uint32_t nextIndex = SharedPropMap::indexOfNextProperty(index);
+ SharedChildrenHasher::Lookup lookup(
+ child->getPropertyInfoWithKey(nextIndex), index);
+ auto p = set->lookup(lookup);
+ MOZ_ASSERT(p, "Child must be in children set");
+ set->remove(p);
+ }
+
+ MOZ_ASSERT(set->count() >= 1);
+
+ if (set->count() == 1) {
+ // Convert from set form back to single child form.
+ SharedChildrenSet::Range r = set->all();
+ SharedPropMapAndIndex remainingChild = r.front();
+ childrenRef.setSingleChild(remainingChild);
+ clearHasChildrenSet();
+ gcx->delete_(this, set, MemoryUse::PropMapChildren);
+ }
+}
+
+void LinkedPropMap::purgeTable(JS::GCContext* gcx) {
+ MOZ_ASSERT(hasTable());
+ gcx->delete_(this, data_.table, MemoryUse::PropMapTable);
+ data_.table = nullptr;
+}
+
+uint32_t PropMap::approximateEntryCount() const {
+ // Returns a number that's guaranteed to be >= the exact number of properties
+ // in this map (including previous maps). This is used to reserve space in the
+ // HashSet when allocating a table for this map.
+
+ const PropMap* map = this;
+ uint32_t count = 0;
+ JS::AutoCheckCannotGC nogc;
+ while (true) {
+ if (!map->hasPrevious()) {
+ return count + PropMap::Capacity;
+ }
+ if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
+ return count + table->entryCount();
+ }
+ count += PropMap::Capacity;
+ map = map->asLinked()->previous();
+ }
+}
+
+bool PropMapTable::init(JSContext* cx, LinkedPropMap* map) {
+ if (!set_.reserve(map->approximateEntryCount())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ PropMap* curMap = map;
+ while (true) {
+ for (uint32_t i = 0; i < PropMap::Capacity; i++) {
+ if (curMap->hasKey(i)) {
+ PropertyKey key = curMap->getKey(i);
+ set_.putNewInfallible(key, PropMapAndIndex(curMap, i));
+ }
+ }
+ if (!curMap->hasPrevious()) {
+ break;
+ }
+ curMap = curMap->asLinked()->previous();
+ }
+
+ return true;
+}
+
+void PropMapTable::trace(JSTracer* trc) {
+ purgeCache();
+
+ for (Set::Enum e(set_); !e.empty(); e.popFront()) {
+ PropMap* map = e.front().map();
+ TraceManuallyBarrieredEdge(trc, &map, "PropMapTable map");
+ if (map != e.front().map()) {
+ e.mutableFront() = PropMapAndIndex(map, e.front().index());
+ }
+ }
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void PropMapTable::checkAfterMovingGC() {
+ for (Set::Enum e(set_); !e.empty(); e.popFront()) {
+ PropMap* map = e.front().map();
+ MOZ_ASSERT(map);
+ CheckGCThingAfterMovingGC(map);
+
+ PropertyKey key = map->getKey(e.front().index());
+ MOZ_RELEASE_ASSERT(!key.isVoid());
+
+ auto p = lookupRaw(key);
+ MOZ_RELEASE_ASSERT(p.found() && *p == e.front());
+ }
+}
+#endif
+
+#ifdef DEBUG
+bool LinkedPropMap::canSkipMarkingTable() {
+ if (!hasTable()) {
+ return true;
+ }
+
+ PropMapTable* table = data_.table;
+ uint32_t count = 0;
+
+ PropMap* map = this;
+ while (true) {
+ for (uint32_t i = 0; i < Capacity; i++) {
+ if (map->hasKey(i)) {
+ PropertyKey key = map->getKey(i);
+ PropMapTable::Ptr p = table->readonlyThreadsafeLookup(key);
+ MOZ_ASSERT(*p == PropMapAndIndex(map, i));
+ count++;
+ }
+ }
+ if (!map->hasPrevious()) {
+ break;
+ }
+ map = map->asLinked()->previous();
+ }
+
+ return count == table->entryCount();
+}
+#endif
+
+bool LinkedPropMap::createTable(JSContext* cx) {
+ MOZ_ASSERT(canHaveTable());
+ MOZ_ASSERT(!hasTable());
+
+ UniquePtr<PropMapTable> table = cx->make_unique<PropMapTable>();
+ if (!table) {
+ return false;
+ }
+
+ if (!table->init(cx, this)) {
+ return false;
+ }
+
+ data_.table = table.release();
+ // TODO: The contents of the PropMapTable are not currently tracked; only
+ // the object itself is.
+ AddCellMemory(this, sizeof(PropMapTable), MemoryUse::PropMapTable);
+ return true;
+}
+
+#ifdef DEBUG
+void PropMap::dump(js::GenericPrinter& out) const {
+ out.printf("map @ 0x%p\n", this);
+ out.printf("previous: 0x%p\n",
+ hasPrevious() ? asLinked()->previous() : nullptr);
+
+ if (canHaveTable()) {
+ out.printf("table: 0x%p\n", asLinked()->data_.table);
+ } else {
+ out.printf("table: (too small for table)\n");
+ }
+
+ if (isShared()) {
+ out.printf("type: shared\n");
+ out.printf(" compact: %s\n", isCompact() ? "yes" : "no");
+ SharedPropMapAndIndex parent = asShared()->treeDataRef().parent;
+ if (parent.isNone()) {
+ out.printf(" parent: (none)\n");
+ } else {
+ out.printf(" parent: 0x%p [%u]\n", parent.map(), parent.index());
+ }
+ } else {
+ const DictionaryPropMap* dictMap = asDictionary();
+ out.printf("type: dictionary\n");
+ out.printf(" freeList: %u\n", dictMap->freeList_);
+ out.printf(" holeCount: %u\n", dictMap->holeCount_);
+ }
+
+ out.printf("properties:\n");
+ for (uint32_t i = 0; i < Capacity; i++) {
+ out.printf(" %u: ", i);
+
+ if (!hasKey(i)) {
+ out.printf("(empty)\n");
+ continue;
+ }
+
+ PropertyKey key = getKey(i);
+ if (key.isInt()) {
+ out.printf("[%d]", key.toInt());
+ } else if (key.isAtom()) {
+ EscapedStringPrinter(out, key.toAtom(), '"');
+ } else {
+ MOZ_ASSERT(key.isSymbol());
+ key.toSymbol()->dump(out);
+ }
+
+ PropertyInfo prop = getPropertyInfo(i);
+ out.printf(" slot %u flags 0x%x ", prop.maybeSlot(), prop.flags().toRaw());
+
+ if (!prop.flags().isEmpty()) {
+ bool first = true;
+ auto dumpFlag = [&](PropertyFlag flag, const char* name) {
+ if (!prop.flags().hasFlag(flag)) {
+ return;
+ }
+ if (!first) {
+ out.putChar(' ');
+ }
+ out.put(name);
+ first = false;
+ };
+ out.putChar('(');
+ dumpFlag(PropertyFlag::Enumerable, "enumerable");
+ dumpFlag(PropertyFlag::Configurable, "configurable");
+ dumpFlag(PropertyFlag::Writable, "writable");
+ dumpFlag(PropertyFlag::AccessorProperty, "accessor");
+ dumpFlag(PropertyFlag::CustomDataProperty, "custom-data");
+ out.putChar(')');
+ }
+ out.putChar('\n');
+ }
+}
+
+void PropMap::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+}
+
+void PropMap::checkConsistency(NativeObject* obj) const {
+ const uint32_t mapLength = obj->shape()->propMapLength();
+ MOZ_ASSERT(mapLength <= PropMap::Capacity);
+
+ JS::AutoCheckCannotGC nogc;
+ if (isDictionary()) {
+ // Check dictionary slot free list.
+ for (uint32_t fslot = asDictionary()->freeList();
+ fslot != SHAPE_INVALID_SLOT;
+ fslot = obj->getSlot(fslot).toPrivateUint32()) {
+ MOZ_ASSERT(fslot < obj->slotSpan());
+ }
+
+ auto* table = asLinked()->maybeTable(nogc);
+ const DictionaryPropMap* curMap = asDictionary();
+ uint32_t numHoles = 0;
+ do {
+ // Some fields must only be set for the last dictionary map.
+ if (curMap != this) {
+ MOZ_ASSERT(!curMap->asLinked()->hasTable());
+ MOZ_ASSERT(curMap->holeCount_ == 0);
+ MOZ_ASSERT(curMap->freeList_ == SHAPE_INVALID_SLOT);
+ }
+
+ for (uint32_t i = 0; i < PropMap::Capacity; i++) {
+ if (!curMap->hasKey(i)) {
+ if (curMap != this || i < mapLength) {
+ numHoles++;
+ }
+ continue;
+ }
+
+ // The last dictionary map must only have keys up to mapLength.
+ MOZ_ASSERT_IF(curMap == this, i < mapLength);
+
+ PropertyInfo prop = curMap->getPropertyInfo(i);
+ MOZ_ASSERT_IF(prop.hasSlot(), prop.slot() < obj->slotSpan());
+
+ // All properties must be in the table.
+ if (table) {
+ PropertyKey key = curMap->getKey(i);
+ auto p = table->lookupRaw(key);
+ MOZ_ASSERT(p->map() == curMap);
+ MOZ_ASSERT(p->index() == i);
+ }
+ }
+ curMap = curMap->previous();
+ } while (curMap);
+
+ MOZ_ASSERT(asDictionary()->holeCount_ == numHoles);
+ return;
+ }
+
+ MOZ_ASSERT(mapLength > 0);
+
+ const SharedPropMap* curMap = asShared();
+ auto* table =
+ curMap->canHaveTable() ? curMap->asLinked()->maybeTable(nogc) : nullptr;
+
+ // Shared maps without a previous map never have a table.
+ MOZ_ASSERT_IF(!curMap->hasPrevious(), !curMap->canHaveTable());
+
+ const SharedPropMap* nextMap = nullptr;
+ mozilla::Maybe<uint32_t> nextSlot;
+ while (true) {
+ // Verify numPreviousMaps is set correctly.
+ MOZ_ASSERT_IF(nextMap && nextMap->numPreviousMaps() != NumPreviousMapsMax,
+ curMap->numPreviousMaps() + 1 == nextMap->numPreviousMaps());
+ MOZ_ASSERT(curMap->hasPrevious() == (curMap->numPreviousMaps() > 0));
+
+ // If a previous map also has a table, it must have fewer entries than the
+ // last map's table.
+ if (table && curMap != this && curMap->canHaveTable()) {
+ if (auto* table2 = curMap->asLinked()->maybeTable(nogc)) {
+ MOZ_ASSERT(table2->entryCount() < table->entryCount());
+ }
+ }
+
+ for (int32_t i = PropMap::Capacity - 1; i >= 0; i--) {
+ uint32_t index = uint32_t(i);
+
+ // Only the last map can have holes, for entries following mapLength.
+ if (!curMap->hasKey(index)) {
+ MOZ_ASSERT(index > 0);
+ MOZ_ASSERT(curMap == this);
+ MOZ_ASSERT(index >= mapLength);
+ continue;
+ }
+
+ // Check slot numbers are within slot span and never decreasing.
+ PropertyInfo prop = curMap->getPropertyInfo(i);
+ if (prop.hasSlot()) {
+ MOZ_ASSERT_IF((curMap != this || index < mapLength),
+ prop.slot() < obj->slotSpan());
+ MOZ_ASSERT_IF(nextSlot.isSome(), *nextSlot >= prop.slot());
+ nextSlot = mozilla::Some(prop.slot());
+ }
+
+ // All properties must be in the table.
+ if (table) {
+ PropertyKey key = curMap->getKey(index);
+ auto p = table->lookupRaw(key);
+ MOZ_ASSERT(p->map() == curMap);
+ MOZ_ASSERT(p->index() == index);
+ }
+ }
+
+ if (!curMap->hasPrevious()) {
+ break;
+ }
+ nextMap = curMap;
+ curMap = curMap->asLinked()->previous()->asShared();
+ }
+}
+#endif // DEBUG
+
+JS::ubi::Node::Size JS::ubi::Concrete<PropMap>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ Size size = js::gc::Arena::thingSize(get().asTenured().getAllocKind());
+ size_t children = 0;
+ size_t tables = 0;
+ get().addSizeOfExcludingThis(mallocSizeOf, &children, &tables);
+ return size + children + tables;
+}
diff --git a/js/src/vm/PropMap.h b/js/src/vm/PropMap.h
new file mode 100644
index 0000000000..b47b43e86c
--- /dev/null
+++ b/js/src/vm/PropMap.h
@@ -0,0 +1,1167 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PropMap_h
+#define vm_PropMap_h
+
+#include "gc/Barrier.h"
+#include "gc/Cell.h"
+#include "js/TypeDecls.h"
+#include "js/UbiNode.h"
+#include "vm/ObjectFlags.h"
+#include "vm/PropertyInfo.h"
+#include "vm/PropertyKey.h"
+
+// [SMDOC] Property Maps
+//
+// Property maps are used to store information about native object properties.
+// Each property map represents an ordered list of (PropertyKey, PropertyInfo)
+// tuples.
+//
+// Each property map can store up to 8 properties (see PropMap::Capacity). To
+// store more than eight properties, multiple maps must be linked together with
+// the |previous| pointer.
+//
+// Shapes and Property Maps
+// ------------------------
+// Native object shapes represent property information as a (PropMap*, length)
+// tuple. When there are no properties yet, the shape's map will be nullptr and
+// the length is zero.
+//
+// For example, consider the following objects:
+//
+// o1 = {x: 1, y: 2}
+// o2 = {x: 3, y: 4, z: 5}
+//
+// This is stored as follows:
+//
+// +-------------+ +--------------+ +-------------------+
+// | JSObject o1 | | Shape S1 | | PropMap M1 |
+// |-------------+ +--------------+ +-------------------+
+// | shape: S1 -+---> | map: M1 -+--+> | key 0: x (slot 0) |
+// | slot 0: 1 | | mapLength: 2 | | | key 1: y (slot 1) |
+// | slot 1: 2 | +--------------+ | | key 2: z (slot 2) |
+// +-------------+ | | ... |
+// | +-------------------+
+// |
+// +-------------+ +--------------+ |
+// | JSObject o2 | | Shape S2 | |
+// |-------------+ +--------------+ |
+// | shape: S2 -+---> | map: M1 -+--+
+// | slot 0: 3 | | mapLength: 3 |
+// | slot 1: 4 | +--------------+
+// | slot 2: 5 |
+// +-------------+
+//
+// There's a single map M1 shared by shapes S1 and S2. Shape S1 includes only
+// the first two properties and shape S2 includes all three properties.
+//
+// Class Hierarchy
+// ---------------
+// Property maps have the following C++ class hierarchy:
+//
+// PropMap (abstract)
+// |
+// +-- SharedPropMap (abstract)
+// | |
+// | +-- CompactPropMap
+// | |
+// | +-- NormalPropMap
+// |
+// +-- DictionaryPropMap
+//
+// * PropMap: base class. It has a flags word and an array of PropertyKeys.
+//
+// * SharedPropMap: base class for all shared property maps. See below for more
+// information on shared maps.
+//
+// * CompactPropMap: a shared map that stores its information more compactly
+// than the other maps. It saves four words by not storing a
+// PropMapTable, previous pointer, and by using a more compact
+// PropertyInfo type for slot numbers that fit in one byte.
+//
+// * NormalPropMap: a shared map, used when CompactPropMap can't be used.
+//
+// * DictionaryPropMap: an unshared map (used by a single object/shape). See
+// below for more information on dictionary maps.
+//
+// Secondary hierarchy
+// -------------------
+// NormalPropMap and DictionaryPropMap store property information in the same
+// way. This means property lookups don't have to distinguish between these two
+// types. This is represented with a second class hierarchy:
+//
+// PropMap (abstract)
+// |
+// +-- CompactPropMap
+// |
+// +-- LinkedPropMap (NormalPropMap or DictionaryPropMap)
+//
+// Property lookup and property iteration are very performance-sensitive and use
+// this Compact vs Linked "view" so that they don't have to handle the three map
+// types separately.
+//
+// LinkedPropMap also stores the PropMapTable and a pointer to the |previous|
+// map. Compact maps don't have these fields.
+//
+// To summarize these map types:
+//
+// +-------------------+-------------+--------+
+// | Concrete type | Shared/tree | Linked |
+// +-------------------+-------------+--------+
+// | CompactPropMap | yes | no |
+// | NormalPropMap | yes | yes |
+// | DictionaryPropMap | no | yes |
+// +-------------------+-------------+--------+
+//
+// PropMapTable
+// ------------
+// Finding the PropertyInfo for a particular PropertyKey requires a linear
+// search if the map is small. For larger maps we can create a PropMapTable, a
+// hash table that maps from PropertyKey to PropMap + index, to speed up
+// property lookups.
+//
+// To save memory, property map tables can be discarded on GC and recreated when
+// needed. AutoKeepPropMapTables can be used to avoid discarding tables in a
+// particular zone. Methods to access a PropMapTable take either an
+// AutoCheckCannotGC or AutoKeepPropMapTables argument, to help ensure tables
+// are not purged while we're using them.
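+//
+// A typical access pattern (hypothetical sketch) therefore looks like:
+//
+//   JS::AutoCheckCannotGC nogc;
+//   if (PropMapTable* table = map->asLinked()->maybeTable(nogc)) {
+//     // ... use |table|; it cannot be purged while |nogc| is live ...
+//   }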
+//
+// Shared Property Maps
+// --------------------
+// Shared property maps can be shared per-Zone by objects with the same property
+// keys, flags, and slot numbers. To make this work, shared maps form a tree:
+//
+// - Each Zone has a table that maps from first PropertyKey + PropertyInfo to
+// a SharedPropMap that begins with that property. This is used to look up
+// the map to use when adding the first property.
+// See ShapeZone::initialPropMaps.
+//
+// - When adding a property other than the first one, the property is stored in
+// the next entry of the same map when possible. If the map is full or the
+// next entry already stores a different property, a child map is created and
+// linked to the parent map.
+//
+// For example, imagine we want to create these objects:
+//
+// o1 = {x: 1, y: 2, z: 3}
+// o2 = {x: 1, y: 2, foo: 4}
+//
+// This will result in the following maps being created:
+//
+// +---------------------+ +---------------------+
+// | SharedPropMap M1 | | SharedPropMap M2 |
+// +---------------------+ +---------------------+
+// | Child M2 (index 1) -+--> | Parent M1 (index 1) |
+// +---------------------+ +---------------------+
+// | 0: x | | 0: x |
+// | 1: y | | 1: y |
+// | 2: z | | 2: foo |
+// | ... | | ... |
+// +---------------------+ +---------------------+
+//
+// M1 is the map used for initial property "x". Properties "y" and "z" can be
+// stored inline. When later adding "foo" following "y", the map has to be
+// forked: a child map M2 is created and M1 remembers this transition at
+// property index 1 so that M2 will be used the next time properties "x", "y",
+// and "foo" are added to an object.
+//
+// Shared maps contain a TreeData struct that stores the parent and children
+// links for the SharedPropMap tree. The parent link is a tagged pointer that
+// stores both the parent map and the property index of the last used property
+// in the parent map before the branch. The children are stored similarly: the
+// parent map can store a single child map and index, or a set of children.
+// See SharedChildrenPtr.
+//
+// Looking up a child map can then be done based on the index of the last
+// property in the parent map and the new property's key and flags. So for the
+// example above, the lookup key for M1 => M2 is (index 1, "foo", <flags>).
+//
+// Note: shared maps can have both a |previous| map and a |parent| map. They are
+// equal when the previous map was full, but can be different maps when
+// branching in the middle of a map like in the example above: M2 has parent M1
+// but does not have a |previous| map (because it only has three properties).
+//
+// Dictionary Property Maps
+// ------------------------
+// Certain operations can't be implemented (efficiently) for shared property
+// maps, for example changing or deleting a property other than the last one.
+// When this happens the map is copied as a DictionaryPropMap.
+//
+// Dictionary maps are unshared so can be mutated in place (after generating a
+// new shape for the object).
+//
+// Unlike shared maps, dictionary maps can have holes between two property keys
+// after removing a property. When there are more holes than properties, the
+// map is compacted. See DictionaryPropMap::maybeCompact.
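+//
+// For example (illustrative): deleting a property other than the last one
+// forces dictionary mode:
+//
+//   o = {x: 1, y: 2, z: 3};
+//   delete o.y;  // o's map becomes a DictionaryPropMap; "y" leaves a hole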
+
+namespace js {
+
+enum class IntegrityLevel;
+
+class DictionaryPropMap;
+class SharedPropMap;
+class LinkedPropMap;
+class CompactPropMap;
+class NormalPropMap;
+
+// Template class for storing a PropMap* and a property index as a tagged
+// pointer.
+template <typename T>
+class MapAndIndex {
+ uintptr_t data_ = 0;
+
+ static constexpr uintptr_t IndexMask = 0b111;
+
+ public:
+ MapAndIndex() = default;
+
+ MapAndIndex(const T* map, uint32_t index) : data_(uintptr_t(map) | index) {
+ MOZ_ASSERT((uintptr_t(map) & IndexMask) == 0);
+ MOZ_ASSERT(index <= IndexMask);
+ }
+ explicit MapAndIndex(uintptr_t data) : data_(data) {}
+
+ void setNone() { data_ = 0; }
+
+ bool isNone() const { return data_ == 0; }
+
+ uintptr_t raw() const { return data_; }
+ T* maybeMap() const { return reinterpret_cast<T*>(data_ & ~IndexMask); }
+
+ uint32_t index() const {
+ MOZ_ASSERT(!isNone());
+ return data_ & IndexMask;
+ }
+ T* map() const {
+ MOZ_ASSERT(!isNone());
+ return maybeMap();
+ }
+
+ inline PropertyInfo propertyInfo() const;
+
+ bool operator==(const MapAndIndex<T>& other) const {
+ return data_ == other.data_;
+ }
+ bool operator!=(const MapAndIndex<T>& other) const {
+ return !operator==(other);
+ }
+} JS_HAZ_GC_POINTER;
+using PropMapAndIndex = MapAndIndex<PropMap>;
+using SharedPropMapAndIndex = MapAndIndex<SharedPropMap>;
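+
+// Because PropMap::Capacity is 8 and maps are at least 8-byte aligned, the
+// three low bits of a map pointer are free to hold the index. Illustrative
+// sketch:
+//
+//   PropMapAndIndex entry(map, 5);
+//   MOZ_ASSERT(entry.map() == map);
+//   MOZ_ASSERT(entry.index() == 5);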
+
+struct SharedChildrenHasher;
+using SharedChildrenSet =
+ HashSet<SharedPropMapAndIndex, SharedChildrenHasher, SystemAllocPolicy>;
+
+// Children of shared maps. This is either:
+//
+// - None (no children)
+// - SingleMapAndIndex (one child map, including the property index of the last
+// property before the branch)
+// - SharedChildrenSet (multiple children)
+//
+// Because a SingleMapAndIndex uses all of the word's bits, this relies on the
+// HasChildrenSet flag in the map to distinguish the latter two cases.
+class SharedChildrenPtr {
+ uintptr_t data_ = 0;
+
+ public:
+ bool isNone() const { return data_ == 0; }
+ void setNone() { data_ = 0; }
+
+ void setSingleChild(SharedPropMapAndIndex child) { data_ = child.raw(); }
+ void setChildrenSet(SharedChildrenSet* set) { data_ = uintptr_t(set); }
+
+ SharedPropMapAndIndex toSingleChild() const {
+ MOZ_ASSERT(!isNone());
+ return SharedPropMapAndIndex(data_);
+ }
+
+ SharedChildrenSet* toChildrenSet() const {
+ MOZ_ASSERT(!isNone());
+ return reinterpret_cast<SharedChildrenSet*>(data_);
+ }
+} JS_HAZ_GC_POINTER;
+
+// Ensures no property map tables are purged in the current zone.
+class MOZ_RAII AutoKeepPropMapTables {
+ JSContext* cx_;
+ bool prev_;
+
+ public:
+ void operator=(const AutoKeepPropMapTables&) = delete;
+ AutoKeepPropMapTables(const AutoKeepPropMapTables&) = delete;
+ explicit inline AutoKeepPropMapTables(JSContext* cx);
+ inline ~AutoKeepPropMapTables();
+};
+
+// Hash table to optimize property lookups on larger maps. This maps from
+// PropertyKey to PropMapAndIndex.
+class PropMapTable {
+ struct Hasher {
+ using Key = PropMapAndIndex;
+ using Lookup = PropertyKey;
+ static MOZ_ALWAYS_INLINE HashNumber hash(PropertyKey key);
+ static MOZ_ALWAYS_INLINE bool match(PropMapAndIndex, PropertyKey key);
+ };
+
+ // Small lookup cache. This has a hit rate of 30-60% on most workloads and is
+ // a lot faster than the full HashSet lookup.
+ struct CacheEntry {
+ PropertyKey key;
+ PropMapAndIndex result;
+ };
+ static constexpr uint32_t NumCacheEntries = 2;
+ CacheEntry cacheEntries_[NumCacheEntries];
+
+ using Set = HashSet<PropMapAndIndex, Hasher, SystemAllocPolicy>;
+ Set set_;
+
+ void setCacheEntry(PropertyKey key, PropMapAndIndex entry) {
+ for (uint32_t i = 0; i < NumCacheEntries; i++) {
+ if (cacheEntries_[i].key == key) {
+ cacheEntries_[i].result = entry;
+ return;
+ }
+ }
+ }
+ bool lookupInCache(PropertyKey key, PropMapAndIndex* result) const {
+ for (uint32_t i = 0; i < NumCacheEntries; i++) {
+ if (cacheEntries_[i].key == key) {
+ *result = cacheEntries_[i].result;
+#ifdef DEBUG
+ auto p = lookupRaw(key);
+ MOZ_ASSERT(*result == (p ? *p : PropMapAndIndex()));
+#endif
+ return true;
+ }
+ }
+ return false;
+ }
+ void addToCache(PropertyKey key, Set::Ptr p) {
+ for (uint32_t i = NumCacheEntries - 1; i > 0; i--) {
+ cacheEntries_[i] = cacheEntries_[i - 1];
+ MOZ_ASSERT(cacheEntries_[i].key != key);
+ }
+ cacheEntries_[0].key = key;
+ cacheEntries_[0].result = p ? *p : PropMapAndIndex();
+ }
+
+ public:
+ using Ptr = Set::Ptr;
+
+ PropMapTable() = default;
+ ~PropMapTable() = default;
+
+ uint32_t entryCount() const { return set_.count(); }
+
+ // This counts the PropMapTable object itself (which must be heap-allocated)
+ // and its HashSet.
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + set_.shallowSizeOfExcludingThis(mallocSizeOf);
+ }
+
+ // init() is fallible and reports OOM to the context.
+ bool init(JSContext* cx, LinkedPropMap* map);
+
+ MOZ_ALWAYS_INLINE PropMap* lookup(PropMap* map, uint32_t mapLength,
+ PropertyKey key, uint32_t* index);
+
+ Set::Ptr lookupRaw(PropertyKey key) const { return set_.lookup(key); }
+#ifdef DEBUG
+ Set::Ptr readonlyThreadsafeLookup(PropertyKey key) const {
+ return set_.readonlyThreadsafeLookup(key);
+ }
+#endif
+
+ bool add(JSContext* cx, PropertyKey key, PropMapAndIndex entry) {
+ if (!set_.putNew(key, entry)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ setCacheEntry(key, entry);
+ return true;
+ }
+
+ void purgeCache() {
+ for (uint32_t i = 0; i < NumCacheEntries; i++) {
+ cacheEntries_[i] = CacheEntry();
+ }
+ }
+
+ void remove(Ptr ptr) {
+ set_.remove(ptr);
+ purgeCache();
+ }
+
+ void replaceEntry(Ptr ptr, PropertyKey key, PropMapAndIndex newEntry) {
+ MOZ_ASSERT(*ptr != newEntry);
+ set_.replaceKey(ptr, key, newEntry);
+ setCacheEntry(key, newEntry);
+ }
+
+ void trace(JSTracer* trc);
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkAfterMovingGC();
+#endif
+};
+
+class PropMap : public gc::TenuredCellWithFlags {
+ public:
+ // Number of properties that can be stored in each map. This must be small
+ // enough so that every index fits in a tagged PropMap* pointer (MapAndIndex).
+ static constexpr size_t Capacity = 8;
+
+ protected:
+ static_assert(gc::CellFlagBitsReservedForGC == 3,
+ "PropMap must reserve enough bits for Cell");
+
+ enum Flags {
+ // Set if this is a CompactPropMap.
+ IsCompactFlag = 1 << 3,
+
+ // Set if this map has a non-null previous map pointer. Never set for
+ // compact maps because they don't have a previous field.
+ HasPrevFlag = 1 << 4,
+
+ // Set if this is a DictionaryPropMap.
+ IsDictionaryFlag = 1 << 5,
+
+ // Set if this map can have a table. Never set for compact maps. Always set
+ // for dictionary maps.
+ CanHaveTableFlag = 1 << 6,
+
+ // If set, this SharedPropMap has a SharedChildrenSet. Else it either has no
+ // children or a single child. See SharedChildrenPtr. Never set for
+ // dictionary maps.
+ HasChildrenSetFlag = 1 << 7,
+
+ // If set, this SharedPropMap was once converted to dictionary mode. This is
+ // only used for heuristics. Never set for dictionary maps.
+ HadDictionaryConversionFlag = 1 << 8,
+
+ // For SharedPropMap this stores the number of previous maps, clamped to
+ // NumPreviousMapsMax. This is used for heuristics.
+ NumPreviousMapsMax = 0x7f,
+ NumPreviousMapsShift = 9,
+ NumPreviousMapsMask = NumPreviousMapsMax << NumPreviousMapsShift,
+ };
+
+ // Flags word, stored in the cell header. Note that this hides the
+ // Cell::flags() method.
+ uintptr_t flags() const { return headerFlagsField(); }
+
+ private:
+ GCPtr<PropertyKey> keys_[Capacity];
+
+ protected:
+ PropMap() = default;
+
+ void initKey(uint32_t index, PropertyKey key) {
+ MOZ_ASSERT(index < Capacity);
+ keys_[index].init(key);
+ }
+ void setKey(uint32_t index, PropertyKey key) {
+ MOZ_ASSERT(index < Capacity);
+ keys_[index] = key;
+ }
+
+ public:
+ bool isCompact() const { return flags() & IsCompactFlag; }
+ bool isLinked() const { return !isCompact(); }
+ bool isDictionary() const { return flags() & IsDictionaryFlag; }
+ bool isShared() const { return !isDictionary(); }
+ bool isNormal() const { return isShared() && !isCompact(); }
+
+ bool hasPrevious() const { return flags() & HasPrevFlag; }
+ bool canHaveTable() const { return flags() & CanHaveTableFlag; }
+
+ inline CompactPropMap* asCompact();
+ inline const CompactPropMap* asCompact() const;
+
+ inline LinkedPropMap* asLinked();
+ inline const LinkedPropMap* asLinked() const;
+
+ inline NormalPropMap* asNormal();
+ inline const NormalPropMap* asNormal() const;
+
+ inline SharedPropMap* asShared();
+ inline const SharedPropMap* asShared() const;
+
+ inline DictionaryPropMap* asDictionary();
+ inline const DictionaryPropMap* asDictionary() const;
+
+ bool hasKey(uint32_t index) const {
+ MOZ_ASSERT(index < Capacity);
+ return !keys_[index].isVoid();
+ }
+ PropertyKey getKey(uint32_t index) const {
+ MOZ_ASSERT(index < Capacity);
+ return keys_[index];
+ }
+
+ uint32_t approximateEntryCount() const;
+
+#ifdef DEBUG
+ void dump(js::GenericPrinter& out) const;
+ void dump() const;
+ void checkConsistency(NativeObject* obj) const;
+#endif
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* children, size_t* tables) const;
+
+ inline PropertyInfo getPropertyInfo(uint32_t index) const;
+
+ PropertyInfoWithKey getPropertyInfoWithKey(uint32_t index) const {
+ return PropertyInfoWithKey(getPropertyInfo(index), getKey(index));
+ }
+
+ MOZ_ALWAYS_INLINE PropMap* lookupLinear(uint32_t mapLength, PropertyKey key,
+ uint32_t* index);
+
+ MOZ_ALWAYS_INLINE PropMap* lookupPure(uint32_t mapLength, PropertyKey key,
+ uint32_t* index);
+
+ MOZ_ALWAYS_INLINE PropMap* lookup(JSContext* cx, uint32_t mapLength,
+ PropertyKey key, uint32_t* index);
+
+ static inline bool lookupForRemove(JSContext* cx, PropMap* map,
+ uint32_t mapLength, PropertyKey key,
+ const AutoKeepPropMapTables& keep,
+ PropMap** propMap, uint32_t* propIndex,
+ PropMapTable** table,
+ PropMapTable::Ptr* ptr);
+
+ static const JS::TraceKind TraceKind = JS::TraceKind::PropMap;
+
+ void traceChildren(JSTracer* trc);
+};
+
+class SharedPropMap : public PropMap {
+ friend class PropMap;
+
+ protected:
+ // Shared maps are stored in a tree structure. Each shared map has a TreeData
+ // struct linking the map to its parent and children. Initial maps (the ones
+ // stored in ShapeZone's initialPropMaps table) don't have a parent.
+ struct TreeData {
+ SharedChildrenPtr children;
+ SharedPropMapAndIndex parent;
+
+ void setParent(SharedPropMap* map, uint32_t index) {
+ parent = SharedPropMapAndIndex(map, index);
+ }
+ };
+
+ private:
+ static SharedPropMap* create(JSContext* cx, Handle<SharedPropMap*> prev,
+ HandleId id, PropertyInfo prop);
+ static SharedPropMap* createInitial(JSContext* cx, HandleId id,
+ PropertyInfo prop);
+ static SharedPropMap* clone(JSContext* cx, Handle<SharedPropMap*> map,
+ uint32_t length);
+
+ inline void initProperty(uint32_t index, PropertyKey key, PropertyInfo prop);
+
+ static bool addPropertyInternal(JSContext* cx,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyInfo prop);
+
+ bool addChild(JSContext* cx, SharedPropMapAndIndex child, HandleId id,
+ PropertyInfo prop);
+ SharedPropMap* lookupChild(uint32_t length, HandleId id, PropertyInfo prop);
+
+ protected:
+ void initNumPreviousMaps(uint32_t value) {
+ MOZ_ASSERT((flags() >> NumPreviousMapsShift) == 0);
+ // Clamp to NumPreviousMapsMax. This is okay because this value is only used
+ // for heuristics.
+ if (value > NumPreviousMapsMax) {
+ value = NumPreviousMapsMax;
+ }
+ setHeaderFlagBits(value << NumPreviousMapsShift);
+ }
+
+ bool hasChildrenSet() const { return flags() & HasChildrenSetFlag; }
+ void setHasChildrenSet() { setHeaderFlagBits(HasChildrenSetFlag); }
+ void clearHasChildrenSet() { clearHeaderFlagBits(HasChildrenSetFlag); }
+
+ void setHadDictionaryConversion() {
+ setHeaderFlagBits(HadDictionaryConversionFlag);
+ }
+
+ public:
+ // Heuristics used when adding a property via NativeObject::addProperty and
+ // friends:
+ //
+  // * If numPreviousMaps >= NumPrevMapsConsiderDictionary, consider
+  //   converting the object to a dictionary object based on other heuristics.
+  //
+  // * If numPreviousMaps >= NumPrevMapsAlwaysDictionary, always convert
+  //   the object to a dictionary object.
+ static constexpr size_t NumPrevMapsConsiderDictionary = 32;
+ static constexpr size_t NumPrevMapsAlwaysDictionary = 100;
+
+ static_assert(NumPrevMapsConsiderDictionary < NumPreviousMapsMax);
+ static_assert(NumPrevMapsAlwaysDictionary < NumPreviousMapsMax);
+
+ // The number of properties that can definitely be added to an object without
+ // triggering dictionary mode conversion in NativeObject::addProperty.
+ static constexpr size_t MaxPropsForNonDictionary =
+ NumPrevMapsConsiderDictionary * Capacity;
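+  // With the current constants this is 32 * 8 == 256 properties.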
+
+ bool isDictionary() const = delete;
+ bool isShared() const = delete;
+ SharedPropMap* asShared() = delete;
+ const SharedPropMap* asShared() const = delete;
+
+ bool hadDictionaryConversion() const {
+ return flags() & HadDictionaryConversionFlag;
+ }
+
+ uint32_t numPreviousMaps() const {
+ uint32_t val = (flags() & NumPreviousMapsMask) >> NumPreviousMapsShift;
+ MOZ_ASSERT_IF(hasPrevious(), val > 0);
+ return val;
+ }
+
+ MOZ_ALWAYS_INLINE bool shouldConvertToDictionaryForAdd() const;
+
+ void fixupAfterMovingGC();
+ inline void sweep(JS::GCContext* gcx);
+ inline void finalize(JS::GCContext* gcx);
+
+ static inline void getPrevious(MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength);
+
+ bool matchProperty(uint32_t index, PropertyKey key, PropertyInfo prop) const {
+ return getKey(index) == key && getPropertyInfo(index) == prop;
+ }
+
+ inline TreeData& treeDataRef();
+ inline const TreeData& treeDataRef() const;
+
+ void removeChild(JS::GCContext* gcx, SharedPropMap* child);
+
+ uint32_t lastUsedSlot(uint32_t mapLength) const {
+ return getPropertyInfo(mapLength - 1).maybeSlot();
+ }
+
+ // Number of slots required for objects with this map/mapLength.
+ static uint32_t slotSpan(const JSClass* clasp, const SharedPropMap* map,
+ uint32_t mapLength) {
+ MOZ_ASSERT(clasp->isNativeObject());
+ uint32_t numReserved = JSCLASS_RESERVED_SLOTS(clasp);
+ if (!map) {
+ MOZ_ASSERT(mapLength == 0);
+ return numReserved;
+ }
+ uint32_t lastSlot = map->lastUsedSlot(mapLength);
+ if (lastSlot == SHAPE_INVALID_SLOT) {
+ // The object only has custom data properties.
+ return numReserved;
+ }
+ // Some builtin objects store properties in reserved slots. Make sure the
+ // slot span >= numReserved. See addPropertyInReservedSlot.
+ return std::max(lastSlot + 1, numReserved);
+ }
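+  // For example, with hypothetical values numReserved == 2 and
+  // lastUsedSlot(mapLength) == 5, the slot span is std::max(5 + 1, 2) == 6.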
+
+ static uint32_t indexOfNextProperty(uint32_t index) {
+ MOZ_ASSERT(index < PropMap::Capacity);
+ return (index + 1) % PropMap::Capacity;
+ }
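+  // Indices wrap around modulo Capacity, e.g. indexOfNextProperty(7) == 0
+  // when Capacity == 8.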
+
+ // Add a new property to this map. Returns the new map/mapLength, slot number,
+ // and object flags.
+ static bool addProperty(JSContext* cx, const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id, PropertyFlags flags,
+ ObjectFlags* objectFlags, uint32_t* slot);
+
+ // Like addProperty, but for when the slot number is a reserved slot. A few
+ // builtin objects use this for initial properties.
+ static bool addPropertyInReservedSlot(JSContext* cx, const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyFlags flags, uint32_t slot,
+ ObjectFlags* objectFlags);
+
+ // Like addProperty, but for when the caller already knows the slot number to
+ // use (or wants to assert this exact slot number is used).
+ static bool addPropertyWithKnownSlot(JSContext* cx, const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyFlags flags, uint32_t slot,
+ ObjectFlags* objectFlags);
+
+ // Like addProperty, but for adding a custom data property.
+ static bool addCustomDataProperty(JSContext* cx, const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, HandleId id,
+ PropertyFlags flags,
+ ObjectFlags* objectFlags);
+
+ // Freeze or seal all properties by creating a new shared map. Returns the new
+ // map and object flags.
+ static bool freezeOrSealProperties(JSContext* cx, IntegrityLevel level,
+ const JSClass* clasp,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t mapLength,
+ ObjectFlags* objectFlags);
+
+ // Create a new dictionary map as copy of this map.
+ static DictionaryPropMap* toDictionaryMap(JSContext* cx,
+ Handle<SharedPropMap*> map,
+ uint32_t length);
+};
+
+class CompactPropMap final : public SharedPropMap {
+ CompactPropertyInfo propInfos_[Capacity];
+ TreeData treeData_;
+
+ friend class PropMap;
+ friend class SharedPropMap;
+ friend class DictionaryPropMap;
+ friend class js::gc::CellAllocator;
+
+ CompactPropMap(JS::Handle<PropertyKey> key, PropertyInfo prop) {
+ setHeaderFlagBits(IsCompactFlag);
+ initProperty(0, key, prop);
+ }
+
+ CompactPropMap(JS::Handle<CompactPropMap*> orig, uint32_t length) {
+ setHeaderFlagBits(IsCompactFlag);
+ for (uint32_t i = 0; i < length; i++) {
+ initKey(i, orig->getKey(i));
+ propInfos_[i] = orig->propInfos_[i];
+ }
+ }
+
+ void initProperty(uint32_t index, PropertyKey key, PropertyInfo prop) {
+ MOZ_ASSERT(!hasKey(index));
+ initKey(index, key);
+ propInfos_[index] = CompactPropertyInfo(prop);
+ }
+
+ TreeData& treeDataRef() { return treeData_; }
+ const TreeData& treeDataRef() const { return treeData_; }
+
+ public:
+ bool isDictionary() const = delete;
+ bool isShared() const = delete;
+ bool isCompact() const = delete;
+ bool isNormal() const = delete;
+ bool isLinked() const = delete;
+ CompactPropMap* asCompact() = delete;
+ const CompactPropMap* asCompact() const = delete;
+
+ PropertyInfo getPropertyInfo(uint32_t index) const {
+ MOZ_ASSERT(hasKey(index));
+ return PropertyInfo(propInfos_[index]);
+ }
+};
+
+// Layout shared by NormalPropMap and DictionaryPropMap.
+class LinkedPropMap final : public PropMap {
+ friend class PropMap;
+ friend class SharedPropMap;
+ friend class NormalPropMap;
+ friend class DictionaryPropMap;
+
+ struct Data {
+ GCPtr<PropMap*> previous;
+ PropMapTable* table = nullptr;
+ PropertyInfo propInfos[Capacity];
+
+ explicit Data(PropMap* prev) : previous(prev) {}
+ };
+ Data data_;
+
+ bool createTable(JSContext* cx);
+ void handOffTableTo(LinkedPropMap* next);
+
+ public:
+ bool isCompact() const = delete;
+ bool isLinked() const = delete;
+ LinkedPropMap* asLinked() = delete;
+ const LinkedPropMap* asLinked() const = delete;
+
+ PropMap* previous() const { return data_.previous; }
+
+ bool hasTable() const { return data_.table != nullptr; }
+
+ PropMapTable* maybeTable(JS::AutoCheckCannotGC& nogc) const {
+ return data_.table;
+ }
+ PropMapTable* ensureTable(JSContext* cx, const JS::AutoCheckCannotGC& nogc) {
+ if (!data_.table && MOZ_UNLIKELY(!createTable(cx))) {
+ return nullptr;
+ }
+ return data_.table;
+ }
+ PropMapTable* ensureTable(JSContext* cx, const AutoKeepPropMapTables& keep) {
+ if (!data_.table && MOZ_UNLIKELY(!createTable(cx))) {
+ return nullptr;
+ }
+ return data_.table;
+ }
+
+ void purgeTable(JS::GCContext* gcx);
+
+ void purgeTableCache() {
+ if (data_.table) {
+ data_.table->purgeCache();
+ }
+ }
+
+#ifdef DEBUG
+ bool canSkipMarkingTable();
+#endif
+
+ PropertyInfo getPropertyInfo(uint32_t index) const {
+ MOZ_ASSERT(hasKey(index));
+ return data_.propInfos[index];
+ }
+};
+
+class NormalPropMap final : public SharedPropMap {
+ friend class PropMap;
+ friend class SharedPropMap;
+ friend class DictionaryPropMap;
+ friend class js::gc::CellAllocator;
+
+ LinkedPropMap::Data linkedData_;
+ TreeData treeData_;
+
+ NormalPropMap(JS::Handle<SharedPropMap*> prev, PropertyKey key,
+ PropertyInfo prop)
+ : linkedData_(prev) {
+ if (prev) {
+ setHeaderFlagBits(HasPrevFlag);
+ initNumPreviousMaps(prev->numPreviousMaps() + 1);
+ if (prev->hasPrevious()) {
+ setHeaderFlagBits(CanHaveTableFlag);
+ }
+ }
+ initProperty(0, key, prop);
+ }
+
+ NormalPropMap(JS::Handle<NormalPropMap*> orig, uint32_t length)
+ : linkedData_(orig->previous()) {
+ if (orig->hasPrevious()) {
+ setHeaderFlagBits(HasPrevFlag);
+ }
+ if (orig->canHaveTable()) {
+ setHeaderFlagBits(CanHaveTableFlag);
+ }
+ initNumPreviousMaps(orig->numPreviousMaps());
+ for (uint32_t i = 0; i < length; i++) {
+ initProperty(i, orig->getKey(i), orig->getPropertyInfo(i));
+ }
+ }
+
+ void initProperty(uint32_t index, PropertyKey key, PropertyInfo prop) {
+ MOZ_ASSERT(!hasKey(index));
+ initKey(index, key);
+ linkedData_.propInfos[index] = prop;
+ }
+
+ bool isDictionary() const = delete;
+ bool isShared() const = delete;
+ bool isCompact() const = delete;
+ bool isNormal() const = delete;
+ bool isLinked() const = delete;
+ NormalPropMap* asNormal() = delete;
+ const NormalPropMap* asNormal() const = delete;
+
+ SharedPropMap* previous() const {
+ return static_cast<SharedPropMap*>(linkedData_.previous.get());
+ }
+
+ TreeData& treeDataRef() { return treeData_; }
+ const TreeData& treeDataRef() const { return treeData_; }
+
+ static void staticAsserts() {
+ static_assert(offsetof(NormalPropMap, linkedData_) ==
+ offsetof(LinkedPropMap, data_));
+ }
+};
+
+class DictionaryPropMap final : public PropMap {
+ friend class PropMap;
+ friend class SharedPropMap;
+ friend class js::gc::CellAllocator;
+
+ LinkedPropMap::Data linkedData_;
+
+ // SHAPE_INVALID_SLOT or head of slot freelist in owning dictionary-mode
+ // object.
+ uint32_t freeList_ = SHAPE_INVALID_SLOT;
+
+ // Number of holes for removed properties in this and previous maps. Used by
+ // compacting heuristics.
+ uint32_t holeCount_ = 0;
+
+ DictionaryPropMap(JS::Handle<DictionaryPropMap*> prev, PropertyKey key,
+ PropertyInfo prop)
+ : linkedData_(prev) {
+ setHeaderFlagBits(IsDictionaryFlag | CanHaveTableFlag |
+ (prev ? HasPrevFlag : 0));
+ initProperty(0, key, prop);
+ }
+
+ DictionaryPropMap(JS::Handle<NormalPropMap*> orig, uint32_t length)
+ : linkedData_(nullptr) {
+ setHeaderFlagBits(IsDictionaryFlag | CanHaveTableFlag);
+ for (uint32_t i = 0; i < length; i++) {
+ initProperty(i, orig->getKey(i), orig->getPropertyInfo(i));
+ }
+ }
+
+ DictionaryPropMap(JS::Handle<CompactPropMap*> orig, uint32_t length)
+ : linkedData_(nullptr) {
+ setHeaderFlagBits(IsDictionaryFlag | CanHaveTableFlag);
+ for (uint32_t i = 0; i < length; i++) {
+ initProperty(i, orig->getKey(i), orig->getPropertyInfo(i));
+ }
+ }
+
+ void initProperty(uint32_t index, PropertyKey key, PropertyInfo prop) {
+ MOZ_ASSERT(!hasKey(index));
+ initKey(index, key);
+ linkedData_.propInfos[index] = prop;
+ }
+
+ void initPrevious(DictionaryPropMap* prev) {
+ MOZ_ASSERT(prev);
+ linkedData_.previous.init(prev);
+ setHeaderFlagBits(HasPrevFlag);
+ }
+ void clearPrevious() {
+ linkedData_.previous = nullptr;
+ clearHeaderFlagBits(HasPrevFlag);
+ }
+
+ void clearProperty(uint32_t index) { setKey(index, PropertyKey::Void()); }
+
+ static void skipTrailingHoles(MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength);
+
+ void handOffLastMapStateTo(DictionaryPropMap* newLast);
+
+ void incHoleCount() { holeCount_++; }
+ void decHoleCount() {
+ MOZ_ASSERT(holeCount_ > 0);
+ holeCount_--;
+ }
+ static void maybeCompact(JSContext* cx, MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength);
+
+ public:
+ bool isDictionary() const = delete;
+ bool isShared() const = delete;
+ bool isCompact() const = delete;
+ bool isNormal() const = delete;
+ bool isLinked() const = delete;
+ DictionaryPropMap* asDictionary() = delete;
+ const DictionaryPropMap* asDictionary() const = delete;
+
+ void fixupAfterMovingGC() {}
+ inline void finalize(JS::GCContext* gcx);
+
+ DictionaryPropMap* previous() const {
+ return static_cast<DictionaryPropMap*>(linkedData_.previous.get());
+ }
+
+ uint32_t freeList() const { return freeList_; }
+ void setFreeList(uint32_t slot) { freeList_ = slot; }
+
+ PropertyInfo getPropertyInfo(uint32_t index) const {
+ MOZ_ASSERT(hasKey(index));
+ return linkedData_.propInfos[index];
+ }
+
+ // Add a new property to this map. Returns the new map/mapLength and object
+ // flags. The caller is responsible for generating a new dictionary shape.
+ static bool addProperty(JSContext* cx, const JSClass* clasp,
+ MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength, HandleId id, PropertyFlags flags,
+ uint32_t slot, ObjectFlags* objectFlags);
+
+ // Remove the property referenced by the table pointer. Returns the new
+ // map/mapLength. The caller is responsible for generating a new dictionary
+ // shape.
+ static void removeProperty(JSContext* cx,
+ MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength, PropMapTable* table,
+ PropMapTable::Ptr& ptr);
+
+ // Turn all sparse elements into dense elements. The caller is responsible
+ // for checking all sparse elements are plain data properties and must
+ // generate a new shape for the object.
+ static void densifyElements(JSContext* cx,
+ MutableHandle<DictionaryPropMap*> map,
+ uint32_t* mapLength, NativeObject* obj);
+
+ // Freeze or seal all properties in this map. Returns the new object flags.
+ // The caller is responsible for generating a new shape for the object.
+ void freezeOrSealProperties(JSContext* cx, IntegrityLevel level,
+ const JSClass* clasp, uint32_t mapLength,
+ ObjectFlags* objectFlags);
+
+ // Change a property's slot number and/or flags and return the new object
+ // flags. The caller is responsible for generating a new shape.
+ void changeProperty(JSContext* cx, const JSClass* clasp, uint32_t index,
+ PropertyFlags flags, uint32_t slot,
+ ObjectFlags* objectFlags);
+
+ // Like changeProperty, but doesn't change the slot number.
+ void changePropertyFlags(JSContext* cx, const JSClass* clasp, uint32_t index,
+ PropertyFlags flags, ObjectFlags* objectFlags) {
+ uint32_t slot = getPropertyInfo(index).maybeSlot();
+ changeProperty(cx, clasp, index, flags, slot, objectFlags);
+ }
+
+ static void staticAsserts() {
+ static_assert(offsetof(DictionaryPropMap, linkedData_) ==
+ offsetof(LinkedPropMap, data_));
+ }
+};
+
+inline CompactPropMap* PropMap::asCompact() {
+ MOZ_ASSERT(isCompact());
+ return static_cast<CompactPropMap*>(this);
+}
+inline const CompactPropMap* PropMap::asCompact() const {
+ MOZ_ASSERT(isCompact());
+ return static_cast<const CompactPropMap*>(this);
+}
+inline LinkedPropMap* PropMap::asLinked() {
+ MOZ_ASSERT(isLinked());
+ return static_cast<LinkedPropMap*>(this);
+}
+inline const LinkedPropMap* PropMap::asLinked() const {
+ MOZ_ASSERT(isLinked());
+ return static_cast<const LinkedPropMap*>(this);
+}
+inline NormalPropMap* PropMap::asNormal() {
+ MOZ_ASSERT(isNormal());
+ return static_cast<NormalPropMap*>(this);
+}
+inline const NormalPropMap* PropMap::asNormal() const {
+ MOZ_ASSERT(isNormal());
+ return static_cast<const NormalPropMap*>(this);
+}
+inline SharedPropMap* PropMap::asShared() {
+ MOZ_ASSERT(isShared());
+ return static_cast<SharedPropMap*>(this);
+}
+inline const SharedPropMap* PropMap::asShared() const {
+ MOZ_ASSERT(isShared());
+ return static_cast<const SharedPropMap*>(this);
+}
+inline DictionaryPropMap* PropMap::asDictionary() {
+ MOZ_ASSERT(isDictionary());
+ return static_cast<DictionaryPropMap*>(this);
+}
+inline const DictionaryPropMap* PropMap::asDictionary() const {
+ MOZ_ASSERT(isDictionary());
+ return static_cast<const DictionaryPropMap*>(this);
+}
+
+inline PropertyInfo PropMap::getPropertyInfo(uint32_t index) const {
+ return isCompact() ? asCompact()->getPropertyInfo(index)
+ : asLinked()->getPropertyInfo(index);
+}
+
+inline SharedPropMap::TreeData& SharedPropMap::treeDataRef() {
+ return isCompact() ? asCompact()->treeDataRef() : asNormal()->treeDataRef();
+}
+
+inline const SharedPropMap::TreeData& SharedPropMap::treeDataRef() const {
+ return isCompact() ? asCompact()->treeDataRef() : asNormal()->treeDataRef();
+}
+
+inline void SharedPropMap::initProperty(uint32_t index, PropertyKey key,
+ PropertyInfo prop) {
+ if (isCompact()) {
+ asCompact()->initProperty(index, key, prop);
+ } else {
+ asNormal()->initProperty(index, key, prop);
+ }
+}
+
+template <typename T>
+inline PropertyInfo MapAndIndex<T>::propertyInfo() const {
+ MOZ_ASSERT(!isNone());
+ return map()->getPropertyInfo(index());
+}
+
+MOZ_ALWAYS_INLINE HashNumber PropMapTable::Hasher::hash(PropertyKey key) {
+ return HashPropertyKey(key);
+}
+MOZ_ALWAYS_INLINE bool PropMapTable::Hasher::match(PropMapAndIndex entry,
+ PropertyKey key) {
+ MOZ_ASSERT(entry.map()->hasKey(entry.index()));
+ return entry.map()->getKey(entry.index()) == key;
+}
+
+// Hash policy for SharedPropMap children.
+struct SharedChildrenHasher {
+ using Key = SharedPropMapAndIndex;
+
+ struct Lookup {
+ PropertyKey key;
+ PropertyInfo prop;
+ uint8_t index;
+
+ Lookup(PropertyKey key, PropertyInfo prop, uint8_t index)
+ : key(key), prop(prop), index(index) {}
+ Lookup(PropertyInfoWithKey prop, uint8_t index)
+ : key(prop.key()), prop(prop), index(index) {}
+ };
+
+ static HashNumber hash(const Lookup& l) {
+ HashNumber hash = HashPropertyKey(l.key);
+ return mozilla::AddToHash(hash, l.prop.toRaw(), l.index);
+ }
+ static bool match(SharedPropMapAndIndex k, const Lookup& l) {
+ SharedPropMap* map = k.map();
+ uint32_t index = k.index();
+ uint32_t newIndex = SharedPropMap::indexOfNextProperty(index);
+ return index == l.index && map->matchProperty(newIndex, l.key, l.prop);
+ }
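+  // In other words, a stored child entry (map, index) matches a Lookup when
+  // the indices agree and the property at the next index in that map equals
+  // the looked-up key and property info.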
+};
+
+} // namespace js
+
+// JS::ubi::Nodes can point to PropMaps; they're js::gc::Cell instances
+// with no associated compartment.
+namespace JS {
+namespace ubi {
+
+template <>
+class Concrete<js::PropMap> : TracerConcrete<js::PropMap> {
+ protected:
+ explicit Concrete(js::PropMap* ptr) : TracerConcrete<js::PropMap>(ptr) {}
+
+ public:
+ static void construct(void* storage, js::PropMap* ptr) {
+ new (storage) Concrete(ptr);
+ }
+
+ Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+ const char16_t* typeName() const override { return concreteTypeName; }
+ static const char16_t concreteTypeName[];
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif // vm_PropMap_h
diff --git a/js/src/vm/PropertyAndElement.cpp b/js/src/vm/PropertyAndElement.cpp
new file mode 100644
index 0000000000..05adbd44a6
--- /dev/null
+++ b/js/src/vm/PropertyAndElement.cpp
@@ -0,0 +1,995 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/PropertyAndElement.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint32_t
+
+#include "jsfriendapi.h" // js::GetPropertyKeys, JSITER_OWNONLY
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "js/CallArgs.h" // JSNative
+#include "js/Class.h" // JS::ObjectOpResult
+#include "js/Context.h" // AssertHeapIsIdle
+#include "js/GCVector.h" // JS::GCVector, JS::RootedVector
+#include "js/Id.h" // JS::PropertyKey, jsid
+#include "js/PropertyDescriptor.h" // JS::PropertyDescriptor, JSPROP_READONLY
+#include "js/PropertySpec.h" // JSNativeWrapper
+#include "js/RootingAPI.h" // JS::Rooted, JS::Handle, JS::MutableHandle
+#include "js/Value.h" // JS::Value, JS::*Value
+#include "vm/FunctionPrefixKind.h" // js::FunctionPrefixKind
+#include "vm/GlobalObject.h" // js::GlobalObject
+#include "vm/JSAtom.h" // JSAtom, js::Atomize, js::AtomizeChars
+#include "vm/JSContext.h" // JSContext, CHECK_THREAD
+#include "vm/JSFunction.h" // js::IdToFunctionName, js::DefineFunction
+#include "vm/JSObject.h" // JSObject, js::DefineFunctions
+#include "vm/ObjectOperations.h" // js::DefineProperty, js::DefineDataProperty, js::HasOwnProperty
+#include "vm/PropertyResult.h" // js::PropertyResult
+#include "vm/StringType.h" // js::PropertyName
+
+#include "vm/JSAtom-inl.h" // js::AtomToId, js::IndexToId
+#include "vm/JSContext-inl.h" // JSContext::check
+#include "vm/JSObject-inl.h" // js::NewBuiltinClassInstance
+#include "vm/NativeObject-inl.h" // js::NativeLookupOwnPropertyNoResolve
+#include "vm/ObjectOperations-inl.h" // js::GetProperty, js::GetElement, js::SetProperty, js::HasProperty, js::DeleteProperty, js::DeleteElement
+
+using namespace js;
+
+static bool DefinePropertyByDescriptor(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JS::PropertyDescriptor> desc,
+ JS::ObjectOpResult& result) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id, desc);
+ return js::DefineProperty(cx, obj, id, desc, result);
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(
+ JSContext* cx, JS::Handle<JSObject*> obj, JS::Handle<jsid> id,
+ JS::Handle<JS::PropertyDescriptor> desc, JS::ObjectOpResult& result) {
+ return ::DefinePropertyByDescriptor(cx, obj, id, desc, result);
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(
+ JSContext* cx, JS::Handle<JSObject*> obj, JS::Handle<jsid> id,
+ JS::Handle<JS::PropertyDescriptor> desc) {
+ JS::ObjectOpResult result;
+ return ::DefinePropertyByDescriptor(cx, obj, id, desc, result) &&
+ result.checkStrict(cx, obj, id);
+}
+
+static bool DefineDataPropertyById(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JS::Value> value,
+ unsigned attrs) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id, value);
+
+ return js::DefineDataProperty(cx, obj, id, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JS::Value> value,
+ unsigned attrs) {
+ return ::DefineDataPropertyById(cx, obj, id, value, attrs);
+}
+
+static bool DefineAccessorPropertyById(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JSObject*> getter,
+ JS::Handle<JSObject*> setter,
+ unsigned attrs) {
+ // JSPROP_READONLY has no meaning when accessors are involved. Ideally we'd
+ // throw if this happens, but we've accepted it for long enough that it's
+ // not worth trying to make callers change their ways. Just flip it off on
+ // its way through the API layer so that we can enforce this internally.
+ attrs &= ~JSPROP_READONLY;
+
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id, getter, setter);
+
+ return js::DefineAccessorProperty(cx, obj, id, getter, setter, attrs);
+}
+
+static bool DefineAccessorPropertyById(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ const JSNativeWrapper& get,
+ const JSNativeWrapper& set,
+ unsigned attrs) {
+ // Getter/setter are both possibly-null JSNatives. Wrap them in JSFunctions.
+
+ JS::Rooted<JSFunction*> getter(cx);
+ if (get.op) {
+ JS::Rooted<JSAtom*> atom(cx,
+ IdToFunctionName(cx, id, FunctionPrefixKind::Get));
+ if (!atom) {
+ return false;
+ }
+ getter = NewNativeFunction(cx, get.op, 0, atom);
+ if (!getter) {
+ return false;
+ }
+
+ if (get.info) {
+ getter->setJitInfo(get.info);
+ }
+ }
+
+ JS::Rooted<JSFunction*> setter(cx);
+ if (set.op) {
+ JS::Rooted<JSAtom*> atom(cx,
+ IdToFunctionName(cx, id, FunctionPrefixKind::Set));
+ if (!atom) {
+ return false;
+ }
+ setter = NewNativeFunction(cx, set.op, 1, atom);
+ if (!setter) {
+ return false;
+ }
+
+ if (set.info) {
+ setter->setJitInfo(set.info);
+ }
+ }
+
+ return ::DefineAccessorPropertyById(cx, obj, id, getter, setter, attrs);
+}
+
+/*
+ * Helper to wrap a JSNative passed through the API in a JSNativeWrapper with
+ * no corresponding JSJitInfo.
+ */
+static JSNativeWrapper NativeOpWrapper(Native native) {
+ JSNativeWrapper ret;
+ ret.op = native;
+ ret.info = nullptr;
+ return ret;
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, JSNative getter,
+ JSNative setter, unsigned attrs) {
+ return ::DefineAccessorPropertyById(cx, obj, id, ::NativeOpWrapper(getter),
+ ::NativeOpWrapper(setter), attrs);
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JSObject*> getter,
+ JS::Handle<JSObject*> setter,
+ unsigned attrs) {
+ return ::DefineAccessorPropertyById(cx, obj, id, getter, setter, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JSObject*> valueArg,
+ unsigned attrs) {
+ JS::Rooted<JS::Value> value(cx, JS::ObjectValue(*valueArg));
+ return ::DefineDataPropertyById(cx, obj, id, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ HandleString valueArg,
+ unsigned attrs) {
+ JS::Rooted<JS::Value> value(cx, JS::StringValue(valueArg));
+ return ::DefineDataPropertyById(cx, obj, id, value, attrs);
+}
+
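+// Several overloads below pass a stack JS::Value through fromMarkedLocation
+// without rooting it. This is safe here because int32 and double values never
+// contain GC pointers.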
+JS_PUBLIC_API bool JS_DefinePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, int32_t valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::Int32Value(valueArg);
+ return ::DefineDataPropertyById(
+ cx, obj, id, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, uint32_t valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::NumberValue(valueArg);
+ return ::DefineDataPropertyById(
+ cx, obj, id, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
+
+JS_PUBLIC_API bool JS_DefinePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, double valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::NumberValue(valueArg);
+ return ::DefineDataPropertyById(
+ cx, obj, id, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
+
+static bool DefineDataProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, JS::Handle<JS::Value> value,
+ unsigned attrs) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+
+ return ::DefineDataPropertyById(cx, obj, id, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name,
+ JS::Handle<JS::Value> value,
+ unsigned attrs) {
+ return ::DefineDataProperty(cx, obj, name, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, JSNative getter,
+ JSNative setter, unsigned attrs) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return ::DefineAccessorPropertyById(cx, obj, id, ::NativeOpWrapper(getter),
+ ::NativeOpWrapper(setter), attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name,
+ JS::Handle<JSObject*> getter,
+ JS::Handle<JSObject*> setter,
+ unsigned attrs) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+
+ return ::DefineAccessorPropertyById(cx, obj, id, getter, setter, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name,
+ JS::Handle<JSObject*> valueArg,
+ unsigned attrs) {
+ JS::Rooted<JS::Value> value(cx, JS::ObjectValue(*valueArg));
+ return ::DefineDataProperty(cx, obj, name, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, HandleString valueArg,
+ unsigned attrs) {
+ JS::Rooted<JS::Value> value(cx, JS::StringValue(valueArg));
+ return ::DefineDataProperty(cx, obj, name, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, int32_t valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::Int32Value(valueArg);
+ return ::DefineDataProperty(
+ cx, obj, name, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, uint32_t valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::NumberValue(valueArg);
+ return ::DefineDataProperty(
+ cx, obj, name, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, double valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::NumberValue(valueArg);
+ return ::DefineDataProperty(
+ cx, obj, name, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
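+
+// Hypothetical usage sketch (object, name, and value are illustrative):
+//
+//   JS::Rooted<JS::Value> v(cx, JS::Int32Value(42));
+//   if (!JS_DefineProperty(cx, obj, "answer", v,
+//                          JSPROP_ENUMERATE | JSPROP_READONLY)) {
+//     return false;
+//   }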
+
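+// Passing namelen == (size_t)-1 to the UC functions below makes AUTO_NAMELEN
+// compute the length via js_strlen, so NUL-terminated char16_t strings can be
+// passed without measuring them first.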
+#define AUTO_NAMELEN(s, n) (((n) == (size_t)-1) ? js_strlen(s) : (n))
+
+JS_PUBLIC_API bool JS_DefineUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ JS::Handle<JS::PropertyDescriptor> desc,
+ JS::ObjectOpResult& result) {
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return ::DefinePropertyByDescriptor(cx, obj, id, desc, result);
+}
+
+JS_PUBLIC_API bool JS_DefineUCProperty(
+ JSContext* cx, JS::Handle<JSObject*> obj, const char16_t* name,
+ size_t namelen, JS::Handle<JS::PropertyDescriptor> desc) {
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ JS::ObjectOpResult result;
+ return ::DefinePropertyByDescriptor(cx, obj, id, desc, result) &&
+ result.checkStrict(cx, obj, id);
+}
+
+static bool DefineUCDataProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ JS::Handle<JS::Value> value, unsigned attrs) {
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return ::DefineDataPropertyById(cx, obj, id, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ JS::Handle<JS::Value> value,
+ unsigned attrs) {
+ return ::DefineUCDataProperty(cx, obj, name, namelen, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ JS::Handle<JSObject*> getter,
+ JS::Handle<JSObject*> setter,
+ unsigned attrs) {
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return ::DefineAccessorPropertyById(cx, obj, id, getter, setter, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ JS::Handle<JSObject*> valueArg,
+ unsigned attrs) {
+ JS::Rooted<JS::Value> value(cx, JS::ObjectValue(*valueArg));
+ return ::DefineUCDataProperty(cx, obj, name, namelen, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ HandleString valueArg, unsigned attrs) {
+ JS::Rooted<JS::Value> value(cx, JS::StringValue(valueArg));
+ return ::DefineUCDataProperty(cx, obj, name, namelen, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ int32_t valueArg, unsigned attrs) {
+ JS::Value value = JS::Int32Value(valueArg);
+ return ::DefineUCDataProperty(
+ cx, obj, name, namelen, JS::Handle<JS::Value>::fromMarkedLocation(&value),
+ attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ uint32_t valueArg, unsigned attrs) {
+ JS::Value value = JS::NumberValue(valueArg);
+ return ::DefineUCDataProperty(
+ cx, obj, name, namelen, JS::Handle<JS::Value>::fromMarkedLocation(&value),
+ attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ double valueArg, unsigned attrs) {
+ JS::Value value = JS::NumberValue(valueArg);
+ return ::DefineUCDataProperty(
+ cx, obj, name, namelen, JS::Handle<JS::Value>::fromMarkedLocation(&value),
+ attrs);
+}
+
+extern bool PropertySpecNameToId(JSContext* cx, JSPropertySpec::Name name,
+ MutableHandleId id);
+
+static bool DefineSelfHostedProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ const char* getterName,
+ const char* setterName, unsigned attrs) {
+ JSAtom* getterNameAtom = Atomize(cx, getterName, strlen(getterName));
+ if (!getterNameAtom) {
+ return false;
+ }
+ JS::Rooted<PropertyName*> getterNameName(cx,
+ getterNameAtom->asPropertyName());
+
+ JS::Rooted<JSAtom*> name(cx, IdToFunctionName(cx, id));
+ if (!name) {
+ return false;
+ }
+
+ JS::Rooted<JS::Value> getterValue(cx);
+ if (!GlobalObject::getSelfHostedFunction(cx, cx->global(), getterNameName,
+ name, 0, &getterValue)) {
+ return false;
+ }
+ MOZ_ASSERT(getterValue.isObject() && getterValue.toObject().is<JSFunction>());
+ JS::Rooted<JSFunction*> getterFunc(cx,
+ &getterValue.toObject().as<JSFunction>());
+
+ JS::Rooted<JSFunction*> setterFunc(cx);
+ if (setterName) {
+ JSAtom* setterNameAtom = Atomize(cx, setterName, strlen(setterName));
+ if (!setterNameAtom) {
+ return false;
+ }
+ JS::Rooted<PropertyName*> setterNameName(cx,
+ setterNameAtom->asPropertyName());
+
+ JS::Rooted<JS::Value> setterValue(cx);
+ if (!GlobalObject::getSelfHostedFunction(cx, cx->global(), setterNameName,
+ name, 1, &setterValue)) {
+ return false;
+ }
+ MOZ_ASSERT(setterValue.isObject() &&
+ setterValue.toObject().is<JSFunction>());
+ setterFunc = &setterValue.toObject().as<JSFunction>();
+ }
+
+ return ::DefineAccessorPropertyById(cx, obj, id, getterFunc, setterFunc,
+ attrs);
+}
+
+static bool DefineDataElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, JS::Handle<JS::Value> value,
+ unsigned attrs) {
+ cx->check(obj, value);
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ JS::Rooted<jsid> id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+ return ::DefineDataPropertyById(cx, obj, id, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, JS::Handle<JS::Value> value,
+ unsigned attrs) {
+ return ::DefineDataElement(cx, obj, index, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index,
+ JS::Handle<JSObject*> getter,
+ JS::Handle<JSObject*> setter,
+ unsigned attrs) {
+ JS::Rooted<jsid> id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+ return ::DefineAccessorPropertyById(cx, obj, id, getter, setter, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index,
+ JS::Handle<JSObject*> valueArg,
+ unsigned attrs) {
+ JS::Rooted<JS::Value> value(cx, JS::ObjectValue(*valueArg));
+ return ::DefineDataElement(cx, obj, index, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, HandleString valueArg,
+ unsigned attrs) {
+ JS::Rooted<JS::Value> value(cx, JS::StringValue(valueArg));
+ return ::DefineDataElement(cx, obj, index, value, attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, int32_t valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::Int32Value(valueArg);
+ return ::DefineDataElement(
+ cx, obj, index, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, uint32_t valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::NumberValue(valueArg);
+ return ::DefineDataElement(
+ cx, obj, index, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
+
+JS_PUBLIC_API bool JS_DefineElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, double valueArg,
+ unsigned attrs) {
+ JS::Value value = JS::NumberValue(valueArg);
+ return ::DefineDataElement(
+ cx, obj, index, JS::Handle<JS::Value>::fromMarkedLocation(&value), attrs);
+}
+
+JS_PUBLIC_API bool JS_HasPropertyById(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, bool* foundp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id);
+
+ return js::HasProperty(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_HasProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, bool* foundp) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_HasPropertyById(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_HasUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ bool* foundp) {
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_HasPropertyById(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_HasElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, bool* foundp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ JS::Rooted<jsid> id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+ return JS_HasPropertyById(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_HasOwnPropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id, bool* foundp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id);
+
+ return js::HasOwnProperty(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_HasOwnProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, bool* foundp) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_HasOwnPropertyById(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_ForwardGetPropertyTo(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JS::Value> receiver,
+ JS::MutableHandle<JS::Value> vp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id, receiver);
+
+ return js::GetProperty(cx, obj, receiver, id, vp);
+}
+
+JS_PUBLIC_API bool JS_ForwardGetElementTo(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ uint32_t index,
+ JS::Handle<JSObject*> receiver,
+ JS::MutableHandle<JS::Value> vp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ return js::GetElement(cx, obj, receiver, index, vp);
+}
+
+JS_PUBLIC_API bool JS_GetPropertyById(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::MutableHandle<JS::Value> vp) {
+ JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+ return JS_ForwardGetPropertyTo(cx, obj, id, receiver, vp);
+}
+
+JS_PUBLIC_API bool JS_GetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name,
+ JS::MutableHandle<JS::Value> vp) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_GetPropertyById(cx, obj, id, vp);
+}
+
+JS_PUBLIC_API bool JS_GetUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ JS::MutableHandle<JS::Value> vp) {
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_GetPropertyById(cx, obj, id, vp);
+}
+
+JS_PUBLIC_API bool JS_GetElement(JSContext* cx, JS::Handle<JSObject*> objArg,
+ uint32_t index,
+ JS::MutableHandle<JS::Value> vp) {
+ return JS_ForwardGetElementTo(cx, objArg, index, objArg, vp);
+}
+
+JS_PUBLIC_API bool JS_ForwardSetPropertyTo(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JS::Value> v,
+ JS::Handle<JS::Value> receiver,
+ JS::ObjectOpResult& result) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id, v, receiver);
+
+ return js::SetProperty(cx, obj, id, v, receiver, result);
+}
+
+JS_PUBLIC_API bool JS_SetPropertyById(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::Handle<JS::Value> v) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id, v);
+
+ JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+ JS::ObjectOpResult ignored;
+ return js::SetProperty(cx, obj, id, v, receiver, ignored);
+}
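+
+// Note: JS_SetPropertyById and the JS_SetProperty/JS_SetElement variants below
+// discard their ObjectOpResult, so non-fatal failures (for example, assigning
+// to a read-only property in non-strict code) are silently ignored.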
+
+JS_PUBLIC_API bool JS_SetProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name, JS::Handle<JS::Value> v) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_SetPropertyById(cx, obj, id, v);
+}
+
+JS_PUBLIC_API bool JS_SetUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ JS::Handle<JS::Value> v) {
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_SetPropertyById(cx, obj, id, v);
+}
+
+static bool SetElement(JSContext* cx, JS::Handle<JSObject*> obj, uint32_t index,
+ JS::Handle<JS::Value> v) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, v);
+
+ JS::Rooted<JS::Value> receiver(cx, JS::ObjectValue(*obj));
+ JS::ObjectOpResult ignored;
+ return js::SetElement(cx, obj, index, v, receiver, ignored);
+}
+
+JS_PUBLIC_API bool JS_SetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, JS::Handle<JS::Value> v) {
+ return ::SetElement(cx, obj, index, v);
+}
+
+JS_PUBLIC_API bool JS_SetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, JS::Handle<JSObject*> v) {
+ JS::Rooted<JS::Value> value(cx, JS::ObjectOrNullValue(v));
+ return ::SetElement(cx, obj, index, value);
+}
+
+JS_PUBLIC_API bool JS_SetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, HandleString v) {
+ JS::Rooted<JS::Value> value(cx, JS::StringValue(v));
+ return ::SetElement(cx, obj, index, value);
+}
+
+JS_PUBLIC_API bool JS_SetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, int32_t v) {
+ JS::Rooted<JS::Value> value(cx, JS::NumberValue(v));
+ return ::SetElement(cx, obj, index, value);
+}
+
+JS_PUBLIC_API bool JS_SetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, uint32_t v) {
+ JS::Rooted<JS::Value> value(cx, JS::NumberValue(v));
+ return ::SetElement(cx, obj, index, value);
+}
+
+JS_PUBLIC_API bool JS_SetElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index, double v) {
+ JS::Rooted<JS::Value> value(cx, JS::NumberValue(v));
+ return ::SetElement(cx, obj, index, value);
+}
+
+JS_PUBLIC_API bool JS_DeletePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JS::ObjectOpResult& result) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id);
+
+ return js::DeleteProperty(cx, obj, id, result);
+}
+
+JS_PUBLIC_API bool JS_DeleteProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name,
+ JS::ObjectOpResult& result) {
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return js::DeleteProperty(cx, obj, id, result);
+}
+
+JS_PUBLIC_API bool JS_DeleteUCProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char16_t* name, size_t namelen,
+ JS::ObjectOpResult& result) {
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return js::DeleteProperty(cx, obj, id, result);
+}
+
+JS_PUBLIC_API bool JS_DeleteElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index,
+ JS::ObjectOpResult& result) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ return js::DeleteElement(cx, obj, index, result);
+}
+
+JS_PUBLIC_API bool JS_DeletePropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id) {
+ JS::ObjectOpResult ignored;
+ return JS_DeletePropertyById(cx, obj, id, ignored);
+}
+
+JS_PUBLIC_API bool JS_DeleteProperty(JSContext* cx, JS::Handle<JSObject*> obj,
+ const char* name) {
+ JS::ObjectOpResult ignored;
+ return JS_DeleteProperty(cx, obj, name, ignored);
+}
+
+JS_PUBLIC_API bool JS_DeleteElement(JSContext* cx, JS::Handle<JSObject*> obj,
+ uint32_t index) {
+ JS::ObjectOpResult ignored;
+ return JS_DeleteElement(cx, obj, index, ignored);
+}
+
+JS_PUBLIC_API bool JS_Enumerate(JSContext* cx, JS::Handle<JSObject*> obj,
+ JS::MutableHandle<IdVector> props) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, props);
+ MOZ_ASSERT(props.empty());
+
+ JS::RootedVector<JS::PropertyKey> ids(cx);
+ if (!js::GetPropertyKeys(cx, obj, JSITER_OWNONLY, &ids)) {
+ return false;
+ }
+
+ return props.append(ids.begin(), ids.end());
+}
+
+JS_PUBLIC_API JSObject* JS_DefineObject(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ const char* name, const JSClass* clasp,
+ unsigned attrs) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ JS::Rooted<JSObject*> nobj(cx);
+ if (!clasp) {
+ // Default class is Object.
+ nobj = NewPlainObject(cx);
+ } else {
+ nobj = NewBuiltinClassInstance(cx, clasp);
+ }
+ if (!nobj) {
+ return nullptr;
+ }
+
+ JS::Rooted<JS::Value> nobjValue(cx, JS::ObjectValue(*nobj));
+ if (!::DefineDataProperty(cx, obj, name, nobjValue, attrs)) {
+ return nullptr;
+ }
+
+ return nobj;
+}
+
+JS_PUBLIC_API bool JS_DefineProperties(JSContext* cx, JS::Handle<JSObject*> obj,
+ const JSPropertySpec* ps) {
+ JS::Rooted<jsid> id(cx);
+
+ for (; ps->name; ps++) {
+ if (!PropertySpecNameToId(cx, ps->name, &id)) {
+ return false;
+ }
+
+ if (ps->isAccessor()) {
+ if (ps->isSelfHosted()) {
+ if (!::DefineSelfHostedProperty(
+ cx, obj, id, ps->u.accessors.getter.selfHosted.funname,
+ ps->u.accessors.setter.selfHosted.funname, ps->attributes())) {
+ return false;
+ }
+ } else {
+ if (!::DefineAccessorPropertyById(
+ cx, obj, id, ps->u.accessors.getter.native,
+ ps->u.accessors.setter.native, ps->attributes())) {
+ return false;
+ }
+ }
+ } else {
+ JS::Rooted<JS::Value> v(cx);
+ if (!ps->getValue(cx, &v)) {
+ return false;
+ }
+
+ if (!::DefineDataPropertyById(cx, obj, id, v, ps->attributes())) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
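+
+// Hypothetical usage sketch, assuming the JS_STRING_PS/JS_PS_END helper macros
+// from js/PropertySpec.h (names and values are illustrative):
+//
+//   static const JSPropertySpec my_props[] = {
+//       JS_STRING_PS("version", "1.0", JSPROP_READONLY), JS_PS_END};
+//   if (!JS_DefineProperties(cx, obj, my_props)) {
+//     return false;
+//   }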
+
+JS_PUBLIC_API bool JS_AlreadyHasOwnPropertyById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ bool* foundp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id);
+
+ if (!obj->is<NativeObject>()) {
+ return js::HasOwnProperty(cx, obj, id, foundp);
+ }
+
+ PropertyResult prop;
+ if (!NativeLookupOwnPropertyNoResolve(cx, &obj->as<NativeObject>(), id,
+ &prop)) {
+ return false;
+ }
+ *foundp = prop.isFound();
+ return true;
+}
+
+JS_PUBLIC_API bool JS_AlreadyHasOwnProperty(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ const char* name, bool* foundp) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_AlreadyHasOwnPropertyById(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_AlreadyHasOwnUCProperty(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ const char16_t* name,
+ size_t namelen, bool* foundp) {
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_AlreadyHasOwnPropertyById(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_AlreadyHasOwnElement(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ uint32_t index, bool* foundp) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ JS::Rooted<jsid> id(cx);
+ if (!IndexToId(cx, index, &id)) {
+ return false;
+ }
+ return JS_AlreadyHasOwnPropertyById(cx, obj, id, foundp);
+}
+
+JS_PUBLIC_API bool JS_DefineFunctions(JSContext* cx, JS::Handle<JSObject*> obj,
+ const JSFunctionSpec* fs) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+
+ return js::DefineFunctions(cx, obj, fs);
+}
+
+JS_PUBLIC_API JSFunction* JS_DefineFunction(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ const char* name, JSNative call,
+ unsigned nargs, unsigned attrs) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return nullptr;
+ }
+ Rooted<jsid> id(cx, AtomToId(atom));
+ return js::DefineFunction(cx, obj, id, call, nargs, attrs);
+}
+
+JS_PUBLIC_API JSFunction* JS_DefineUCFunction(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ const char16_t* name,
+ size_t namelen, JSNative call,
+ unsigned nargs, unsigned attrs) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj);
+ JSAtom* atom = AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen));
+ if (!atom) {
+ return nullptr;
+ }
+ Rooted<jsid> id(cx, AtomToId(atom));
+ return js::DefineFunction(cx, obj, id, call, nargs, attrs);
+}
+
+JS_PUBLIC_API JSFunction* JS_DefineFunctionById(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ JS::Handle<jsid> id,
+ JSNative call, unsigned nargs,
+ unsigned attrs) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id);
+ return js::DefineFunction(cx, obj, id, call, nargs, attrs);
+}
diff --git a/js/src/vm/PropertyDescriptor.cpp b/js/src/vm/PropertyDescriptor.cpp
new file mode 100644
index 0000000000..ca5ebbb7b1
--- /dev/null
+++ b/js/src/vm/PropertyDescriptor.cpp
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/PropertyDescriptor.h"
+
+#include "mozilla/Maybe.h" // mozilla::Maybe
+
+#include <stddef.h> // size_t
+#include <string.h> // strlen
+
+#include "jstypes.h" // JS_PUBLIC_API
+#include "js/Context.h" // js::AssertHeapIsIdle
+#include "js/Id.h" // jsid
+#include "js/RootingAPI.h" // JS::Rooted, JS::Handle, JS::MutableHandle
+#include "vm/JSAtom.h" // JSAtom, Atomize, AtomizeChars
+#include "vm/JSContext.h" // JSContext, CHECK_THREAD
+#include "vm/JSObject.h" // JSObject
+#include "vm/ObjectOperations.h" // GetOwnPropertyDescriptor
+
+#include "vm/JSAtom-inl.h" // AtomToId
+#include "vm/JSContext-inl.h" // JSContext::check
+
+using namespace js;
+
+JS_PUBLIC_API bool JS_GetOwnPropertyDescriptorById(
+ JSContext* cx, JS::Handle<JSObject*> obj, JS::Handle<jsid> id,
+ JS::MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(obj, id);
+ return GetOwnPropertyDescriptor(cx, obj, id, desc);
+}
+
+JS_PUBLIC_API bool JS_GetOwnPropertyDescriptor(
+ JSContext* cx, JS::Handle<JSObject*> obj, const char* name,
+ JS::MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_GetOwnPropertyDescriptorById(cx, obj, id, desc);
+}
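+
+// Hypothetical usage sketch: after a successful call, desc.isNothing() means
+// the object has no own property with the given name.
+//
+//   JS::Rooted<mozilla::Maybe<JS::PropertyDescriptor>> desc(cx);
+//   if (!JS_GetOwnPropertyDescriptor(cx, obj, "length", &desc)) {
+//     return false;  // exception pending
+//   }
+//   if (desc.isSome() && desc->isDataDescriptor()) {
+//     // use desc->value()
+//   }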
+
+JS_PUBLIC_API bool JS_GetOwnUCPropertyDescriptor(
+ JSContext* cx, JS::Handle<JSObject*> obj, const char16_t* name,
+ size_t namelen,
+ JS::MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc) {
+ JSAtom* atom = AtomizeChars(cx, name, namelen);
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_GetOwnPropertyDescriptorById(cx, obj, id, desc);
+}
+
+JS_PUBLIC_API bool JS_GetPropertyDescriptorById(
+ JSContext* cx, JS::Handle<JSObject*> obj, JS::Handle<jsid> id,
+ JS::MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc,
+ JS::MutableHandle<JSObject*> holder) {
+ cx->check(obj, id);
+ return GetPropertyDescriptor(cx, obj, id, desc, holder);
+}
+
+JS_PUBLIC_API bool JS_GetPropertyDescriptor(
+ JSContext* cx, JS::Handle<JSObject*> obj, const char* name,
+ JS::MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc,
+ JS::MutableHandle<JSObject*> holder) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_GetPropertyDescriptorById(cx, obj, id, desc, holder);
+}
+
+JS_PUBLIC_API bool JS_GetUCPropertyDescriptor(
+ JSContext* cx, JS::Handle<JSObject*> obj, const char16_t* name,
+ size_t namelen,
+ JS::MutableHandle<mozilla::Maybe<JS::PropertyDescriptor>> desc,
+ JS::MutableHandle<JSObject*> holder) {
+ JSAtom* atom = AtomizeChars(cx, name, namelen);
+ if (!atom) {
+ return false;
+ }
+ JS::Rooted<jsid> id(cx, AtomToId(atom));
+ return JS_GetPropertyDescriptorById(cx, obj, id, desc, holder);
+}
diff --git a/js/src/vm/PropertyInfo.h b/js/src/vm/PropertyInfo.h
new file mode 100644
index 0000000000..2b41c3693c
--- /dev/null
+++ b/js/src/vm/PropertyInfo.h
@@ -0,0 +1,221 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PropertyInfo_h
+#define vm_PropertyInfo_h
+
+#include "mozilla/Assertions.h"
+
+#include <limits>
+#include <stdint.h>
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "js/GCVector.h"
+#include "js/PropertyDescriptor.h"
+#include "util/EnumFlags.h"
+
+namespace js {
+
+/* Limits on slot numbers for slotful properties in an object:
+ * SHAPE_INVALID_SLOT is a sentinel "no slot" value; SHAPE_MAXIMUM_SLOT is
+ * the largest valid slot number. */
+static constexpr uint32_t SHAPE_INVALID_SLOT = Bit(24) - 1;
+static constexpr uint32_t SHAPE_MAXIMUM_SLOT = Bit(24) - 2;
+
+// Flags associated with each property stored in the shape tree.
+enum class PropertyFlag : uint8_t {
+ // Property attributes. See also JS::PropertyAttribute.
+ Configurable = 1 << 0,
+ Enumerable = 1 << 1,
+ Writable = 1 << 2,
+
+ // Whether this is an accessor property. Accessor properties have a slot that
+ // stores a GetterSetter instance.
+ AccessorProperty = 1 << 3,
+
+ // If set, this is a custom data property. The property is exposed as a data
+ // property to JS code and PropertyDescriptor, but instead of an object slot
+ // it uses custom get/set logic.
+ //
+ // This is used to implement the special array.length and ArgumentsObject
+ // properties.
+ //
+ // This flag is deprecated (we don't want to add more uses).
+ CustomDataProperty = 1 << 4,
+};
+
+class PropertyFlags : public EnumFlags<PropertyFlag> {
+ using Base = EnumFlags<PropertyFlag>;
+ using Base::Base;
+
+ public:
+ static const PropertyFlags defaultDataPropFlags;
+
+ static PropertyFlags fromRaw(uint8_t flags) { return PropertyFlags(flags); }
+
+ bool configurable() const { return hasFlag(PropertyFlag::Configurable); }
+ bool enumerable() const { return hasFlag(PropertyFlag::Enumerable); }
+ bool writable() const {
+ MOZ_ASSERT(isDataDescriptor());
+ return hasFlag(PropertyFlag::Writable);
+ }
+
+ // Note: this returns true only for plain data properties with a slot. Returns
+ // false for custom data properties. See CustomDataProperty flag.
+ bool isDataProperty() const {
+ return !isAccessorProperty() && !isCustomDataProperty();
+ }
+ bool isAccessorProperty() const {
+ return hasFlag(PropertyFlag::AccessorProperty);
+ }
+ bool isCustomDataProperty() const {
+ return hasFlag(PropertyFlag::CustomDataProperty);
+ }
+
+  // Note: unlike isDataProperty, this also returns true for custom data
+  // properties.
+ bool isDataDescriptor() const { return !isAccessorProperty(); }
+};
+
+constexpr PropertyFlags PropertyFlags::defaultDataPropFlags = {
+ PropertyFlag::Configurable, PropertyFlag::Enumerable,
+ PropertyFlag::Writable};
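+
+// Informally: defaultDataPropFlags matches a plain data property created by
+// ordinary assignment (|obj.prop = value|), which is configurable,
+// enumerable, and writable.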
+
+// PropertyInfo contains information (PropertyFlags, slot number) for a
+// property stored in the Shape tree. Property lookups on NativeObjects return a
+// PropertyInfo.
+//
+// There's also a CompactPropertyInfo type that's used by CompactPropMap to
+// store small slot numbers (CompactPropertyInfo is two bytes instead of four).
+template <typename T>
+class PropertyInfoBase {
+ static_assert(std::is_same_v<T, uint32_t> || std::is_same_v<T, uint16_t>);
+
+ static constexpr uint32_t FlagsMask = 0xff;
+ static constexpr uint32_t SlotShift = 8;
+
+ T slotAndFlags_ = 0;
+
+ static_assert(SHAPE_INVALID_SLOT <= (UINT32_MAX >> SlotShift),
+ "SHAPE_INVALID_SLOT must fit in slotAndFlags_");
+ static_assert(SHAPE_MAXIMUM_SLOT <= (UINT32_MAX >> SlotShift),
+ "SHAPE_MAXIMUM_SLOT must fit in slotAndFlags_");
+
+  // The default constructor is private; code should prefer
+  // Maybe<PropertyInfo>. It is only used for the propInfos array in property
+  // maps (CompactPropMap and LinkedPropMap are friend classes for this
+  // reason).
+ PropertyInfoBase() = default;
+
+ template <typename U>
+ friend class PropertyInfoBase;
+ friend class CompactPropMap;
+ friend class LinkedPropMap;
+
+ public:
+ static constexpr size_t MaxSlotNumber =
+ std::numeric_limits<T>::max() >> SlotShift;
+
+ PropertyInfoBase(PropertyFlags flags, uint32_t slot)
+ : slotAndFlags_((slot << SlotShift) | flags.toRaw()) {
+ MOZ_ASSERT(maybeSlot() == slot);
+ MOZ_ASSERT(this->flags() == flags);
+ }
+
+ template <typename U>
+ explicit PropertyInfoBase(PropertyInfoBase<U> other)
+ : slotAndFlags_(other.slotAndFlags_) {
+ // Assert assigning PropertyInfo to CompactPropertyInfo doesn't lose
+ // information.
+ MOZ_ASSERT(slotAndFlags_ == other.slotAndFlags_);
+ }
+
+ bool isDataProperty() const { return flags().isDataProperty(); }
+ bool isCustomDataProperty() const { return flags().isCustomDataProperty(); }
+ bool isAccessorProperty() const { return flags().isAccessorProperty(); }
+ bool isDataDescriptor() const { return flags().isDataDescriptor(); }
+
+ bool hasSlot() const { return !isCustomDataProperty(); }
+
+ uint32_t slot() const {
+ MOZ_ASSERT(hasSlot());
+ MOZ_ASSERT(maybeSlot() < SHAPE_INVALID_SLOT);
+ return maybeSlot();
+ }
+
+ uint32_t maybeSlot() const { return slotAndFlags_ >> SlotShift; }
+
+ PropertyFlags flags() const {
+ return PropertyFlags::fromRaw(slotAndFlags_ & FlagsMask);
+ }
+ bool writable() const { return flags().writable(); }
+ bool configurable() const { return flags().configurable(); }
+ bool enumerable() const { return flags().enumerable(); }
+
+ JS::PropertyAttributes propAttributes() const {
+ JS::PropertyAttributes attrs{};
+ if (configurable()) {
+ attrs += JS::PropertyAttribute::Configurable;
+ }
+ if (enumerable()) {
+ attrs += JS::PropertyAttribute::Enumerable;
+ }
+ if (isDataDescriptor() && writable()) {
+ attrs += JS::PropertyAttribute::Writable;
+ }
+ return attrs;
+ }
+
+ T toRaw() const { return slotAndFlags_; }
+
+ bool operator==(const PropertyInfoBase<T>& other) const {
+ return slotAndFlags_ == other.slotAndFlags_;
+ }
+ bool operator!=(const PropertyInfoBase<T>& other) const {
+ return !operator==(other);
+ }
+};
+
+using PropertyInfo = PropertyInfoBase<uint32_t>;
+using CompactPropertyInfo = PropertyInfoBase<uint16_t>;
+
+static_assert(sizeof(PropertyInfo) == sizeof(uint32_t));
+static_assert(sizeof(CompactPropertyInfo) == sizeof(uint16_t));
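+
+// Bit-layout sketch (informative only, derived from FlagsMask/SlotShift
+// above): the PropertyFlags byte lives in the low 8 bits and the slot number
+// in the remaining high bits:
+//
+//   PropertyInfo        (uint32_t): [ slot : 24 bits ][ flags : 8 bits ]
+//   CompactPropertyInfo (uint16_t): [ slot :  8 bits ][ flags : 8 bits ]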
+
+class PropertyInfoWithKey : public PropertyInfo {
+ PropertyKey key_;
+
+ public:
+ PropertyInfoWithKey(PropertyFlags flags, uint32_t slot, PropertyKey key)
+ : PropertyInfo(flags, slot), key_(key) {}
+
+ PropertyInfoWithKey(PropertyInfo prop, PropertyKey key)
+ : PropertyInfo(prop), key_(key) {}
+
+ PropertyKey key() const { return key_; }
+
+ void trace(JSTracer* trc) {
+ TraceRoot(trc, &key_, "PropertyInfoWithKey-key");
+ }
+};
+
+template <class Wrapper>
+class WrappedPtrOperations<PropertyInfoWithKey, Wrapper> {
+ const PropertyInfoWithKey& value() const {
+ return static_cast<const Wrapper*>(this)->get();
+ }
+
+ public:
+ bool isDataProperty() const { return value().isDataProperty(); }
+ uint32_t slot() const { return value().slot(); }
+ PropertyKey key() const { return value().key(); }
+ PropertyFlags flags() const { return value().flags(); }
+};
+
+using PropertyInfoWithKeyVector = GCVector<PropertyInfoWithKey, 8>;
+
+} // namespace js
+
+#endif /* vm_PropertyInfo_h */
diff --git a/js/src/vm/PropertyKey.h b/js/src/vm/PropertyKey.h
new file mode 100644
index 0000000000..56e2cfea0c
--- /dev/null
+++ b/js/src/vm/PropertyKey.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PropertyKey_h
+#define vm_PropertyKey_h
+
+#include "mozilla/HashFunctions.h" // mozilla::HashGeneric
+
+#include "NamespaceImports.h" // js::PropertyKey
+
+#include "js/HashTable.h" // js::DefaultHasher
+#include "js/Id.h" // JS::PropertyKey
+#include "vm/StringType.h" // JSAtom::hash
+#include "vm/SymbolType.h" // JS::Symbol::hash
+
+namespace js {
+
+static MOZ_ALWAYS_INLINE HashNumber HashPropertyKey(PropertyKey key) {
+ // HashGeneric alone would work, but bits of atom and symbol addresses
+ // could then be recovered from the hash code. See bug 1330769.
+ if (MOZ_LIKELY(key.isAtom())) {
+ return key.toAtom()->hash();
+ }
+ if (key.isSymbol()) {
+ return key.toSymbol()->hash();
+ }
+ return mozilla::HashGeneric(key.asRawBits());
+}
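+
+// (The HashGeneric fallback is only reached for keys that are neither atoms
+// nor symbols, e.g. integer indexes, whose raw bits contain no pointers.)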
+
+// Like HashPropertyKey but optimized for callers that only use atom or symbol
+// keys.
+static MOZ_ALWAYS_INLINE HashNumber
+HashAtomOrSymbolPropertyKey(PropertyKey key) {
+ if (MOZ_LIKELY(key.isAtom())) {
+ return key.toAtom()->hash();
+ }
+ return key.toSymbol()->hash();
+}
+
+} // namespace js
+
+namespace mozilla {
+
+template <>
+struct DefaultHasher<JS::PropertyKey> {
+ using Lookup = JS::PropertyKey;
+ static HashNumber hash(JS::PropertyKey key) {
+ return js::HashPropertyKey(key);
+ }
+ static bool match(JS::PropertyKey key1, JS::PropertyKey key2) {
+ return key1 == key2;
+ }
+};
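+
+// With this specialization in place, JS::PropertyKey can be used directly as
+// a hash table key without spelling out a hash policy, e.g. (illustrative):
+//
+//   js::HashMap<JS::PropertyKey, uint32_t> counts(cx);
+//   bool ok = counts.put(key, 1);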
+
+} // namespace mozilla
+
+#endif /* vm_PropertyKey_h */
diff --git a/js/src/vm/PropertyResult.h b/js/src/vm/PropertyResult.h
new file mode 100644
index 0000000000..e9e9b4c1cc
--- /dev/null
+++ b/js/src/vm/PropertyResult.h
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_PropertyResult_h
+#define vm_PropertyResult_h
+
+#include "mozilla/Assertions.h"
+
+#include "vm/PropertyInfo.h"
+
+namespace js {
+
+class PropertyResult {
+ enum class Kind : uint8_t {
+ NotFound,
+ NativeProperty,
+ NonNativeProperty,
+ DenseElement,
+ TypedArrayElement,
+ };
+ union {
+ // Set if kind is NativeProperty.
+ PropertyInfo propInfo_;
+ // Set if kind is DenseElement.
+ uint32_t denseIndex_;
+ // Set if kind is TypedArrayElement.
+ size_t typedArrayIndex_;
+ };
+ Kind kind_ = Kind::NotFound;
+ bool ignoreProtoChain_ = false;
+
+ public:
+ // Note: because PropertyInfo does not have a default constructor, we can't
+ // use |= default| here.
+ PropertyResult() {}
+
+ // When a property is not found, we may additionally indicate that the
+ // prototype chain should be ignored. This occurs for:
+ // - An out-of-range numeric property on a TypedArrayObject.
+ // - A resolve hook recursively calling itself as it sets the property.
+ bool isNotFound() const { return kind_ == Kind::NotFound; }
+ bool shouldIgnoreProtoChain() const {
+ MOZ_ASSERT(isNotFound());
+ return ignoreProtoChain_;
+ }
+
+ bool isFound() const { return kind_ != Kind::NotFound; }
+ bool isNonNativeProperty() const { return kind_ == Kind::NonNativeProperty; }
+ bool isDenseElement() const { return kind_ == Kind::DenseElement; }
+ bool isTypedArrayElement() const { return kind_ == Kind::TypedArrayElement; }
+ bool isNativeProperty() const { return kind_ == Kind::NativeProperty; }
+
+ PropertyInfo propertyInfo() const {
+ MOZ_ASSERT(isNativeProperty());
+ return propInfo_;
+ }
+
+ uint32_t denseElementIndex() const {
+ MOZ_ASSERT(isDenseElement());
+ return denseIndex_;
+ }
+
+ size_t typedArrayElementIndex() const {
+ MOZ_ASSERT(isTypedArrayElement());
+ return typedArrayIndex_;
+ }
+
+ void setNotFound() { kind_ = Kind::NotFound; }
+
+ void setNativeProperty(PropertyInfo prop) {
+ kind_ = Kind::NativeProperty;
+ propInfo_ = prop;
+ }
+
+ void setWasmGcProperty() { kind_ = Kind::NonNativeProperty; }
+ void setProxyProperty() { kind_ = Kind::NonNativeProperty; }
+
+ void setDenseElement(uint32_t index) {
+ kind_ = Kind::DenseElement;
+ denseIndex_ = index;
+ }
+
+ void setTypedArrayElement(size_t index) {
+ kind_ = Kind::TypedArrayElement;
+ typedArrayIndex_ = index;
+ }
+
+ void setTypedArrayOutOfRange() {
+ kind_ = Kind::NotFound;
+ ignoreProtoChain_ = true;
+ }
+ void setRecursiveResolve() {
+ kind_ = Kind::NotFound;
+ ignoreProtoChain_ = true;
+ }
+};
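+
+// Usage sketch (illustrative; LookupPropertySomehow and the use* helpers are
+// hypothetical stand-ins):
+//
+//   PropertyResult prop;
+//   if (!LookupPropertySomehow(cx, obj, id, &prop)) {
+//     return false;
+//   }
+//   if (prop.isNativeProperty()) {
+//     usePropertyInfo(prop.propertyInfo());
+//   } else if (prop.isDenseElement()) {
+//     useDenseIndex(prop.denseElementIndex());
+//   }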
+
+} // namespace js
+
+#endif /* vm_PropertyResult_h */
diff --git a/js/src/vm/ProxyObject.cpp b/js/src/vm/ProxyObject.cpp
new file mode 100644
index 0000000000..0eff35d38e
--- /dev/null
+++ b/js/src/vm/ProxyObject.cpp
@@ -0,0 +1,206 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/ProxyObject.h"
+
+#include "gc/Allocator.h"
+#include "gc/GCProbes.h"
+#include "gc/Marking.h"
+#include "gc/Zone.h"
+#include "proxy/DeadObjectProxy.h"
+#include "vm/Compartment.h"
+#include "vm/Realm.h"
+
+#include "gc/ObjectKind-inl.h"
+
+using namespace js;
+
+static gc::AllocKind GetProxyGCObjectKind(const JSClass* clasp,
+ const BaseProxyHandler* handler,
+ const Value& priv,
+ bool withInlineValues) {
+ MOZ_ASSERT(clasp->isProxyObject());
+
+ uint32_t nreserved = JSCLASS_RESERVED_SLOTS(clasp);
+
+ // For now assert each Proxy Class has at least 1 reserved slot. This is
+ // not a hard requirement, but helps catch Classes that need an explicit
+ // JSCLASS_HAS_RESERVED_SLOTS since bug 1360523.
+ MOZ_ASSERT(nreserved > 0);
+
+ uint32_t nslots = 0;
+ if (withInlineValues) {
+ nslots = detail::ProxyValueArray::allocCount(nreserved);
+ }
+
+ MOZ_ASSERT(nslots <= NativeObject::MAX_FIXED_SLOTS);
+ gc::AllocKind kind = gc::GetGCObjectKind(nslots);
+ if (handler->finalizeInBackground(priv)) {
+ kind = ForegroundToBackgroundAllocKind(kind);
+ }
+
+ return kind;
+}
+
+void ProxyObject::init(const BaseProxyHandler* handler, HandleValue priv,
+ JSContext* cx) {
+ setInlineValueArray();
+
+ detail::ProxyValueArray* values = detail::GetProxyDataLayout(this)->values();
+ values->init(numReservedSlots());
+
+ data.handler = handler;
+
+ if (IsCrossCompartmentWrapper(this)) {
+ MOZ_ASSERT(cx->global() == &cx->compartment()->globalForNewCCW());
+ setCrossCompartmentPrivate(priv);
+ } else {
+ setSameCompartmentPrivate(priv);
+ }
+
+ // The expando slot is nullptr until required by the installation of
+ // a private field.
+ setExpando(nullptr);
+}
+
+/* static */
+ProxyObject* ProxyObject::New(JSContext* cx, const BaseProxyHandler* handler,
+ HandleValue priv, TaggedProto proto_,
+ const JSClass* clasp) {
+ Rooted<TaggedProto> proto(cx, proto_);
+
+ MOZ_ASSERT(!clasp->isNativeObject());
+ MOZ_ASSERT(clasp->isProxyObject());
+ MOZ_ASSERT(isValidProxyClass(clasp));
+ MOZ_ASSERT(clasp->shouldDelayMetadataBuilder());
+ MOZ_ASSERT_IF(proto.isObject(),
+ cx->compartment() == proto.toObject()->compartment());
+ MOZ_ASSERT(clasp->hasFinalize());
+
+#ifdef DEBUG
+ if (priv.isGCThing()) {
+ JS::AssertCellIsNotGray(priv.toGCThing());
+ }
+#endif
+
+ gc::AllocKind allocKind = GetProxyGCObjectKind(clasp, handler, priv,
+ /* withInlineValues = */ true);
+
+ Realm* realm = cx->realm();
+
+ AutoSetNewObjectMetadata metadata(cx);
+ // Try to look up the shape in the NewProxyCache.
+ Rooted<Shape*> shape(cx);
+ if (!realm->newProxyCache.lookup(clasp, proto, shape.address())) {
+ shape = ProxyShape::getShape(cx, clasp, realm, proto, ObjectFlags());
+ if (!shape) {
+ return nullptr;
+ }
+
+ realm->newProxyCache.add(shape);
+ }
+
+ MOZ_ASSERT(shape->realm() == realm);
+ MOZ_ASSERT(!IsAboutToBeFinalizedUnbarriered(shape.get()));
+
+ // Ensure that the wrapper has the same lifetime assumptions as the
+ // wrappee. Prefer to allocate in the nursery, when possible.
+ gc::Heap heap;
+ if ((priv.isGCThing() && priv.toGCThing()->isTenured()) ||
+ !handler->canNurseryAllocate()) {
+ heap = gc::Heap::Tenured;
+ } else {
+ heap = gc::Heap::Default;
+ }
+
+ debugCheckNewObject(shape, allocKind, heap);
+
+ ProxyObject* proxy = cx->newCell<ProxyObject>(allocKind, heap, clasp);
+ if (!proxy) {
+ return nullptr;
+ }
+
+ proxy->initShape(shape);
+
+ MOZ_ASSERT(clasp->shouldDelayMetadataBuilder());
+ realm->setObjectPendingMetadata(proxy);
+
+ gc::gcprobes::CreateObject(proxy);
+
+ proxy->init(handler, priv, cx);
+
+ return proxy;
+}
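+
+// Creation flow, informally: look up (or create and cache) the proxy shape,
+// choose an alloc kind and heap (nursery only when both the handler and the
+// private value allow it), allocate the cell, and let init() fill in the
+// inline ProxyValueArray, handler, private value, and null expando.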
+
+gc::AllocKind ProxyObject::allocKindForTenure() const {
+ Value priv = private_();
+ return GetProxyGCObjectKind(getClass(), data.handler, priv,
+ usingInlineValueArray());
+}
+
+void ProxyObject::setCrossCompartmentPrivate(const Value& priv) {
+ setPrivate(priv);
+}
+
+void ProxyObject::setSameCompartmentPrivate(const Value& priv) {
+ MOZ_ASSERT(IsObjectValueInCompartment(priv, compartment()));
+ setPrivate(priv);
+}
+
+inline void ProxyObject::setPrivate(const Value& priv) {
+#ifdef DEBUG
+ JS::AssertValueIsNotGray(priv);
+#endif
+ *slotOfPrivate() = priv;
+}
+
+void ProxyObject::setExpando(JSObject* expando) {
+ // Ensure we're in the same compartment as the proxy object: Don't want the
+ // expando to end up as a CCW.
+ MOZ_ASSERT_IF(expando, expando->compartment() == compartment());
+
+ // Ensure that we don't accidentally end up pointing to a
+ // grey object, which would violate GC invariants.
+ MOZ_ASSERT_IF(!zone()->isGCPreparing() && isMarkedBlack() && expando,
+ !JS::GCThingIsMarkedGray(JS::GCCellPtr(expando)));
+
+ *slotOfExpando() = ObjectOrNullValue(expando);
+}
+
+void ProxyObject::nuke() {
+  // Notify the zone that a delegate is no longer a delegate. Be careful not to
+  // expose this pointer: it has already been removed from the wrapper map,
+  // yet assertions during tracing will verify that it is still present.
+ JSObject* delegate = UncheckedUnwrapWithoutExpose(this);
+ if (delegate != this) {
+ delegate->zone()->beforeClearDelegate(this, delegate);
+ }
+
+  // Clear the target reference and replace it with a value that encodes
+  // various information about the original target.
+ setSameCompartmentPrivate(DeadProxyTargetValue(this));
+
+  // Clear out the expando.
+ setExpando(nullptr);
+
+ // Update the handler to make this a DeadObjectProxy.
+ setHandler(&DeadObjectProxy::singleton);
+
+  // The proxy's reserved slots are not cleared and will continue to be
+  // traced. This avoids the possibility of triggering write barriers while
+  // nuking proxies in dead compartments, which could otherwise cause those
+  // compartments to be kept alive. Note that these slots cannot hold
+  // cross-compartment pointers, so this cannot cause the target compartment
+  // to leak.
+}
+
+JS_PUBLIC_API void js::detail::SetValueInProxy(Value* slot,
+ const Value& value) {
+ // Slots in proxies are not GCPtr<Value>s, so do a cast whenever assigning
+ // values to them which might trigger a barrier.
+ *reinterpret_cast<GCPtr<Value>*>(slot) = value;
+}
diff --git a/js/src/vm/ProxyObject.h b/js/src/vm/ProxyObject.h
new file mode 100644
index 0000000000..b583bdff36
--- /dev/null
+++ b/js/src/vm/ProxyObject.h
@@ -0,0 +1,165 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ProxyObject_h
+#define vm_ProxyObject_h
+
+#include "js/Proxy.h"
+#include "js/shadow/Object.h" // JS::shadow::Object
+#include "vm/JSObject.h"
+
+namespace js {
+
+/**
+ * This is the base class for the various kinds of proxy objects. It's never
+ * instantiated.
+ *
+ * Proxy objects use their shape primarily to record flags. Property
+ * information, etc., is all dynamically computed.
+ *
+ * There is no class_ member to force specialization of JSObject::is<T>().
+ * The implementation in JSObject is incorrect for proxies since it doesn't
+ * take account of the handler type.
+ */
+class ProxyObject : public JSObject {
+ // GetProxyDataLayout computes the address of this field.
+ detail::ProxyDataLayout data;
+
+ void static_asserts() {
+ static_assert(sizeof(ProxyObject) == sizeof(JSObject_Slots0),
+ "proxy object size must match GC thing size");
+ static_assert(offsetof(ProxyObject, data) == detail::ProxyDataOffset,
+ "proxy object layout must match shadow interface");
+ static_assert(offsetof(ProxyObject, data.reservedSlots) ==
+ offsetof(JS::shadow::Object, slots),
+ "Proxy reservedSlots must overlay native object slots field");
+ }
+
+ public:
+ static ProxyObject* New(JSContext* cx, const BaseProxyHandler* handler,
+ HandleValue priv, TaggedProto proto_,
+ const JSClass* clasp);
+
+ void init(const BaseProxyHandler* handler, HandleValue priv, JSContext* cx);
+
+ // Proxies usually store their ProxyValueArray inline in the object.
+ // There's one unfortunate exception: when a proxy is swapped with another
+ // object, and the sizes don't match, we malloc the ProxyValueArray.
+ void* inlineDataStart() const {
+ return (void*)(uintptr_t(this) + sizeof(ProxyObject));
+ }
+ bool usingInlineValueArray() const {
+ return data.values() == inlineDataStart();
+ }
+ void setInlineValueArray() {
+ data.reservedSlots =
+ &reinterpret_cast<detail::ProxyValueArray*>(inlineDataStart())
+ ->reservedSlots;
+ }
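+
+  // Inline layout sketch (informal), for the common non-swapped case:
+  //
+  //   [ ProxyObject { ..., data } ][ ProxyValueArray { private, expando,
+  //                                                    reservedSlots... } ]
+  //                                ^-- inlineDataStart()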
+
+ // For use from JSObject::swap.
+ [[nodiscard]] bool prepareForSwap(JSContext* cx,
+ MutableHandleValueVector valuesOut);
+ [[nodiscard]] bool fixupAfterSwap(JSContext* cx, HandleValueVector values);
+
+ const Value& private_() const { return GetProxyPrivate(this); }
+ const Value& expando() const { return GetProxyExpando(this); }
+
+ void setExpando(JSObject* expando);
+
+ void setCrossCompartmentPrivate(const Value& priv);
+ void setSameCompartmentPrivate(const Value& priv);
+
+ JSObject* target() const { return private_().toObjectOrNull(); }
+
+ const BaseProxyHandler* handler() const { return GetProxyHandler(this); }
+
+ void setHandler(const BaseProxyHandler* handler) {
+ SetProxyHandler(this, handler);
+ }
+
+ static size_t offsetOfReservedSlots() {
+ return offsetof(ProxyObject, data.reservedSlots);
+ }
+ static size_t offsetOfHandler() {
+ return offsetof(ProxyObject, data.handler);
+ }
+
+ size_t numReservedSlots() const { return JSCLASS_RESERVED_SLOTS(getClass()); }
+ const Value& reservedSlot(size_t n) const {
+ return GetProxyReservedSlot(this, n);
+ }
+
+ void setReservedSlot(size_t n, const Value& extra) {
+ SetProxyReservedSlot(this, n, extra);
+ }
+
+ gc::AllocKind allocKindForTenure() const;
+
+ private:
+ GCPtr<Value>* reservedSlotPtr(size_t n) {
+ return reinterpret_cast<GCPtr<Value>*>(
+ &detail::GetProxyDataLayout(this)->reservedSlots->slots[n]);
+ }
+
+ GCPtr<Value>* slotOfPrivate() {
+ return reinterpret_cast<GCPtr<Value>*>(
+ &detail::GetProxyDataLayout(this)->values()->privateSlot);
+ }
+
+ GCPtr<Value>* slotOfExpando() {
+ return reinterpret_cast<GCPtr<Value>*>(
+ &detail::GetProxyDataLayout(this)->values()->expandoSlot);
+ }
+
+ void setPrivate(const Value& priv);
+
+ static bool isValidProxyClass(const JSClass* clasp) {
+ // Since we can take classes from the outside, make sure that they
+ // are "sane". They have to quack enough like proxies for us to belive
+ // they should be treated as such.
+
+ // Proxy classes are not allowed to have call or construct hooks directly.
+ // Their callability is instead decided by handler()->isCallable().
+ return clasp->isProxyObject() && clasp->isTrace(ProxyObject::trace) &&
+ !clasp->getCall() && !clasp->getConstruct();
+ }
+
+ public:
+ static unsigned grayLinkReservedSlot(JSObject* obj);
+
+ void renew(const BaseProxyHandler* handler, const Value& priv);
+
+ static void trace(JSTracer* trc, JSObject* obj);
+
+ static void traceEdgeToTarget(JSTracer* trc, ProxyObject* obj);
+
+ void nurseryProxyTenured(ProxyObject* old);
+
+ void nuke();
+};
+
+bool IsDerivedProxyObject(const JSObject* obj,
+ const js::BaseProxyHandler* handler);
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<js::ProxyObject>() const {
+ // Note: this method is implemented in terms of the IsProxy() friend API
+ // functions to ensure the implementations are tied together.
+  // Note 2: this specialization isn't used for subclasses of ProxyObject,
+  // which must supply their own implementation.
+ return js::IsProxy(this);
+}
+
+inline bool js::IsDerivedProxyObject(const JSObject* obj,
+ const js::BaseProxyHandler* handler) {
+ return obj->is<js::ProxyObject>() &&
+ obj->as<js::ProxyObject>().handler() == handler;
+}
+
+#endif /* vm_ProxyObject_h */
diff --git a/js/src/vm/Realm-inl.h b/js/src/vm/Realm-inl.h
new file mode 100644
index 0000000000..698d0b3f74
--- /dev/null
+++ b/js/src/vm/Realm-inl.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Realm_inl_h
+#define vm_Realm_inl_h
+
+#include "vm/Realm.h"
+
+#include "gc/Barrier.h"
+#include "gc/Marking.h"
+#include "vm/GlobalObject.h"
+
+#include "vm/JSContext-inl.h"
+
+inline void JS::Realm::initGlobal(js::GlobalObject& global) {
+ MOZ_ASSERT(global.realm() == this);
+ MOZ_ASSERT(!global_);
+ global_.set(&global);
+}
+
+js::GlobalObject* JS::Realm::maybeGlobal() const {
+ MOZ_ASSERT_IF(global_, global_->realm() == this);
+ return global_;
+}
+
+inline bool JS::Realm::hasLiveGlobal() const {
+ // The global is swept by traceWeakGlobalEdge when we start sweeping a zone
+ // group. This frees the GlobalObjectData, so the realm must live at least as
+ // long as the global.
+ MOZ_ASSERT_IF(global_, !js::gc::IsAboutToBeFinalized(global_));
+ return bool(global_);
+}
+
+inline bool JS::Realm::hasInitializedGlobal() const {
+ return hasLiveGlobal() && !initializingGlobal_;
+}
+
+inline bool JS::Realm::marked() const {
+ // The Realm survives in the following cases:
+ // - its global is live
+ // - it has been entered (to ensure we don't destroy the Realm while we're
+ // allocating its global)
+ // - it was allocated after the start of an incremental GC (as there may be
+ // pointers to it from other GC things)
+ return hasLiveGlobal() || hasBeenEnteredIgnoringJit() ||
+ allocatedDuringIncrementalGC_;
+}
+
+/* static */ inline js::ObjectRealm& js::ObjectRealm::get(const JSObject* obj) {
+ // Note: obj might be a CCW if we're accessing ObjectRealm::enumerators.
+ // CCWs here are fine because we always return the same ObjectRealm for a
+ // particular (CCW) object.
+ return obj->maybeCCWRealm()->objects_;
+}
+
+template <typename T>
+js::AutoRealm::AutoRealm(JSContext* cx, const T& target)
+ : cx_(cx), origin_(cx->realm()) {
+ cx_->enterRealmOf(target);
+}
+
+// Protected constructor that bypasses assertions in enterRealmOf.
+js::AutoRealm::AutoRealm(JSContext* cx, JS::Realm* target)
+ : cx_(cx), origin_(cx->realm()) {
+ cx_->enterRealm(target);
+}
+
+js::AutoRealm::~AutoRealm() { cx_->leaveRealm(origin_); }
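+
+// Usage sketch (illustrative): AutoRealm is a scoped realm switch; the
+// original realm is restored when the guard goes out of scope:
+//
+//   {
+//     js::AutoRealm ar(cx, someObject);
+//     // cx->realm() is now someObject's realm.
+//   }
+//   // cx->realm() is back to what it was.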
+
+js::AutoFunctionOrCurrentRealm::AutoFunctionOrCurrentRealm(JSContext* cx,
+ HandleObject fun) {
+ JS::Realm* realm = JS::GetFunctionRealm(cx, fun);
+ if (!realm) {
+ cx->clearPendingException();
+ return;
+ }
+
+ // Enter the function's realm.
+ ar_.emplace(cx, realm);
+}
+
+js::AutoAllocInAtomsZone::AutoAllocInAtomsZone(JSContext* cx)
+ : cx_(cx), origin_(cx->realm()) {
+ cx_->enterAtomsZone();
+}
+
+js::AutoAllocInAtomsZone::~AutoAllocInAtomsZone() {
+ cx_->leaveAtomsZone(origin_);
+}
+
+js::AutoMaybeLeaveAtomsZone::AutoMaybeLeaveAtomsZone(JSContext* cx)
+ : cx_(cx), wasInAtomsZone_(cx->zone() && cx->zone()->isAtomsZone()) {
+ if (wasInAtomsZone_) {
+ cx_->leaveAtomsZone(nullptr);
+ }
+}
+
+js::AutoMaybeLeaveAtomsZone::~AutoMaybeLeaveAtomsZone() {
+ if (wasInAtomsZone_) {
+ cx_->enterAtomsZone();
+ }
+}
+
+js::AutoRealmUnchecked::AutoRealmUnchecked(JSContext* cx, JS::Realm* target)
+ : AutoRealm(cx, target) {}
+
+#endif /* vm_Realm_inl_h */
diff --git a/js/src/vm/Realm.cpp b/js/src/vm/Realm.cpp
new file mode 100644
index 0000000000..f27ddc8f06
--- /dev/null
+++ b/js/src/vm/Realm.cpp
@@ -0,0 +1,774 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/shadow/Realm.h" // JS::shadow::Realm
+#include "vm/Realm-inl.h"
+
+#include "mozilla/MemoryReporting.h"
+
+#include <stddef.h>
+
+#include "jsfriendapi.h"
+
+#include "builtin/WrappedFunctionObject.h"
+#include "debugger/DebugAPI.h"
+#include "debugger/Debugger.h"
+#include "gc/GC.h"
+#include "jit/JitRealm.h"
+#include "jit/JitRuntime.h"
+#include "js/CallAndConstruct.h" // JS::IsCallable
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/GCVariant.h"
+#include "js/Proxy.h"
+#include "js/RootingAPI.h"
+#include "js/Wrapper.h"
+#include "vm/Compartment.h"
+#include "vm/DateTime.h"
+#include "vm/Iteration.h"
+#include "vm/JSContext.h"
+#include "vm/PIC.h"
+
+#include "gc/Marking-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+
+Realm::DebuggerVectorEntry::DebuggerVectorEntry(js::Debugger* dbg_,
+ JSObject* link)
+ : dbg(dbg_), debuggerLink(link) {}
+
+ObjectRealm::ObjectRealm(JS::Zone* zone)
+ : innerViews(zone, zone), iteratorCache(zone) {}
+
+Realm::Realm(Compartment* comp, const JS::RealmOptions& options)
+ : JS::shadow::Realm(comp),
+ zone_(comp->zone()),
+ runtime_(comp->runtimeFromMainThread()),
+ creationOptions_(options.creationOptions()),
+ behaviors_(options.behaviors()),
+ objects_(zone_),
+ randomKeyGenerator_(runtime_->forkRandomKeyGenerator()),
+ debuggers_(zone_),
+ allocatedDuringIncrementalGC_(zone_->isGCMarkingOrSweeping() ||
+ zone_->isGCFinished()),
+ wasm(runtime_) {
+ runtime_->numRealms++;
+}
+
+Realm::~Realm() {
+ MOZ_ASSERT(!hasBeenEnteredIgnoringJit());
+ MOZ_ASSERT(!isDebuggee());
+
+ // Write the code coverage information in a file.
+ if (lcovRealm_) {
+ runtime_->lcovOutput().writeLCovResult(*lcovRealm_);
+ }
+
+ MOZ_ASSERT(runtime_->numRealms > 0);
+ runtime_->numRealms--;
+}
+
+void Realm::init(JSContext* cx, JSPrincipals* principals) {
+ /*
+ * As a hack, we clear our timezone cache every time we create a new realm.
+ * This ensures that the cache is always relatively fresh, but shouldn't
+ * interfere with benchmarks that create tons of date objects (unless they
+ * also create tons of iframes, which seems unlikely).
+ */
+ js::ResetTimeZoneInternal(ResetTimeZoneMode::DontResetIfOffsetUnchanged);
+
+ if (principals) {
+ // Any realm with the trusted principals -- and there can be
+ // multiple -- is a system realm.
+ isSystem_ = (principals == cx->runtime()->trustedPrincipals());
+ JS_HoldPrincipals(principals);
+ principals_ = principals;
+ }
+}
+
+bool JSRuntime::createJitRuntime(JSContext* cx) {
+ using namespace js::jit;
+
+ MOZ_ASSERT(!jitRuntime_);
+
+ if (!CanLikelyAllocateMoreExecutableMemory()) {
+ // Try to release memory first instead of potentially reporting OOM below.
+ if (OnLargeAllocationFailure) {
+ OnLargeAllocationFailure();
+ }
+ }
+
+ jit::JitRuntime* jrt = cx->new_<jit::JitRuntime>();
+ if (!jrt) {
+ return false;
+ }
+
+ // Unfortunately, initialization depends on jitRuntime_ being non-null, so
+ // we can't just wait to assign jitRuntime_.
+ jitRuntime_ = jrt;
+
+ if (!jitRuntime_->initialize(cx)) {
+ js_delete(jitRuntime_.ref());
+ jitRuntime_ = nullptr;
+ return false;
+ }
+
+ return true;
+}
+
+bool Realm::ensureJitRealmExists(JSContext* cx) {
+ using namespace js::jit;
+
+ if (jitRealm_) {
+ return true;
+ }
+
+ if (!zone()->getJitZone(cx)) {
+ return false;
+ }
+
+ UniquePtr<JitRealm> jitRealm = cx->make_unique<JitRealm>();
+ if (!jitRealm) {
+ return false;
+ }
+
+ jitRealm->initialize(zone()->allocNurseryStrings());
+
+ jitRealm_ = std::move(jitRealm);
+ return true;
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
+void js::DtoaCache::checkCacheAfterMovingGC() {
+ MOZ_ASSERT(!str || !IsForwarded(str));
+}
+
+#endif // JSGC_HASH_TABLE_CHECKS
+
+NonSyntacticLexicalEnvironmentObject*
+ObjectRealm::getOrCreateNonSyntacticLexicalEnvironment(JSContext* cx,
+ HandleObject enclosing,
+ HandleObject key,
+ HandleObject thisv) {
+ MOZ_ASSERT(&ObjectRealm::get(enclosing) == this);
+
+ if (!nonSyntacticLexicalEnvironments_) {
+ auto map = cx->make_unique<ObjectWeakMap>(cx);
+ if (!map) {
+ return nullptr;
+ }
+
+ nonSyntacticLexicalEnvironments_ = std::move(map);
+ }
+
+ RootedObject lexicalEnv(cx, nonSyntacticLexicalEnvironments_->lookup(key));
+
+ if (!lexicalEnv) {
+ MOZ_ASSERT(key->is<NonSyntacticVariablesObject>() ||
+ !key->is<EnvironmentObject>());
+ lexicalEnv =
+ NonSyntacticLexicalEnvironmentObject::create(cx, enclosing, thisv);
+ if (!lexicalEnv) {
+ return nullptr;
+ }
+ if (!nonSyntacticLexicalEnvironments_->add(cx, key, lexicalEnv)) {
+ return nullptr;
+ }
+ }
+
+ return &lexicalEnv->as<NonSyntacticLexicalEnvironmentObject>();
+}
+
+NonSyntacticLexicalEnvironmentObject*
+ObjectRealm::getOrCreateNonSyntacticLexicalEnvironment(JSContext* cx,
+ HandleObject enclosing) {
+ // If a wrapped WithEnvironmentObject was passed in, unwrap it, as we may
+ // be creating different WithEnvironmentObject wrappers each time.
+ RootedObject key(cx, enclosing);
+ if (enclosing->is<WithEnvironmentObject>()) {
+ MOZ_ASSERT(!enclosing->as<WithEnvironmentObject>().isSyntactic());
+ key = &enclosing->as<WithEnvironmentObject>().object();
+ }
+
+ // NOTE: The default global |this| value is set to key for compatibility
+ // with existing users of the lexical environment cache.
+ // - When used by shared-global JSM loader, |this| must be the
+ // NonSyntacticVariablesObject passed as enclosing.
+ // - When used by SubscriptLoader, |this| must be the target object of
+ // the WithEnvironmentObject wrapper.
+ // - When used by XBL/DOM Events, we execute directly as a function and
+ // do not access the |this| value.
+ // See js::GetFunctionThis / js::GetNonSyntacticGlobalThis
+ return getOrCreateNonSyntacticLexicalEnvironment(cx, enclosing, key,
+ /*thisv = */ key);
+}
+
+NonSyntacticLexicalEnvironmentObject*
+ObjectRealm::getNonSyntacticLexicalEnvironment(JSObject* key) const {
+ MOZ_ASSERT(&ObjectRealm::get(key) == this);
+
+ if (!nonSyntacticLexicalEnvironments_) {
+ return nullptr;
+ }
+ // If a wrapped WithEnvironmentObject was passed in, unwrap it as in
+ // getOrCreateNonSyntacticLexicalEnvironment.
+ if (key->is<WithEnvironmentObject>()) {
+ MOZ_ASSERT(!key->as<WithEnvironmentObject>().isSyntactic());
+ key = &key->as<WithEnvironmentObject>().object();
+ }
+ JSObject* lexicalEnv = nonSyntacticLexicalEnvironments_->lookup(key);
+ if (!lexicalEnv) {
+ return nullptr;
+ }
+ return &lexicalEnv->as<NonSyntacticLexicalEnvironmentObject>();
+}
+
+void Realm::traceGlobalData(JSTracer* trc) {
+ // Trace things reachable from the realm's global. Note that these edges
+ // must be swept too in case the realm is live but the global is not.
+
+ savedStacks_.trace(trc);
+
+ DebugAPI::traceFromRealm(trc, this);
+}
+
+void ObjectRealm::trace(JSTracer* trc) {
+ if (objectMetadataTable) {
+ objectMetadataTable->trace(trc);
+ }
+
+ if (nonSyntacticLexicalEnvironments_) {
+ nonSyntacticLexicalEnvironments_->trace(trc);
+ }
+}
+
+void Realm::traceRoots(JSTracer* trc,
+ js::gc::GCRuntime::TraceOrMarkRuntime traceOrMark) {
+  // It's not possible to trigger a GC between allocating the pending object
+  // and setting its metadata in ~AutoSetNewObjectMetadata.
+ MOZ_RELEASE_ASSERT(!objectPendingMetadata_);
+
+ if (!JS::RuntimeHeapIsMinorCollecting()) {
+ // The global is never nursery allocated, so we don't need to
+ // trace it when doing a minor collection.
+ //
+ // If a realm is on-stack, we mark its global so that
+ // JSContext::global() remains valid.
+ if (shouldTraceGlobal() && global_) {
+ TraceRoot(trc, global_.unbarrieredAddress(), "on-stack realm global");
+ }
+ }
+
+ // Nothing below here needs to be treated as a root if we aren't marking
+ // this zone for a collection.
+ if (traceOrMark == js::gc::GCRuntime::MarkRuntime &&
+ !zone()->isCollectingFromAnyThread()) {
+ return;
+ }
+
+ /* Mark debug scopes, if present */
+ if (debugEnvs_) {
+ debugEnvs_->trace(trc);
+ }
+
+ objects_.trace(trc);
+}
+
+void ObjectRealm::finishRoots() {
+ if (objectMetadataTable) {
+ objectMetadataTable->clear();
+ }
+
+ if (nonSyntacticLexicalEnvironments_) {
+ nonSyntacticLexicalEnvironments_->clear();
+ }
+}
+
+void Realm::finishRoots() {
+ if (debugEnvs_) {
+ debugEnvs_->finish();
+ }
+
+ objects_.finishRoots();
+}
+
+void ObjectRealm::sweepAfterMinorGC(JSTracer* trc) {
+ InnerViewTable& table = innerViews.get();
+ if (table.needsSweepAfterMinorGC()) {
+ table.sweepAfterMinorGC(trc);
+ }
+}
+
+void Realm::sweepAfterMinorGC(JSTracer* trc) {
+ globalWriteBarriered = 0;
+ dtoaCache.purge();
+ objects_.sweepAfterMinorGC(trc);
+}
+
+void Realm::traceWeakSavedStacks(JSTracer* trc) { savedStacks_.traceWeak(trc); }
+
+void Realm::traceWeakGlobalEdge(JSTracer* trc) {
+ // If the global is dead, free its GlobalObjectData.
+ auto result = TraceWeakEdge(trc, &global_, "Realm::global_");
+ if (result.isDead()) {
+ result.initialTarget()->releaseData(runtime_->gcContext());
+ }
+}
+
+void Realm::traceWeakEdgesInJitRealm(JSTracer* trc) {
+ if (jitRealm_) {
+ jitRealm_->traceWeak(trc, this);
+ }
+}
+
+void Realm::traceWeakRegExps(JSTracer* trc) {
+ /*
+ * JIT code increments activeWarmUpCounter for any RegExpShared used by jit
+ * code for the lifetime of the JIT script. Thus, we must perform
+ * sweeping after clearing jit code.
+ */
+ regExps.traceWeak(trc);
+}
+
+void Realm::traceWeakDebugEnvironmentEdges(JSTracer* trc) {
+ if (debugEnvs_) {
+ debugEnvs_->traceWeak(trc);
+ }
+}
+
+void Realm::fixupAfterMovingGC(JSTracer* trc) {
+ purge();
+ traceWeakGlobalEdge(trc);
+}
+
+void Realm::purge() {
+ dtoaCache.purge();
+ newProxyCache.purge();
+ newPlainObjectWithPropsCache.purge();
+ objects_.iteratorCache.clearAndCompact();
+ arraySpeciesLookup.purge();
+ promiseLookup.purge();
+
+ if (zone()->isGCPreparing()) {
+ purgeForOfPicChain();
+ }
+}
+
+void Realm::purgeForOfPicChain() {
+ if (GlobalObject* global = global_.unbarrieredGet()) {
+ if (NativeObject* object = global->getForOfPICObject()) {
+ ForOfPIC::Chain* chain = ForOfPIC::fromJSObject(object);
+ chain->freeAllStubs(runtime_->gcContext());
+ }
+ }
+}
+
+// Check whether this individual realm is recording allocations. Debuggers or
+// the runtime may try to record allocations, so callers can use this method
+// to see whether any initialization is needed.
+bool Realm::isRecordingAllocations() { return !!allocationMetadataBuilder_; }
+
+void Realm::setAllocationMetadataBuilder(
+ const js::AllocationMetadataBuilder* builder) {
+ // Clear any jitcode in the runtime, which behaves differently depending on
+ // whether there is a creation callback.
+ ReleaseAllJITCode(runtime_->gcContext());
+
+ allocationMetadataBuilder_ = builder;
+}
+
+void Realm::forgetAllocationMetadataBuilder() {
+ // Unlike setAllocationMetadataBuilder, we don't have to discard all JIT
+ // code here (code is still valid, just a bit slower because it doesn't do
+ // inline GC allocations when a metadata builder is present), but we do want
+ // to cancel off-thread Ion compilations to avoid races when Ion calls
+ // hasAllocationMetadataBuilder off-thread.
+ CancelOffThreadIonCompile(this);
+
+ allocationMetadataBuilder_ = nullptr;
+}
+
+void Realm::setNewObjectMetadata(JSContext* cx, HandleObject obj) {
+ MOZ_ASSERT(obj->maybeCCWRealm() == this);
+ cx->check(compartment(), obj);
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (JSObject* metadata =
+ allocationMetadataBuilder_->build(cx, obj, oomUnsafe)) {
+ MOZ_ASSERT(metadata->maybeCCWRealm() == obj->maybeCCWRealm());
+ cx->check(metadata);
+
+ if (!objects_.objectMetadataTable) {
+ auto table = cx->make_unique<ObjectWeakMap>(cx);
+ if (!table) {
+ oomUnsafe.crash("setNewObjectMetadata");
+ }
+
+ objects_.objectMetadataTable = std::move(table);
+ }
+
+ if (!objects_.objectMetadataTable->add(cx, obj, metadata)) {
+ oomUnsafe.crash("setNewObjectMetadata");
+ }
+ }
+}
+
+void Realm::updateDebuggerObservesFlag(unsigned flag) {
+ MOZ_ASSERT(isDebuggee());
+ MOZ_ASSERT(flag == DebuggerObservesAllExecution ||
+ flag == DebuggerObservesCoverage ||
+ flag == DebuggerObservesAsmJS || flag == DebuggerObservesWasm);
+
+ GlobalObject* global =
+ zone()->runtimeFromMainThread()->gc.isForegroundSweeping()
+ ? unsafeUnbarrieredMaybeGlobal()
+ : maybeGlobal();
+ bool observes = false;
+ if (flag == DebuggerObservesAllExecution) {
+ observes = DebugAPI::debuggerObservesAllExecution(global);
+ } else if (flag == DebuggerObservesCoverage) {
+ observes = DebugAPI::debuggerObservesCoverage(global);
+ } else if (flag == DebuggerObservesAsmJS) {
+ observes = DebugAPI::debuggerObservesAsmJS(global);
+ } else if (flag == DebuggerObservesWasm) {
+ observes = DebugAPI::debuggerObservesWasm(global);
+ }
+
+ if (observes) {
+ debugModeBits_ |= flag;
+ } else {
+ debugModeBits_ &= ~flag;
+ }
+}
+
+void Realm::setIsDebuggee() {
+ if (!isDebuggee()) {
+ debugModeBits_ |= IsDebuggee;
+ runtimeFromMainThread()->incrementNumDebuggeeRealms();
+ }
+}
+
+void Realm::unsetIsDebuggee() {
+ if (isDebuggee()) {
+ if (debuggerObservesCoverage()) {
+ runtime_->decrementNumDebuggeeRealmsObservingCoverage();
+ }
+ debugModeBits_ = 0;
+ DebugEnvironments::onRealmUnsetIsDebuggee(this);
+ runtimeFromMainThread()->decrementNumDebuggeeRealms();
+ }
+}
+
+void Realm::updateDebuggerObservesCoverage() {
+ bool previousState = debuggerObservesCoverage();
+ updateDebuggerObservesFlag(DebuggerObservesCoverage);
+ if (previousState == debuggerObservesCoverage()) {
+ return;
+ }
+
+ if (debuggerObservesCoverage()) {
+ // Interrupt any running interpreter frame. The scriptCounts are
+ // allocated on demand when a script resumes its execution.
+ JSContext* cx = TlsContext.get();
+ for (ActivationIterator iter(cx); !iter.done(); ++iter) {
+ if (iter->isInterpreter()) {
+ iter->asInterpreter()->enableInterruptsUnconditionally();
+ }
+ }
+ runtime_->incrementNumDebuggeeRealmsObservingCoverage();
+ return;
+ }
+
+ runtime_->decrementNumDebuggeeRealmsObservingCoverage();
+
+ // If code coverage is enabled by any other means, keep it.
+ if (collectCoverageForDebug()) {
+ return;
+ }
+
+ clearScriptCounts();
+ clearScriptLCov();
+}
+
+coverage::LCovRealm* Realm::lcovRealm() {
+ if (!lcovRealm_) {
+ lcovRealm_ = js::MakeUnique<coverage::LCovRealm>(this);
+ }
+ return lcovRealm_.get();
+}
+
+bool Realm::collectCoverageForDebug() const {
+ return debuggerObservesCoverage() || coverage::IsLCovEnabled();
+}
+
+void Realm::clearScriptCounts() { zone()->clearScriptCounts(this); }
+
+void Realm::clearScriptLCov() { zone()->clearScriptLCov(this); }
+
+void ObjectRealm::addSizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf, size_t* innerViewsArg,
+ size_t* objectMetadataTablesArg,
+ size_t* nonSyntacticLexicalEnvironmentsArg) {
+ *innerViewsArg += innerViews.sizeOfExcludingThis(mallocSizeOf);
+
+ if (objectMetadataTable) {
+ *objectMetadataTablesArg +=
+ objectMetadataTable->sizeOfIncludingThis(mallocSizeOf);
+ }
+
+ if (auto& map = nonSyntacticLexicalEnvironments_) {
+ *nonSyntacticLexicalEnvironmentsArg +=
+ map->sizeOfIncludingThis(mallocSizeOf);
+ }
+}
+
+void Realm::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* realmObject, size_t* realmTables,
+ size_t* innerViewsArg,
+ size_t* objectMetadataTablesArg,
+ size_t* savedStacksSet,
+ size_t* nonSyntacticLexicalEnvironmentsArg,
+ size_t* jitRealm) {
+ *realmObject += mallocSizeOf(this);
+ wasm.addSizeOfExcludingThis(mallocSizeOf, realmTables);
+
+ objects_.addSizeOfExcludingThis(mallocSizeOf, innerViewsArg,
+ objectMetadataTablesArg,
+ nonSyntacticLexicalEnvironmentsArg);
+
+ *savedStacksSet += savedStacks_.sizeOfExcludingThis(mallocSizeOf);
+
+ if (jitRealm_) {
+ *jitRealm += jitRealm_->sizeOfIncludingThis(mallocSizeOf);
+ }
+}
+
+bool Realm::shouldCaptureStackForThrow() {
+ // Determine whether a stack trace should be captured for throw-statements (or
+ // similar) in JS code in this realm. We don't want to do this unconditionally
+ // because capturing stacks is slow and some scripts throw a lot of
+ // exceptions.
+ //
+ // Note: this is unrelated to Error.stack! That property is observable from
+ // JS code so we can't use these heuristics there. The code here is mostly
+ // relevant for uncaught exceptions that are not Error objects.
+
+ // To match other browsers, we always capture a stack trace if the realm is a
+ // debuggee (this includes the devtools console being open) or if unlimited
+ // stack traces have been enabled for this realm (used in automation).
+ if (isDebuggee() || isUnlimitedStacksCapturingEnabled) {
+ return true;
+ }
+
+ // Also always capture for chrome code. This is code we control and this helps
+ // debugging.
+ if (principals() &&
+ principals() == runtimeFromMainThread()->trustedPrincipals()) {
+ return true;
+ }
+
+ // Else, capture the stack only for the first N exceptions so that we can
+ // still show stack traces for scripts that don't throw a lot of exceptions
+ // (if the console is opened later).
+ static constexpr uint16_t MaxStacksCapturedForThrow = 50;
+ if (numStacksCapturedForThrow_ > MaxStacksCapturedForThrow) {
+ return false;
+ }
+ numStacksCapturedForThrow_++;
+ return true;
+}
+
+mozilla::HashCodeScrambler Realm::randomHashCodeScrambler() {
+ return mozilla::HashCodeScrambler(randomKeyGenerator_.next(),
+ randomKeyGenerator_.next());
+}
+
+void AutoSetNewObjectMetadata::setPendingMetadata() {
+ JSObject* obj = cx_->realm()->getAndClearObjectPendingMetadata();
+ if (!obj) {
+ return;
+ }
+
+ MOZ_ASSERT(obj->getClass()->shouldDelayMetadataBuilder());
+
+ if (cx_->isExceptionPending()) {
+ return;
+ }
+
+ // This function is called from a destructor that often runs upon exit from
+ // a function that is returning an unrooted pointer to a Cell. The
+ // allocation metadata callback often allocates; if it causes a GC, then the
+ // Cell pointer being returned won't be traced or relocated.
+ //
+ // The only extant callbacks are those internal to SpiderMonkey that
+ // capture the JS stack. In fact, we're considering removing general
+  // callbacks altogether in bug 1236748. Since it's not running arbitrary
+ // code, it's adequate to simply suppress GC while we run the callback.
+ gc::AutoSuppressGC autoSuppressGC(cx_);
+
+ (void)SetNewObjectMetadata(cx_, obj);
+}
+
+JS_PUBLIC_API void gc::TraceRealm(JSTracer* trc, JS::Realm* realm,
+ const char* name) {
+ // The way GC works with compartments is basically incomprehensible.
+ // For Realms, what we want is very simple: each Realm has a strong
+ // reference to its GlobalObject, and vice versa.
+ //
+ // Here we simply trace our side of that edge. During GC,
+ // GCRuntime::traceRuntimeCommon() marks all other realm roots, for
+ // all realms.
+ realm->traceGlobalData(trc);
+}
+
+JS_PUBLIC_API JS::Realm* JS::GetCurrentRealmOrNull(JSContext* cx) {
+ return cx->realm();
+}
+
+JS_PUBLIC_API JS::Realm* JS::GetObjectRealmOrNull(JSObject* obj) {
+ return IsCrossCompartmentWrapper(obj) ? nullptr : obj->nonCCWRealm();
+}
+
+JS_PUBLIC_API void* JS::GetRealmPrivate(JS::Realm* realm) {
+ return realm->realmPrivate();
+}
+
+JS_PUBLIC_API void JS::SetRealmPrivate(JS::Realm* realm, void* data) {
+ realm->setRealmPrivate(data);
+}
+
+JS_PUBLIC_API void JS::SetDestroyRealmCallback(
+ JSContext* cx, JS::DestroyRealmCallback callback) {
+ cx->runtime()->destroyRealmCallback = callback;
+}
+
+JS_PUBLIC_API void JS::SetRealmNameCallback(JSContext* cx,
+ JS::RealmNameCallback callback) {
+ cx->runtime()->realmNameCallback = callback;
+}
+
+JS_PUBLIC_API JSObject* JS::GetRealmGlobalOrNull(JS::Realm* realm) {
+ return realm->maybeGlobal();
+}
+
+JS_PUBLIC_API bool JS::InitRealmStandardClasses(JSContext* cx) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ return GlobalObject::initStandardClasses(cx, cx->global());
+}
+
+JS_PUBLIC_API bool JS::MaybeFreezeCtorAndPrototype(JSContext* cx,
+ HandleObject ctor,
+ HandleObject maybeProto) {
+ if (MOZ_LIKELY(!cx->realm()->creationOptions().freezeBuiltins())) {
+ return true;
+ }
+ if (!SetIntegrityLevel(cx, ctor, IntegrityLevel::Frozen)) {
+ return false;
+ }
+ if (maybeProto) {
+ if (!SetIntegrityLevel(cx, maybeProto, IntegrityLevel::Sealed)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+JS_PUBLIC_API JSObject* JS::GetRealmObjectPrototype(JSContext* cx) {
+ CHECK_THREAD(cx);
+ return &cx->global()->getObjectPrototype();
+}
+
+JS_PUBLIC_API JS::Handle<JSObject*> JS::GetRealmObjectPrototypeHandle(
+ JSContext* cx) {
+ return cx->global()->getObjectPrototypeHandle();
+}
+
+JS_PUBLIC_API JSObject* JS::GetRealmFunctionPrototype(JSContext* cx) {
+ CHECK_THREAD(cx);
+ return &cx->global()->getFunctionPrototype();
+}
+
+JS_PUBLIC_API JSObject* JS::GetRealmArrayPrototype(JSContext* cx) {
+ CHECK_THREAD(cx);
+ return GlobalObject::getOrCreateArrayPrototype(cx, cx->global());
+}
+
+JS_PUBLIC_API JSObject* JS::GetRealmErrorPrototype(JSContext* cx) {
+ CHECK_THREAD(cx);
+ return GlobalObject::getOrCreateCustomErrorPrototype(cx, cx->global(),
+ JSEXN_ERR);
+}
+
+JS_PUBLIC_API JSObject* JS::GetRealmIteratorPrototype(JSContext* cx) {
+ CHECK_THREAD(cx);
+ return GlobalObject::getOrCreateIteratorPrototype(cx, cx->global());
+}
+
+JS_PUBLIC_API JSObject* JS::GetRealmAsyncIteratorPrototype(JSContext* cx) {
+ CHECK_THREAD(cx);
+ return GlobalObject::getOrCreateAsyncIteratorPrototype(cx, cx->global());
+}
+
+JS_PUBLIC_API JSObject* JS::GetRealmKeyObject(JSContext* cx) {
+ return GlobalObject::getOrCreateRealmKeyObject(cx, cx->global());
+}
+
+JS_PUBLIC_API Realm* JS::GetFunctionRealm(JSContext* cx, HandleObject objArg) {
+ // https://tc39.github.io/ecma262/#sec-getfunctionrealm
+ // 7.3.22 GetFunctionRealm ( obj )
+
+ CHECK_THREAD(cx);
+ cx->check(objArg);
+
+ RootedObject obj(cx, objArg);
+ while (true) {
+ obj = CheckedUnwrapStatic(obj);
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+
+ // Step 1.
+ MOZ_ASSERT(IsCallable(obj));
+
+ // Steps 2 and 3. We use a loop instead of recursion to unwrap bound
+ // functions.
+ if (obj->is<JSFunction>()) {
+ return obj->as<JSFunction>().realm();
+ }
+ if (obj->is<BoundFunctionObject>()) {
+ obj = obj->as<BoundFunctionObject>().getTarget();
+ continue;
+ }
+
+ // WrappedFunctionObjects also have a [[Realm]] internal slot,
+ // which is the nonCCWRealm by construction.
+ if (obj->is<WrappedFunctionObject>()) {
+ return obj->nonCCWRealm();
+ }
+
+ // Step 4.
+ if (IsScriptedProxy(obj)) {
+ // Steps 4.a-b.
+ JSObject* proxyTarget = GetProxyTargetObject(obj);
+ if (!proxyTarget) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_PROXY_REVOKED);
+ return nullptr;
+ }
+
+ // Step 4.c.
+ obj = proxyTarget;
+ continue;
+ }
+
+ // Step 5.
+ return cx->realm();
+ }
+}
diff --git a/js/src/vm/Realm.h b/js/src/vm/Realm.h
new file mode 100644
index 0000000000..bf6e4c17ca
--- /dev/null
+++ b/js/src/vm/Realm.h
@@ -0,0 +1,886 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Realm_h
+#define vm_Realm_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Variant.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+#include <stddef.h>
+
+#include "builtin/Array.h"
+#include "gc/Barrier.h"
+#include "js/GCVariant.h"
+#include "js/RealmOptions.h"
+#include "js/TelemetryTimers.h"
+#include "js/UniquePtr.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/JSContext.h"
+#include "vm/PromiseLookup.h" // js::PromiseLookup
+#include "vm/RegExpShared.h"
+#include "vm/SavedStacks.h"
+#include "wasm/WasmRealm.h"
+
+namespace js {
+
+namespace coverage {
+class LCovRealm;
+} // namespace coverage
+
+namespace jit {
+class JitRealm;
+} // namespace jit
+
+class AutoRestoreRealmDebugMode;
+class Debugger;
+class GlobalObject;
+class GlobalObjectData;
+class GlobalLexicalEnvironmentObject;
+class NonSyntacticLexicalEnvironmentObject;
+struct IdValuePair;
+struct NativeIterator;
+
+/*
+ * A single-entry cache for some base-10 double-to-string conversions. This
+ * helps date-format-xparb.js. It also avoids skewing the results for
+ * v8-splay.js when measured by the SunSpider harness, where the splay tree
+ * initialization (which includes many repeated double-to-string conversions)
+ * is erroneously included in the measurement; see bug 562553.
+ */
+class DtoaCache {
+ double dbl;
+ int base;
+ JSLinearString* str; // if str==nullptr, dbl and base are not valid
+
+ public:
+ DtoaCache() : str(nullptr) {}
+ void purge() { str = nullptr; }
+
+ JSLinearString* lookup(int b, double d) {
+ return str && b == base && d == dbl ? str : nullptr;
+ }
+
+ void cache(int b, double d, JSLinearString* s) {
+ base = b;
+ dbl = d;
+ str = s;
+ }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkCacheAfterMovingGC();
+#endif
+};
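+
+// Usage sketch (illustrative; NumberToStringSlow is a hypothetical helper):
+//
+//   if (JSLinearString* cached = realm->dtoaCache.lookup(base, d)) {
+//     return cached;
+//   }
+//   JSLinearString* str = NumberToStringSlow(cx, base, d);
+//   if (str) {
+//     realm->dtoaCache.cache(base, d, str);
+//   }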
+
+// Cache to speed up the group/shape lookup in ProxyObject::create. A proxy's
+// shape is only determined by the Class + proto, so a small cache for this is
+// very effective in practice.
+class NewProxyCache {
+ struct Entry {
+ Shape* shape;
+ };
+ static const size_t NumEntries = 4;
+ mozilla::UniquePtr<Entry[], JS::FreePolicy> entries_;
+
+ public:
+ MOZ_ALWAYS_INLINE bool lookup(const JSClass* clasp, TaggedProto proto,
+ Shape** shape) const {
+ if (!entries_) {
+ return false;
+ }
+ for (size_t i = 0; i < NumEntries; i++) {
+ const Entry& entry = entries_[i];
+ if (entry.shape && entry.shape->getObjectClass() == clasp &&
+ entry.shape->proto() == proto) {
+ *shape = entry.shape;
+ return true;
+ }
+ }
+ return false;
+ }
+ void add(Shape* shape) {
+ MOZ_ASSERT(shape);
+ if (!entries_) {
+ entries_.reset(js_pod_calloc<Entry>(NumEntries));
+ if (!entries_) {
+ return;
+ }
+ } else {
+ for (size_t i = NumEntries - 1; i > 0; i--) {
+ entries_[i] = entries_[i - 1];
+ }
+ }
+ entries_[0].shape = shape;
+ }
+ void purge() { entries_.reset(); }
+};
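+
+// Design note: add() behaves as a small most-recently-used cache; existing
+// entries shift toward the back and the new shape is written at index 0, so
+// lookup() scans newest-first.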
+
+// Cache for NewPlainObjectWithProperties. When the list of properties matches
+// a recently created object's shape, we can use this shape directly.
+class NewPlainObjectWithPropsCache {
+ static const size_t NumEntries = 4;
+ mozilla::Array<SharedShape*, NumEntries> entries_;
+
+ public:
+ NewPlainObjectWithPropsCache() { purge(); }
+
+ SharedShape* lookup(IdValuePair* properties, size_t nproperties) const;
+ void add(SharedShape* shape);
+
+ void purge() {
+ for (size_t i = 0; i < NumEntries; i++) {
+ entries_[i] = nullptr;
+ }
+ }
+};
+
+// [SMDOC] Object MetadataBuilder API
+//
+// We must ensure that all newly allocated JSObjects get their metadata
+// set. However, metadata builders may require the new object be in a sane
+// state (eg, have its reserved slots initialized so they can get the
+// sizeOfExcludingThis of the object). Therefore, for objects of certain
+// JSClasses (those marked with JSCLASS_DELAY_METADATA_BUILDER), it is not safe
+// for the allocation paths to call the object metadata builder
+// immediately. Instead, the JSClass-specific "constructor" C++ function up the
+// stack makes a promise that it will ensure that the new object has its
+// metadata set after the object is initialized.
+//
+// The js::AutoSetNewObjectMetadata RAII class provides an ergonomic way for
+// constructor functions to do this.
+//
+// In the presence of internal errors, we do not set the new object's metadata
+// (if it was even allocated).
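+//
+// A sketch of the intended pattern (illustrative; MyObject and its slots are
+// placeholders):
+//
+//   MyObject* MyObject::create(JSContext* cx) {
+//     AutoSetNewObjectMetadata metadata(cx);
+//     MyObject* obj = /* ...allocate; the class is marked with
+//                        JSCLASS_DELAY_METADATA_BUILDER... */;
+//     if (!obj) {
+//       return nullptr;  // internal error: no metadata is set
+//     }
+//     /* ...initialize reserved slots... */
+//     return obj;  // ~AutoSetNewObjectMetadata sets the pending metadata
+//   }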
+
+class PropertyIteratorObject;
+
+struct IteratorHashPolicy {
+ struct Lookup {
+ Shape** shapes;
+ size_t numShapes;
+ HashNumber shapesHash;
+
+ Lookup(Shape** shapes, size_t numShapes, HashNumber shapesHash)
+ : shapes(shapes), numShapes(numShapes), shapesHash(shapesHash) {
+ MOZ_ASSERT(numShapes > 0);
+ }
+ };
+ static HashNumber hash(const Lookup& lookup) { return lookup.shapesHash; }
+ static bool match(PropertyIteratorObject* obj, const Lookup& lookup);
+};
+
+class DebugEnvironments;
+class ObjectWeakMap;
+
+// ObjectRealm stores various tables and other state associated with particular
+// objects in a realm. To make sure the correct ObjectRealm is used for an
+// object, use of the ObjectRealm::get(obj) static method is required.
+class ObjectRealm {
+ // All non-syntactic lexical environments in the realm. These are kept in a
+ // map because when loading scripts into a non-syntactic environment, we
+ // need to use the same lexical environment to persist lexical bindings.
+ js::UniquePtr<js::ObjectWeakMap> nonSyntacticLexicalEnvironments_;
+
+ ObjectRealm(const ObjectRealm&) = delete;
+ void operator=(const ObjectRealm&) = delete;
+
+ public:
+ // Map from array buffers to views sharing that storage.
+ JS::WeakCache<js::InnerViewTable> innerViews;
+
+ // Keep track of the metadata objects which can be associated with each JS
+ // object. Both keys and values are in this realm.
+ js::UniquePtr<js::ObjectWeakMap> objectMetadataTable;
+
+ using IteratorCache =
+ js::HashSet<js::PropertyIteratorObject*, js::IteratorHashPolicy,
+ js::ZoneAllocPolicy>;
+ IteratorCache iteratorCache;
+
+ static inline ObjectRealm& get(const JSObject* obj);
+
+ explicit ObjectRealm(JS::Zone* zone);
+
+ void finishRoots();
+ void trace(JSTracer* trc);
+ void sweepAfterMinorGC(JSTracer* trc);
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* innerViewsArg,
+ size_t* objectMetadataTablesArg,
+ size_t* nonSyntacticLexicalEnvironmentsArg);
+
+ js::NonSyntacticLexicalEnvironmentObject*
+ getOrCreateNonSyntacticLexicalEnvironment(JSContext* cx,
+ js::HandleObject enclosing);
+ js::NonSyntacticLexicalEnvironmentObject*
+ getOrCreateNonSyntacticLexicalEnvironment(JSContext* cx,
+ js::HandleObject enclosing,
+ js::HandleObject key,
+ js::HandleObject thisv);
+ js::NonSyntacticLexicalEnvironmentObject* getNonSyntacticLexicalEnvironment(
+ JSObject* key) const;
+};
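+
+// Access always goes through the static getter, e.g. (illustrative):
+//
+//   ObjectRealm& objectRealm = ObjectRealm::get(obj);
+//   /* ...use objectRealm.iteratorCache, objectRealm.innerViews, ... */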
+
+} // namespace js
+
+class JS::Realm : public JS::shadow::Realm {
+ JS::Zone* zone_;
+ JSRuntime* runtime_;
+
+ const JS::RealmCreationOptions creationOptions_;
+ JS::RealmBehaviors behaviors_;
+
+ friend struct ::JSContext;
+ js::WeakHeapPtr<js::GlobalObject*> global_;
+
+ // Note: this is private to enforce use of ObjectRealm::get(obj).
+ js::ObjectRealm objects_;
+ friend js::ObjectRealm& js::ObjectRealm::get(const JSObject*);
+
+ // See the "Object MetadataBuilder API" comment.
+ JSObject* objectPendingMetadata_ = nullptr;
+#ifdef DEBUG
+ uint32_t numActiveAutoSetNewObjectMetadata_ = 0;
+#endif
+
+ // Random number generator for Math.random().
+ mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG>
+ randomNumberGenerator_;
+
+ // Random number generator for randomHashCodeScrambler().
+ mozilla::non_crypto::XorShift128PlusRNG randomKeyGenerator_;
+
+ JSPrincipals* principals_ = nullptr;
+
+ js::UniquePtr<js::jit::JitRealm> jitRealm_;
+
+ // Bookkeeping information for debug scope objects.
+ js::UniquePtr<js::DebugEnvironments> debugEnvs_;
+
+ js::SavedStacks savedStacks_;
+
+ // Used by memory reporters and invalid otherwise.
+ JS::RealmStats* realmStats_ = nullptr;
+
+ const js::AllocationMetadataBuilder* allocationMetadataBuilder_ = nullptr;
+ void* realmPrivate_ = nullptr;
+
+ // There are two ways to enter a realm:
+ //
+ // (1) AutoRealm (and JSAutoRealm, JS::EnterRealm)
+ // (2) When calling a cross-realm (but same-compartment) function in JIT
+ // code.
+ //
+ // This field only accounts for (1), to keep the JIT code as simple as
+ // possible.
+ //
+ // An important invariant is that the JIT can only switch to a different
+ // realm within the same compartment, so whenever that happens there must
+ // always be a same-compartment realm with enterRealmDepthIgnoringJit_ > 0.
+ // This lets us set Compartment::hasEnteredRealm without walking the
+ // stack.
+ unsigned enterRealmDepthIgnoringJit_ = 0;
+
+ public:
+  // Various timers for collecting time spent delazifying, jit compiling,
+  // executing, etc.
+ JS::JSTimers timers;
+
+ struct DebuggerVectorEntry {
+ // The debugger relies on iterating through the DebuggerVector to know what
+ // debuggers to notify about certain actions, which it does using this
+ // pointer. We need an explicit Debugger* because the JSObject* from
+ // the DebuggerDebuggeeLink to the Debugger is only set some of the time.
+    // This `Debugger*` pointer could also live on the DebuggerDebuggeeLink
+    // itself, but that would require every place that iterates over the
+    // realm's DebuggerVector to also traverse the CCW, which seems
+    // needlessly complicated.
+ js::WeakHeapPtr<js::Debugger*> dbg;
+
+ // This links to the debugger's DebuggerDebuggeeLink object, via a CCW.
+ // Tracing this link from the realm allows the debugger to define
+ // whether pieces of the debugger should be held live by a given realm.
+ js::HeapPtr<JSObject*> debuggerLink;
+
+ DebuggerVectorEntry(js::Debugger* dbg_, JSObject* link);
+ };
+ using DebuggerVector =
+ js::Vector<DebuggerVectorEntry, 0, js::ZoneAllocPolicy>;
+
+ private:
+ DebuggerVector debuggers_;
+
+ enum {
+ IsDebuggee = 1 << 0,
+ DebuggerObservesAllExecution = 1 << 1,
+ DebuggerObservesAsmJS = 1 << 2,
+ DebuggerObservesCoverage = 1 << 3,
+ DebuggerObservesWasm = 1 << 4,
+ };
+ unsigned debugModeBits_ = 0;
+ friend class js::AutoRestoreRealmDebugMode;
+
+ bool isSystem_ = false;
+ bool allocatedDuringIncrementalGC_;
+ bool initializingGlobal_ = true;
+
+ js::UniquePtr<js::coverage::LCovRealm> lcovRealm_ = nullptr;
+
+ public:
+ // WebAssembly state for the realm.
+ js::wasm::Realm wasm;
+
+ js::RegExpRealm regExps;
+
+ js::DtoaCache dtoaCache;
+ js::NewProxyCache newProxyCache;
+ js::NewPlainObjectWithPropsCache newPlainObjectWithPropsCache;
+ js::ArraySpeciesLookup arraySpeciesLookup;
+ js::PromiseLookup promiseLookup;
+
+ // Last time at which an animation was played for this realm.
+ js::MainThreadData<mozilla::TimeStamp> lastAnimationTime;
+
+ /*
+ * For generational GC, record whether a write barrier has added this
+ * realm's global to the store buffer since the last minor GC.
+ *
+ * This is used to avoid calling into the VM every time a nursery object is
+ * written to a property of the global.
+ */
+ uint32_t globalWriteBarriered = 0;
+
+ // Counter for shouldCaptureStackForThrow.
+ uint16_t numStacksCapturedForThrow_ = 0;
+
+#ifdef DEBUG
+ bool firedOnNewGlobalObject = false;
+#endif
+
+ // True if all incoming wrappers have been nuked. This happens when
+ // NukeCrossCompartmentWrappers is called with the NukeAllReferences option.
+ // This prevents us from creating new wrappers for the compartment.
+ bool nukedIncomingWrappers = false;
+
+ // Enable async stack capturing for this realm even if
+ // JS::ContextOptions::asyncStackCaptureDebuggeeOnly_ is true.
+ //
+ // No-op when JS::ContextOptions::asyncStack_ is false, or
+ // JS::ContextOptions::asyncStackCaptureDebuggeeOnly_ is false.
+ //
+  // This can be used as a lightweight alternative to making the global a
+  // debuggee, if async stack capturing is necessary but no other debugging
+  // features are used.
+ bool isAsyncStackCapturingEnabled = false;
+
+  // Allow collecting more than 50 stack traces for throw even if the global
+  // is not a debuggee.
+ //
+  // Similarly to isAsyncStackCapturingEnabled, this is a lightweight
+  // alternative to making the global a debuggee, when no actual debugging
+  // features are required.
+ bool isUnlimitedStacksCapturingEnabled = false;
+
+ private:
+ void updateDebuggerObservesFlag(unsigned flag);
+
+ Realm(const Realm&) = delete;
+ void operator=(const Realm&) = delete;
+
+ public:
+ Realm(JS::Compartment* comp, const JS::RealmOptions& options);
+ ~Realm();
+
+ void init(JSContext* cx, JSPrincipals* principals);
+ void destroy(JS::GCContext* gcx);
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* realmObject, size_t* realmTables,
+ size_t* innerViewsArg,
+ size_t* objectMetadataTablesArg,
+ size_t* savedStacksSet,
+ size_t* nonSyntacticLexicalEnvironmentsArg,
+ size_t* jitRealm);
+
+ JS::Zone* zone() { return zone_; }
+ const JS::Zone* zone() const { return zone_; }
+
+ JSRuntime* runtimeFromMainThread() const {
+ MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtime_));
+ return runtime_;
+ }
+
+ // Note: Unrestricted access to the runtime from an arbitrary thread
+ // can easily lead to races. Use this method very carefully.
+ JSRuntime* runtimeFromAnyThread() const { return runtime_; }
+
+ const JS::RealmCreationOptions& creationOptions() const {
+ return creationOptions_;
+ }
+
+ // NOTE: Do not provide accessor for mutable reference.
+ // Modifying RealmBehaviors after creating a realm can result in
+ // inconsistency.
+ const JS::RealmBehaviors& behaviors() const { return behaviors_; }
+
+ void setNonLive() { behaviors_.setNonLive(); }
+
+ /* Whether to preserve JIT code on non-shrinking GCs. */
+ bool preserveJitCode() { return creationOptions_.preserveJitCode(); }
+
+ /* The global object for this realm.
+ *
+ * Note: the global_ field is null briefly during GC, after the global
+ * object is collected; but when that happens the Realm is destroyed during
+   * the same GC.
+ *
+ * In contrast, JSObject::global() is infallible because marking a JSObject
+ * always marks its global as well.
+ */
+ inline js::GlobalObject* maybeGlobal() const;
+
+ /* An unbarriered getter for use while tracing. */
+ js::GlobalObject* unsafeUnbarrieredMaybeGlobal() const {
+ return global_.unbarrieredGet();
+ }
+
+ /* True if a global exists and it's not being collected. */
+ inline bool hasLiveGlobal() const;
+
+ /* True if a global exists and has been successfully initialized. */
+ inline bool hasInitializedGlobal() const;
+
+ inline void initGlobal(js::GlobalObject& global);
+ void clearInitializingGlobal() { initializingGlobal_ = false; }
+
+ /*
+ * This method traces data that is live iff we know that this realm's
+ * global is still live.
+ */
+ void traceGlobalData(JSTracer* trc);
+
+ void traceWeakGlobalEdge(JSTracer* trc);
+
+ /*
+ * This method traces Realm-owned GC roots that are considered live
+ * regardless of whether the realm's global is still live.
+ */
+ void traceRoots(JSTracer* trc,
+ js::gc::GCRuntime::TraceOrMarkRuntime traceOrMark);
+ /*
+ * This method clears out tables of roots in preparation for the final GC.
+ */
+ void finishRoots();
+
+ void sweepAfterMinorGC(JSTracer* trc);
+ void traceWeakDebugEnvironmentEdges(JSTracer* trc);
+ void traceWeakRegExps(JSTracer* trc);
+
+ void clearScriptCounts();
+ void clearScriptLCov();
+
+ void purge();
+
+ void fixupAfterMovingGC(JSTracer* trc);
+
+ void enter() { enterRealmDepthIgnoringJit_++; }
+ void leave() {
+ MOZ_ASSERT(enterRealmDepthIgnoringJit_ > 0);
+ enterRealmDepthIgnoringJit_--;
+ }
+ bool hasBeenEnteredIgnoringJit() const {
+ return enterRealmDepthIgnoringJit_ > 0;
+ }
+ bool shouldTraceGlobal() const {
+ // If we entered this realm in JIT code, there must be a script and
+ // function on the stack for this realm, so the global will definitely
+ // be traced and it's safe to return false here.
+ return hasBeenEnteredIgnoringJit();
+ }
+
+ bool hasAllocationMetadataBuilder() const {
+ return allocationMetadataBuilder_;
+ }
+ const js::AllocationMetadataBuilder* getAllocationMetadataBuilder() const {
+ return allocationMetadataBuilder_;
+ }
+ const void* addressOfMetadataBuilder() const {
+ return &allocationMetadataBuilder_;
+ }
+ bool isRecordingAllocations();
+ void setAllocationMetadataBuilder(
+ const js::AllocationMetadataBuilder* builder);
+ void forgetAllocationMetadataBuilder();
+ void setNewObjectMetadata(JSContext* cx, JS::HandleObject obj);
+
+ bool hasObjectPendingMetadata() const {
+ MOZ_ASSERT_IF(objectPendingMetadata_, hasAllocationMetadataBuilder());
+ return objectPendingMetadata_ != nullptr;
+ }
+ void setObjectPendingMetadata(JSObject* obj) {
+ MOZ_ASSERT(numActiveAutoSetNewObjectMetadata_ > 0,
+ "Must not use JSCLASS_DELAY_METADATA_BUILDER without "
+ "AutoSetNewObjectMetadata");
+ MOZ_ASSERT(!objectPendingMetadata_);
+ MOZ_ASSERT(obj);
+ if (MOZ_UNLIKELY(hasAllocationMetadataBuilder())) {
+ objectPendingMetadata_ = obj;
+ }
+ }
+ JSObject* getAndClearObjectPendingMetadata() {
+ MOZ_ASSERT(hasAllocationMetadataBuilder());
+ JSObject* obj = objectPendingMetadata_;
+ objectPendingMetadata_ = nullptr;
+ return obj;
+ }
+
+#ifdef DEBUG
+ void incNumActiveAutoSetNewObjectMetadata() {
+ numActiveAutoSetNewObjectMetadata_++;
+ }
+ void decNumActiveAutoSetNewObjectMetadata() {
+ MOZ_ASSERT(numActiveAutoSetNewObjectMetadata_ > 0);
+ numActiveAutoSetNewObjectMetadata_--;
+ }
+#endif
+
+ void* realmPrivate() const { return realmPrivate_; }
+ void setRealmPrivate(void* p) { realmPrivate_ = p; }
+
+ // This should only be called when it is non-null, i.e. during memory
+ // reporting.
+ JS::RealmStats& realmStats() {
+ // We use MOZ_RELEASE_ASSERT here because in bug 1132502 there was some
+ // (inconclusive) evidence that realmStats_ can be nullptr unexpectedly.
+ MOZ_RELEASE_ASSERT(realmStats_);
+ return *realmStats_;
+ }
+ void nullRealmStats() {
+ MOZ_ASSERT(realmStats_);
+ realmStats_ = nullptr;
+ }
+ void setRealmStats(JS::RealmStats* newStats) {
+ MOZ_ASSERT(!realmStats_ && newStats);
+ realmStats_ = newStats;
+ }
+
+ inline bool marked() const;
+ void clearAllocatedDuringGC() { allocatedDuringIncrementalGC_ = false; }
+
+ /*
+   * The principals associated with this realm. Note that several realms
+   * may share the same principals, and that a realm may change principals
+   * during its lifetime (e.g. in the case of lazy parsing).
+ */
+ JSPrincipals* principals() { return principals_; }
+ void setPrincipals(JSPrincipals* principals) { principals_ = principals; }
+
+ bool isSystem() const { return isSystem_; }
+ //
+ // The Debugger observes execution on a frame-by-frame basis. The
+ // invariants of Realm's debug mode bits, JSScript::isDebuggee,
+ // InterpreterFrame::isDebuggee, and BaselineFrame::isDebuggee are
+ // enumerated below.
+ //
+ // 1. When a realm's isDebuggee() == true, relazification and lazy
+ // parsing are disabled.
+ //
+ // Whether AOT wasm is disabled is togglable by the Debugger API. By
+ // default it is disabled. See debuggerObservesAsmJS below.
+ //
+ // 2. When a realm's debuggerObservesAllExecution() == true, all of
+ // the realm's scripts are considered debuggee scripts.
+ //
+ // 3. A script is considered a debuggee script either when, per above, its
+ // realm is observing all execution, or if it has breakpoints set.
+ //
+ // 4. A debuggee script always pushes a debuggee frame.
+ //
+ // 5. A debuggee frame calls all slow path Debugger hooks in the
+ // Interpreter and Baseline. A debuggee frame implies that its script's
+ // BaselineScript, if extant, has been compiled with debug hook calls.
+ //
+ // 6. A debuggee script or a debuggee frame (i.e., during OSR) ensures
+ // that the compiled BaselineScript is compiled with debug hook calls
+ // when attempting to enter Baseline.
+ //
+ // 7. A debuggee script or a debuggee frame (i.e., during OSR) does not
+ // attempt to enter Ion.
+ //
+ // Note that a debuggee frame may exist without its script being a
+ // debuggee script. e.g., Debugger.Frame.prototype.eval only marks the
+ // frame in which it is evaluating as a debuggee frame.
+ //
+
+ // True if this realm's global is a debuggee of some Debugger
+ // object.
+ bool isDebuggee() const { return !!(debugModeBits_ & IsDebuggee); }
+
+ void setIsDebuggee();
+ void unsetIsDebuggee();
+
+ DebuggerVector& getDebuggers(const JS::AutoRequireNoGC& nogc) {
+ return debuggers_;
+  }
+ bool hasDebuggers() const { return !debuggers_.empty(); }
+
+  // True if this realm's global is a debuggee of some Debugger
+  // object with a live hook that observes all execution; e.g.,
+  // onEnterFrame.
+ bool debuggerObservesAllExecution() const {
+ static const unsigned Mask = IsDebuggee | DebuggerObservesAllExecution;
+ return (debugModeBits_ & Mask) == Mask;
+ }
+ void updateDebuggerObservesAllExecution() {
+ updateDebuggerObservesFlag(DebuggerObservesAllExecution);
+ }
+
+ // True if this realm's global is a debuggee of some Debugger object
+ // whose allowUnobservedAsmJS flag is false.
+ bool debuggerObservesAsmJS() const {
+ static const unsigned Mask = IsDebuggee | DebuggerObservesAsmJS;
+ return (debugModeBits_ & Mask) == Mask;
+ }
+ void updateDebuggerObservesAsmJS() {
+ updateDebuggerObservesFlag(DebuggerObservesAsmJS);
+ }
+
+ // True if this realm's global is a debuggee of some Debugger object
+ // whose allowUnobservedWasm flag is false.
+ //
+ // Note that since AOT wasm functions cannot bail out, this flag really
+ // means "observe wasm from this point forward". We cannot make
+ // already-compiled wasm code observable to Debugger.
+ bool debuggerObservesWasm() const {
+ static const unsigned Mask = IsDebuggee | DebuggerObservesWasm;
+ return (debugModeBits_ & Mask) == Mask;
+ }
+ void updateDebuggerObservesWasm() {
+ updateDebuggerObservesFlag(DebuggerObservesWasm);
+ }
+
+ // True if this realm's global is a debuggee of some Debugger object
+ // whose collectCoverageInfo flag is true.
+ bool debuggerObservesCoverage() const {
+ static const unsigned Mask = DebuggerObservesCoverage;
+ return (debugModeBits_ & Mask) == Mask;
+ }
+ void updateDebuggerObservesCoverage();
+
+ // Returns true if the Debugger API is collecting code coverage data for this
+ // realm or if the process-wide LCov option is enabled.
+ bool collectCoverageForDebug() const;
+
+ // Get or allocate the associated LCovRealm.
+ js::coverage::LCovRealm* lcovRealm();
+
+ bool shouldCaptureStackForThrow();
+
+ // Initializes randomNumberGenerator if needed.
+ mozilla::non_crypto::XorShift128PlusRNG& getOrCreateRandomNumberGenerator();
+
+ const mozilla::non_crypto::XorShift128PlusRNG*
+ addressOfRandomNumberGenerator() const {
+ return randomNumberGenerator_.ptr();
+ }
+
+ mozilla::HashCodeScrambler randomHashCodeScrambler();
+
+ bool ensureJitRealmExists(JSContext* cx);
+ void traceWeakEdgesInJitRealm(JSTracer* trc);
+
+ js::jit::JitRealm* jitRealm() { return jitRealm_.get(); }
+
+ js::DebugEnvironments* debugEnvs() { return debugEnvs_.get(); }
+ js::UniquePtr<js::DebugEnvironments>& debugEnvsRef() { return debugEnvs_; }
+
+ js::SavedStacks& savedStacks() { return savedStacks_; }
+
+ // Recompute the probability with which this realm should record
+ // profiling data (stack traces, allocations log, etc.) about each
+ // allocation. We first consult the JS runtime to see if it is recording
+ // allocations, and if not then check the probabilities requested by the
+ // Debugger instances observing us, if any.
+ void chooseAllocationSamplingProbability() {
+ savedStacks_.chooseSamplingProbability(this);
+ }
+
+ void traceWeakSavedStacks(JSTracer* trc);
+
+ static constexpr size_t offsetOfCompartment() {
+ return offsetof(JS::Realm, compartment_);
+ }
+ static constexpr size_t offsetOfRegExps() {
+ return offsetof(JS::Realm, regExps);
+ }
+ static constexpr size_t offsetOfJitRealm() {
+ return offsetof(JS::Realm, jitRealm_);
+ }
+ static constexpr size_t offsetOfDebugModeBits() {
+ return offsetof(JS::Realm, debugModeBits_);
+ }
+ static constexpr uint32_t debugModeIsDebuggeeBit() { return IsDebuggee; }
+
+ // Note: similar to cx->global(), JIT code can omit the read barrier for the
+ // context's active global.
+ static constexpr size_t offsetOfActiveGlobal() {
+ static_assert(sizeof(global_) == sizeof(uintptr_t),
+ "JIT code assumes field is pointer-sized");
+ return offsetof(JS::Realm, global_);
+ }
+
+ private:
+ void purgeForOfPicChain();
+};
+
+inline js::Handle<js::GlobalObject*> JSContext::global() const {
+ /*
+ * It's safe to use |unbarrieredGet()| here because any realm that is on-stack
+ * will be marked automatically, so there's no need for a read barrier on
+ * it. Once the realm is popped, the handle is no longer safe to use.
+ */
+ MOZ_ASSERT(realm_, "Caller needs to enter a realm first");
+ return js::Handle<js::GlobalObject*>::fromMarkedLocation(
+ realm_->global_.unbarrieredAddress());
+}
+
+namespace js {
+
+class MOZ_RAII AssertRealmUnchanged {
+ public:
+ explicit AssertRealmUnchanged(JSContext* cx)
+ : cx(cx), oldRealm(cx->realm()) {}
+
+ ~AssertRealmUnchanged() { MOZ_ASSERT(cx->realm() == oldRealm); }
+
+ protected:
+ JSContext* const cx;
+ JS::Realm* const oldRealm;
+};
+
+// AutoRealm can be used to enter the realm of a JSObject, JSScript or
+// ObjectGroup. It must not be used with cross-compartment wrappers, because
+// CCWs are not associated with a single realm.
+class AutoRealm {
+ JSContext* const cx_;
+ JS::Realm* const origin_;
+
+ public:
+ template <typename T>
+ inline AutoRealm(JSContext* cx, const T& target);
+ inline ~AutoRealm();
+
+ JSContext* context() const { return cx_; }
+ JS::Realm* origin() const { return origin_; }
+
+ protected:
+ inline AutoRealm(JSContext* cx, JS::Realm* target);
+
+ private:
+ AutoRealm(const AutoRealm&) = delete;
+ AutoRealm& operator=(const AutoRealm&) = delete;
+};
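+
+// Typical usage (an illustrative sketch):
+//
+//   {
+//     AutoRealm ar(cx, someObject);  // enter someObject's realm
+//     /* ...operate with cx->realm() == someObject's realm... */
+//   }  // ~AutoRealm restores the previous realm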
+
+class MOZ_RAII AutoAllocInAtomsZone {
+ JSContext* const cx_;
+ JS::Realm* const origin_;
+ AutoAllocInAtomsZone(const AutoAllocInAtomsZone&) = delete;
+ AutoAllocInAtomsZone& operator=(const AutoAllocInAtomsZone&) = delete;
+
+ public:
+ inline explicit AutoAllocInAtomsZone(JSContext* cx);
+ inline ~AutoAllocInAtomsZone();
+};
+
+// During GC we sometimes need to enter a realm when we may have been
+// allocating in the atoms zone. This leaves the atoms zone temporarily. This
+// happens in embedding callbacks and when we need to mark object groups as
+// pretenured.
+class MOZ_RAII AutoMaybeLeaveAtomsZone {
+ JSContext* const cx_;
+ bool wasInAtomsZone_;
+ AutoMaybeLeaveAtomsZone(const AutoMaybeLeaveAtomsZone&) = delete;
+ AutoMaybeLeaveAtomsZone& operator=(const AutoMaybeLeaveAtomsZone&) = delete;
+
+ public:
+ inline explicit AutoMaybeLeaveAtomsZone(JSContext* cx);
+ inline ~AutoMaybeLeaveAtomsZone();
+};
+
+// Enter a realm directly. Only use this where there's no target GC thing
+// to pass to AutoRealm or where you need to avoid the assertions in
+// JS::Compartment::enterCompartmentOf().
+class AutoRealmUnchecked : protected AutoRealm {
+ public:
+ inline AutoRealmUnchecked(JSContext* cx, JS::Realm* target);
+};
+
+// Similar to AutoRealm, but this uses GetFunctionRealm in the spec, and
+// handles both bound functions and proxies.
+//
+// If GetFunctionRealm fails for one of the following reasons, this does
+// nothing:
+// * `fun` is a revoked proxy
+// * unwrapping failed because of a security wrapper
+class AutoFunctionOrCurrentRealm {
+ mozilla::Maybe<AutoRealmUnchecked> ar_;
+
+ public:
+ inline AutoFunctionOrCurrentRealm(JSContext* cx, js::HandleObject fun);
+ ~AutoFunctionOrCurrentRealm() = default;
+
+ private:
+ AutoFunctionOrCurrentRealm(const AutoFunctionOrCurrentRealm&) = delete;
+ AutoFunctionOrCurrentRealm& operator=(const AutoFunctionOrCurrentRealm&) =
+ delete;
+};
+
+/*
+ * Use this to change the behavior of an AutoRealm slightly on error. If
+ * the exception happens to be an Error object, copy it to the origin
+ * compartment instead of wrapping it.
+ */
+class ErrorCopier {
+ mozilla::Maybe<AutoRealm>& ar;
+
+ public:
+ explicit ErrorCopier(mozilla::Maybe<AutoRealm>& ar) : ar(ar) {}
+ ~ErrorCopier();
+};
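+
+// Sketch of the intended use (illustrative):
+//
+//   mozilla::Maybe<AutoRealm> ar;
+//   ar.emplace(cx, target);
+//   ErrorCopier ec(ar);
+//   /* ...if an error is pending when `ec` is destroyed and it is an Error
+//      object, it is copied to the origin realm instead of wrapped... */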
+
+// See the "Object MetadataBuilder API" comment.
+class MOZ_RAII AutoSetNewObjectMetadata {
+ JSContext* cx_;
+
+ AutoSetNewObjectMetadata(const AutoSetNewObjectMetadata& aOther) = delete;
+ void operator=(const AutoSetNewObjectMetadata& aOther) = delete;
+
+ void setPendingMetadata();
+
+ public:
+ explicit inline AutoSetNewObjectMetadata(JSContext* cx) : cx_(cx) {
+#ifdef DEBUG
+ MOZ_ASSERT(cx->isMainThreadContext());
+ MOZ_ASSERT(!cx->realm()->hasObjectPendingMetadata());
+ cx_->realm()->incNumActiveAutoSetNewObjectMetadata();
+#endif
+ }
+ inline ~AutoSetNewObjectMetadata() {
+#ifdef DEBUG
+ cx_->realm()->decNumActiveAutoSetNewObjectMetadata();
+#endif
+ if (MOZ_UNLIKELY(cx_->realm()->hasAllocationMetadataBuilder())) {
+ setPendingMetadata();
+ }
+ }
+};
+
+} /* namespace js */
+
+#endif /* vm_Realm_h */
diff --git a/js/src/vm/RecordTupleShared.cpp b/js/src/vm/RecordTupleShared.cpp
new file mode 100644
index 0000000000..0358865883
--- /dev/null
+++ b/js/src/vm/RecordTupleShared.cpp
@@ -0,0 +1,133 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/RecordTupleShared.h"
+
+#include "NamespaceImports.h"
+#include "builtin/RecordObject.h"
+#include "builtin/TupleObject.h"
+#include "js/Value.h"
+#include "vm/Compartment.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h"
+
+#include "gc/Marking-inl.h"
+
+namespace js {
+
+bool IsExtendedPrimitive(const JSObject& obj) {
+ return obj.is<RecordType>() || obj.is<TupleType>();
+}
+
+template <typename T>
+JSObject* CopyExtendedPrimitiveHelper(JSContext* cx, HandleObject extPrim) {
+ MOZ_ASSERT(gc::MaybeForwardedObjectIs<T>(extPrim));
+ Rooted<T*> in(cx, &extPrim->template as<T>());
+ Rooted<T*> out(cx);
+ if (!T::copy(cx, in, &out)) {
+ return nullptr;
+ }
+ return out;
+}
+
+JSObject* CopyExtendedPrimitive(JSContext* cx, HandleObject extPrim) {
+ if (gc::MaybeForwardedObjectIs<RecordType>(extPrim)) {
+ return CopyExtendedPrimitiveHelper<RecordType>(cx, extPrim);
+ }
+ MOZ_ASSERT(gc::MaybeForwardedObjectIs<TupleType>(extPrim));
+ return CopyExtendedPrimitiveHelper<TupleType>(cx, extPrim);
+}
+
+// Returns false on failure (e.g. OOM while copying a string or BigInt);
+// crashes if v is not a valid record/tuple element (e.g. an Object or a
+// Symbol).
+bool CopyRecordTupleElement(JSContext* cx, HandleValue v,
+ MutableHandleValue out) {
+ switch (v.type()) {
+ case JS::ValueType::Double:
+ case JS::ValueType::Int32:
+ case JS::ValueType::Undefined:
+ case JS::ValueType::Boolean: {
+ out.set(v);
+ break;
+ }
+ case JS::ValueType::String: {
+ RootedString vStr(cx, v.toString());
+ JSString* copy = CopyStringPure(cx, vStr);
+ if (!copy) {
+ return false;
+ }
+ out.setString(copy);
+ break;
+ }
+ case JS::ValueType::BigInt: {
+ RootedBigInt bi(cx, v.toBigInt());
+ BigInt* copy = BigInt::copy(cx, bi);
+ if (!copy) {
+ return false;
+ }
+ out.setBigInt(copy);
+ break;
+ }
+ case JS::ValueType::ExtendedPrimitive: {
+ RootedObject extPrim(cx, &v.toExtendedPrimitive());
+ JSObject* copy = CopyExtendedPrimitive(cx, extPrim);
+ if (!copy) {
+ return false;
+ }
+ out.setExtendedPrimitive(*copy);
+ break;
+ }
+ default:
+ MOZ_CRASH("CopyRecordTupleElement(): unexpected element type");
+ }
+ return true;
+}
+
+bool gc::MaybeForwardedIsExtendedPrimitive(const JSObject& obj) {
+ return MaybeForwardedObjectIs<RecordType>(&obj) ||
+ MaybeForwardedObjectIs<TupleType>(&obj);
+}
+
+bool IsExtendedPrimitiveWrapper(const JSObject& obj) {
+ return obj.is<RecordObject>() || obj.is<TupleObject>();
+}
+
+bool ExtendedPrimitiveGetProperty(JSContext* cx, HandleObject obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp) {
+ MOZ_ASSERT(IsExtendedPrimitive(*obj));
+
+ if (obj->is<RecordType>()) {
+ if (obj->as<RecordType>().getOwnProperty(cx, id, vp)) {
+ return true;
+ }
+    // If records end up not having a null prototype, this should use a
+    // mechanism similar to the one tuples use.
+ vp.set(JS::UndefinedValue());
+ return true;
+ }
+
+ MOZ_ASSERT(obj->is<TupleType>());
+ if (obj->as<TupleType>().getOwnProperty(id, vp)) {
+ return true;
+ }
+
+ JSObject* proto = GlobalObject::getOrCreateTuplePrototype(cx, cx->global());
+ if (!proto) {
+ return false;
+ }
+
+ Rooted<NativeObject*> rootedProto(cx, &proto->as<NativeObject>());
+ return NativeGetProperty(cx, rootedProto, receiver, id, vp);
+}
+
+} // namespace js
diff --git a/js/src/vm/RecordTupleShared.h b/js/src/vm/RecordTupleShared.h
new file mode 100644
index 0000000000..c666738bb7
--- /dev/null
+++ b/js/src/vm/RecordTupleShared.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_RecordTupleShared_h
+#define vm_RecordTupleShared_h
+
+#include "NamespaceImports.h"
+
+#include "js/ErrorReport.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+
+bool IsExtendedPrimitive(const JSObject& obj);
+JSObject* CopyExtendedPrimitive(JSContext* cx, HandleObject extPrim);
+bool CopyRecordTupleElement(JSContext* cx, HandleValue v,
+ MutableHandleValue out);
+bool IsExtendedPrimitiveWrapper(const JSObject& obj);
+bool ExtendedPrimitiveGetProperty(JSContext* cx, JS::HandleObject obj,
+ JS::HandleValue receiver, JS::HandleId id,
+ JS::MutableHandleValue vp);
+
+namespace gc {
+bool MaybeForwardedIsExtendedPrimitive(const JSObject& obj);
+} // namespace gc
+
+} // namespace js
+
+#endif
diff --git a/js/src/vm/RecordType.cpp b/js/src/vm/RecordType.cpp
new file mode 100644
index 0000000000..d63fee6394
--- /dev/null
+++ b/js/src/vm/RecordType.cpp
@@ -0,0 +1,538 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/RecordType.h"
+
+#include "mozilla/Assertions.h"
+
+#include "jsapi.h"
+
+#include "gc/Nursery.h"
+#include "js/Array.h"
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+#include "util/StringBuffer.h"
+#include "vm/ArrayObject.h"
+#include "vm/EqualityOperations.h"
+#include "vm/JSAtom.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h"
+#include "vm/ObjectFlags.h"
+#include "vm/PropertyInfo.h"
+#include "vm/PropMap.h"
+#include "vm/RecordTupleShared.h"
+#include "vm/StringType.h"
+#include "vm/ToSource.h"
+#include "vm/TupleType.h"
+
+#include "vm/JSAtom-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+
+static bool RecordConstructor(JSContext* cx, unsigned argc, Value* vp);
+
+const JSClass RecordType::class_ = {"record",
+ JSCLASS_HAS_RESERVED_SLOTS(SLOT_COUNT),
+ JS_NULL_CLASS_OPS, &RecordType::classSpec_};
+
+const ClassSpec RecordType::classSpec_ = {
+ GenericCreateConstructor<RecordConstructor, 1, gc::AllocKind::FUNCTION>,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr};
+
+Shape* RecordType::getInitialShape(JSContext* cx) {
+ return SharedShape::getInitialShape(cx, &RecordType::class_, cx->realm(),
+ TaggedProto(nullptr), SLOT_COUNT);
+}
+
+bool RecordType::copy(JSContext* cx, Handle<RecordType*> in,
+ MutableHandle<RecordType*> out) {
+ uint32_t len = in->length();
+ out.set(RecordType::createUninitialized(cx, len));
+ if (!out) {
+ return false;
+ }
+ RootedId k(cx);
+ RootedValue v(cx), vCopy(cx);
+ ArrayObject& sortedKeys = in->getFixedSlot(RecordType::SORTED_KEYS_SLOT)
+ .toObject()
+ .as<ArrayObject>();
+ for (uint32_t i = 0; i < len; i++) {
+    // Get the ith record key (an atomized string) and convert it to an id `k`
+ Value kVal = sortedKeys.getDenseElement(i);
+ MOZ_ASSERT(kVal.isString());
+ k.set(AtomToId(&kVal.toString()->asAtom()));
+ cx->markId(k);
+
+ // Get the value corresponding to `k`
+ MOZ_ALWAYS_TRUE(in->getOwnProperty(cx, k, &v));
+
+ // Copy `v` for the new record
+ if (!CopyRecordTupleElement(cx, v, &vCopy)) {
+ return false;
+ }
+
+ // Set `k` to `v` in the new record
+ if (!out->initializeNextProperty(cx, k, vCopy)) {
+ return false;
+ }
+ }
+ return out->finishInitialization(cx);
+}
+
+uint32_t RecordType::length() {
+ ArrayObject& sortedKeys =
+ getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>();
+
+ return sortedKeys.getDenseInitializedLength();
+}
+
+RecordType* RecordType::createUninitialized(JSContext* cx,
+ uint32_t initialLength) {
+ Rooted<Shape*> shape(cx, getInitialShape(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ Rooted<RecordType*> rec(
+ cx, cx->newCell<RecordType>(NewObjectGCKind(), gc::Heap::Default,
+ &RecordType::class_));
+ if (!rec) {
+ return nullptr;
+ }
+ rec->initShape(shape);
+ rec->setEmptyElements();
+ rec->initEmptyDynamicSlots();
+ rec->initFixedSlots(SLOT_COUNT);
+
+ Rooted<ArrayObject*> sortedKeys(
+ cx, NewDenseFullyAllocatedArray(cx, initialLength));
+ if (!sortedKeys) {
+ return nullptr;
+ }
+
+ rec->initFixedSlot(SORTED_KEYS_SLOT, ObjectValue(*sortedKeys));
+ rec->initFixedSlot(IS_ATOMIZED_SLOT, BooleanValue(false));
+
+ return rec;
+}
+
+bool RecordType::initializeNextProperty(JSContext* cx, HandleId key,
+ HandleValue value) {
+ if (key.isSymbol()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_RECORD_NO_SYMBOL_KEY);
+ return false;
+ }
+
+ if (!value.isPrimitive()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_RECORD_TUPLE_NO_OBJECT);
+ return false;
+ }
+
+ mozilla::Maybe<PropertyInfo> prop = lookupPure(key);
+
+ if (prop.isSome()) {
+ MOZ_ASSERT(prop.value().hasSlot());
+ setSlot(prop.value().slot(), value);
+ return true;
+ }
+
+ constexpr PropertyFlags propFlags = {PropertyFlag::Enumerable};
+ Rooted<NativeObject*> target(cx, this);
+ uint32_t slot;
+ if (!NativeObject::addProperty(cx, target, key, propFlags, &slot)) {
+ return false;
+ }
+ initSlot(slot, value);
+
+ // Add the key to the SORTED_KEYS internal slot
+
+ JSAtom* atomKey = key.isString() ? AtomizeString(cx, key.toString())
+ : Int32ToAtom(cx, key.toInt());
+ if (!atomKey) {
+ return false;
+ }
+
+ ArrayObject* sortedKeys =
+ &getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>();
+ uint32_t initializedLength = sortedKeys->getDenseInitializedLength();
+
+ if (!sortedKeys->ensureElements(cx, initializedLength + 1)) {
+ return false;
+ }
+ sortedKeys->setDenseInitializedLength(initializedLength + 1);
+ sortedKeys->initDenseElement(initializedLength, StringValue(atomKey));
+
+ return true;
+}
+
+bool RecordType::finishInitialization(JSContext* cx) {
+ Rooted<NativeObject*> obj(cx, this);
+ if (!JSObject::setFlag(cx, obj, ObjectFlag::NotExtensible)) {
+ return false;
+ }
+ if (!ObjectElements::FreezeOrSeal(cx, obj, IntegrityLevel::Frozen)) {
+ return false;
+ }
+
+ ArrayObject& sortedKeys =
+ getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>();
+ uint32_t length = sortedKeys.getDenseInitializedLength();
+
+ Rooted<JSLinearString*> tmpKey(cx);
+
+  // Sort the keys. This is insertion sort - O(n^2) - but that's fine for now
+  // because records are probably not too big anyway.
+ for (uint32_t i = 1, j; i < length; i++) {
+#define KEY(index) sortedKeys.getDenseElement(index)
+#define KEY_S(index) &KEY(index).toString()->asLinear()
+
+ MOZ_ASSERT(KEY(i).isString());
+ MOZ_ASSERT(KEY(i).toString()->isLinear());
+
+ tmpKey = KEY_S(i);
+
+ for (j = i; j > 0 && CompareStrings(KEY_S(j - 1), tmpKey) > 0; j--) {
+ sortedKeys.setDenseElement(j, KEY(j - 1));
+ }
+
+ sortedKeys.setDenseElement(j, StringValue(tmpKey));
+
+#undef KEY
+#undef KEY_S
+ }
+
+ // We preallocate 1 element for each object spread. If spreads end up
+ // introducing zero elements, we can then shrink the sortedKeys array.
+ sortedKeys.setDenseInitializedLength(length);
+ sortedKeys.setLength(length);
+ sortedKeys.setNonWritableLength(cx);
+
+ MOZ_ASSERT(sortedKeys.length() == length);
+
+ return true;
+}
+
+bool RecordType::getOwnProperty(JSContext* cx, HandleId id,
+ MutableHandleValue vp) const {
+ if (id.isSymbol()) {
+ return false;
+ }
+
+ uint32_t index;
+
+ // Check for a native dense element.
+ if (id.isInt()) {
+ index = id.toInt();
+ if (containsDenseElement(index)) {
+ vp.set(getDenseElement(index));
+ return true;
+ }
+ }
+
+ // Check for a native property.
+ if (PropMap* map = shape()->lookup(cx, id, &index)) {
+ PropertyInfo info = map->getPropertyInfo(index);
+ MOZ_ASSERT(info.isDataProperty());
+ vp.set(getSlot(info.slot()));
+ return true;
+ }
+
+ return false;
+}
+
+js::HashNumber RecordType::hash(const RecordType::FieldHasher& hasher) {
+ MOZ_ASSERT(isAtomized());
+
+ ArrayObject& sortedKeys =
+ getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>();
+ uint32_t length = sortedKeys.length();
+
+ js::HashNumber h = mozilla::HashGeneric(length);
+ for (uint32_t i = 0; i < length; i++) {
+ JSAtom& key = sortedKeys.getDenseElement(i).toString()->asAtom();
+
+ mozilla::Maybe<PropertyInfo> prop = lookupPure(AtomToId(&key));
+ MOZ_ASSERT(prop.isSome() && prop.value().hasSlot());
+
+ h = mozilla::AddToHash(h, key.hash(), hasher(getSlot(prop.value().slot())));
+ }
+
+ return h;
+}
+
+bool RecordType::ensureAtomized(JSContext* cx) {
+ if (isAtomized()) {
+ return true;
+ }
+
+ ArrayObject& sortedKeys =
+ getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>();
+ uint32_t length = sortedKeys.length();
+
+ RootedValue child(cx);
+ bool updated;
+ for (uint32_t i = 0; i < length; i++) {
+ JSAtom& key = sortedKeys.getDenseElement(i).toString()->asAtom();
+
+ mozilla::Maybe<PropertyInfo> prop = lookupPure(AtomToId(&key));
+ MOZ_ASSERT(prop.isSome() && prop.value().hasSlot());
+ uint32_t slot = prop.value().slot();
+
+ child.set(getSlot(slot));
+
+ if (!EnsureAtomized(cx, &child, &updated)) {
+ return false;
+ }
+ if (updated) {
+ setSlot(slot, child);
+ }
+ }
+
+ setFixedSlot(IS_ATOMIZED_SLOT, BooleanValue(true));
+
+ return true;
+}
+
+bool RecordType::sameValueZero(JSContext* cx, RecordType* lhs, RecordType* rhs,
+ bool* equal) {
+ return sameValueWith<SameValueZero>(cx, lhs, rhs, equal);
+}
+
+bool RecordType::sameValue(JSContext* cx, RecordType* lhs, RecordType* rhs,
+ bool* equal) {
+ return sameValueWith<SameValue>(cx, lhs, rhs, equal);
+}
+
+bool RecordType::sameValueZero(RecordType* lhs, RecordType* rhs) {
+ MOZ_ASSERT(lhs->isAtomized());
+ MOZ_ASSERT(rhs->isAtomized());
+
+ if (lhs == rhs) {
+ return true;
+ }
+
+ uint32_t length = lhs->length();
+
+ if (rhs->length() != length) {
+ return false;
+ }
+
+ ArrayObject& lhsSortedKeys =
+ lhs->getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>();
+ ArrayObject& rhsSortedKeys =
+ rhs->getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>();
+
+ Value v1, v2;
+
+ for (uint32_t index = 0; index < length; index++) {
+ JSAtom* key = &lhsSortedKeys.getDenseElement(index).toString()->asAtom();
+ if (!EqualStrings(
+ key, &rhsSortedKeys.getDenseElement(index).toString()->asAtom())) {
+ return false;
+ }
+
+ {
+ mozilla::Maybe<PropertyInfo> lhsProp = lhs->lookupPure(AtomToId(key));
+ MOZ_ASSERT(lhsProp.isSome() && lhsProp.value().hasSlot());
+ v1 = lhs->getSlot(lhsProp.value().slot());
+ }
+
+ {
+ mozilla::Maybe<PropertyInfo> rhsProp = rhs->lookupPure(AtomToId(key));
+ MOZ_ASSERT(rhsProp.isSome() && rhsProp.value().hasSlot());
+ v2 = rhs->getSlot(rhsProp.value().slot());
+ }
+
+ if (!js::SameValueZeroLinear(v1, v2)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <bool Comparator(JSContext*, HandleValue, HandleValue, bool*)>
+bool RecordType::sameValueWith(JSContext* cx, RecordType* lhs, RecordType* rhs,
+ bool* equal) {
+ if (lhs == rhs) {
+ *equal = true;
+ return true;
+ }
+
+ uint32_t length = lhs->length();
+
+ if (rhs->length() != length) {
+ *equal = false;
+ return true;
+ }
+
+ *equal = true;
+ RootedString k1(cx), k2(cx);
+ RootedId id(cx);
+ RootedValue v1(cx), v2(cx);
+
+ Rooted<ArrayObject*> sortedKeysLHS(
+ cx, &lhs->getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>());
+ Rooted<ArrayObject*> sortedKeysRHS(
+ cx, &rhs->getFixedSlot(SORTED_KEYS_SLOT).toObject().as<ArrayObject>());
+
+ for (uint32_t index = 0; index < length; index++) {
+ k1.set(sortedKeysLHS->getDenseElement(index).toString());
+ k2.set(sortedKeysRHS->getDenseElement(index).toString());
+
+ if (!EqualStrings(cx, k1, k2, equal)) {
+ return false;
+ }
+ if (!*equal) {
+ return true;
+ }
+
+ if (!JS_StringToId(cx, k1, &id)) {
+ return false;
+ }
+
+ // We already know that this is an own property of both records, so both
+ // calls must return true.
+ MOZ_ALWAYS_TRUE(lhs->getOwnProperty(cx, id, &v1));
+ MOZ_ALWAYS_TRUE(rhs->getOwnProperty(cx, id, &v2));
+
+ if (!Comparator(cx, v1, v2, equal)) {
+ return false;
+ }
+ if (!*equal) {
+ return true;
+ }
+ }
+
+ return true;
+}
+
+// Record and Tuple proposal section 9.2.1
+static bool RecordConstructor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 1.
+ if (args.isConstructing()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_NOT_CONSTRUCTOR, "Record");
+ return false;
+ }
+ // Step 2.
+ RootedObject obj(cx, ToObject(cx, args.get(0)));
+ if (!obj) {
+ return false;
+ }
+
+ // Step 3.
+ RootedIdVector keys(cx);
+ if (!GetPropertyKeys(cx, obj, JSITER_OWNONLY, &keys)) {
+ return false;
+ }
+
+ size_t len = keys.length();
+
+ Rooted<RecordType*> rec(cx, RecordType::createUninitialized(cx, len));
+
+ if (!rec) {
+ return false;
+ }
+
+ RootedId propKey(cx);
+ RootedValue propValue(cx);
+ for (size_t i = 0; i < len; i++) {
+ propKey.set(keys[i]);
+ MOZ_ASSERT(!propKey.isSymbol(), "symbols are filtered out at step 3");
+
+ // Step 4.c.ii.1.
+ if (MOZ_UNLIKELY(!GetProperty(cx, obj, obj, propKey, &propValue))) {
+ return false;
+ }
+
+ if (MOZ_UNLIKELY(!rec->initializeNextProperty(cx, propKey, propValue))) {
+ return false;
+ }
+ }
+
+ if (MOZ_UNLIKELY(!rec->finishInitialization(cx))) {
+ return false;
+ }
+
+ args.rval().setExtendedPrimitive(*rec);
+ return true;
+}
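+
+// For example, calling the constructor as a function from JS (illustrative):
+//
+//   Record({ b: 2, a: 1 })  // yields the record #{ a: 1, b: 2 }
+//
+// Keys end up sorted by finishInitialization; `new Record(...)` throws per
+// step 1 above.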
+
+JSString* js::RecordToSource(JSContext* cx, RecordType* rec) {
+ JSStringBuilder sb(cx);
+
+ if (!sb.append("#{")) {
+ return nullptr;
+ }
+
+ ArrayObject& sortedKeys = rec->getFixedSlot(RecordType::SORTED_KEYS_SLOT)
+ .toObject()
+ .as<ArrayObject>();
+
+ uint32_t length = sortedKeys.length();
+
+ Rooted<RecordType*> rootedRec(cx, rec);
+ RootedValue value(cx);
+ RootedString keyStr(cx);
+ RootedId key(cx);
+ JSString* str;
+ for (uint32_t index = 0; index < length; index++) {
+ value.set(sortedKeys.getDenseElement(index));
+ MOZ_ASSERT(value.isString());
+
+ str = ValueToSource(cx, value);
+ if (!str) {
+ return nullptr;
+ }
+ if (!sb.append(str)) {
+ return nullptr;
+ }
+
+ if (!sb.append(": ")) {
+ return nullptr;
+ }
+
+ keyStr.set(value.toString());
+ if (!JS_StringToId(cx, keyStr, &key)) {
+ return nullptr;
+ }
+
+ MOZ_ALWAYS_TRUE(rootedRec->getOwnProperty(cx, key, &value));
+
+ str = ValueToSource(cx, value);
+ if (!str) {
+ return nullptr;
+ }
+ if (!sb.append(str)) {
+ return nullptr;
+ }
+
+ if (index + 1 != length) {
+ if (!sb.append(", ")) {
+ return nullptr;
+ }
+ }
+ }
+
+ /* Finalize the buffer. */
+ if (!sb.append('}')) {
+ return nullptr;
+ }
+
+ return sb.finishString();
+}
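+
+// For example (illustrative; the exact key quoting comes from ValueToSource):
+// RecordToSource on #{ a: 1 } produces the source string  #{"a": 1}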
diff --git a/js/src/vm/RecordType.h b/js/src/vm/RecordType.h
new file mode 100644
index 0000000000..6295ca29e3
--- /dev/null
+++ b/js/src/vm/RecordType.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_RecordType_h
+#define vm_RecordType_h
+
+#include <cstdint>
+#include <functional>
+#include "js/TypeDecls.h"
+#include "vm/ArrayObject.h"
+#include "vm/NativeObject.h"
+
+#include "vm/Shape.h"
+
+namespace JS {
+class RecordType;
+}
+
+namespace js {
+
+extern JSString* RecordToSource(JSContext* cx, JS::RecordType* rec);
+
+}
+
+namespace JS {
+
+class RecordType final : public js::NativeObject {
+ friend JSString* js::RecordToSource(JSContext* cx, RecordType* rec);
+
+ public:
+ enum { SORTED_KEYS_SLOT = 0, IS_ATOMIZED_SLOT, SLOT_COUNT };
+
+ static const js::ClassSpec classSpec_;
+ static const JSClass class_;
+
+ static RecordType* createUninitialized(JSContext* cx, uint32_t initialLength);
+ bool initializeNextProperty(JSContext* cx, Handle<PropertyKey> key,
+ HandleValue value);
+ bool finishInitialization(JSContext* cx);
+ static js::Shape* getInitialShape(JSContext* cx);
+
+ static bool copy(JSContext* cx, Handle<RecordType*> in,
+ MutableHandle<RecordType*> out);
+
+ uint32_t length();
+
+ bool getOwnProperty(JSContext* cx, HandleId id, MutableHandleValue vp) const;
+
+ static bool sameValueZero(JSContext* cx, RecordType* lhs, RecordType* rhs,
+ bool* equal);
+ static bool sameValue(JSContext* cx, RecordType* lhs, RecordType* rhs,
+ bool* equal);
+
+ js::ArrayObject* keys() const {
+ return &getFixedSlot(SORTED_KEYS_SLOT).toObject().as<js::ArrayObject>();
+ }
+
+ using FieldHasher = std::function<js::HashNumber(const Value& child)>;
+ js::HashNumber hash(const FieldHasher& hasher);
+
+ bool ensureAtomized(JSContext* cx);
+ bool isAtomized() const { return getFixedSlot(IS_ATOMIZED_SLOT).toBoolean(); }
+
+ // This can be used to compare atomized records.
+ static bool sameValueZero(RecordType* lhs, RecordType* rhs);
+
+ private:
+ template <bool Comparator(JSContext*, HandleValue, HandleValue, bool*)>
+ static bool sameValueWith(JSContext* cx, RecordType* lhs, RecordType* rhs,
+ bool* equal);
+};
+
+} // namespace JS
+
+#endif
diff --git a/js/src/vm/RegExpObject.cpp b/js/src/vm/RegExpObject.cpp
new file mode 100644
index 0000000000..b9660c97a8
--- /dev/null
+++ b/js/src/vm/RegExpObject.cpp
@@ -0,0 +1,1232 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/RegExpObject.h"
+
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+
+#include <type_traits>
+
+#include "builtin/RegExp.h"
+#include "builtin/SelfHostingDefines.h" // REGEXP_*_FLAG
+#include "frontend/FrontendContext.h" // AutoReportFrontendContext
+#include "frontend/TokenStream.h"
+#include "gc/HashUtil.h"
+#include "irregexp/RegExpAPI.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::ReportOverRecursed
+#include "js/Object.h" // JS::GetBuiltinClass
+#include "js/RegExp.h"
+#include "js/RegExpFlags.h" // JS::RegExpFlags
+#include "util/StringBuffer.h"
+#include "vm/MatchPairs.h"
+#include "vm/PlainObject.h"
+#include "vm/RegExpStatics.h"
+#include "vm/StringType.h"
+#include "vm/WellKnownAtom.h" // js_*_str
+
+#include "vm/JSContext-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/Shape-inl.h"
+
+using namespace js;
+
+using JS::AutoStableStringChars;
+using JS::CompileOptions;
+using JS::RegExpFlag;
+using JS::RegExpFlags;
+using mozilla::DebugOnly;
+using mozilla::PodCopy;
+
+using JS::AutoCheckCannotGC;
+
+static_assert(RegExpFlag::HasIndices == REGEXP_HASINDICES_FLAG,
+ "self-hosted JS and /d flag bits must agree");
+static_assert(RegExpFlag::Global == REGEXP_GLOBAL_FLAG,
+ "self-hosted JS and /g flag bits must agree");
+static_assert(RegExpFlag::IgnoreCase == REGEXP_IGNORECASE_FLAG,
+ "self-hosted JS and /i flag bits must agree");
+static_assert(RegExpFlag::Multiline == REGEXP_MULTILINE_FLAG,
+ "self-hosted JS and /m flag bits must agree");
+static_assert(RegExpFlag::DotAll == REGEXP_DOTALL_FLAG,
+ "self-hosted JS and /s flag bits must agree");
+static_assert(RegExpFlag::Unicode == REGEXP_UNICODE_FLAG,
+ "self-hosted JS and /u flag bits must agree");
+static_assert(RegExpFlag::Sticky == REGEXP_STICKY_FLAG,
+ "self-hosted JS and /y flag bits must agree");
+
+RegExpObject* js::RegExpAlloc(JSContext* cx, NewObjectKind newKind,
+ HandleObject proto /* = nullptr */) {
+ Rooted<RegExpObject*> regexp(
+ cx, NewObjectWithClassProtoAndKind<RegExpObject>(cx, proto, newKind));
+ if (!regexp) {
+ return nullptr;
+ }
+
+ regexp->clearShared();
+
+ if (!SharedShape::ensureInitialCustomShape<RegExpObject>(cx, regexp)) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(regexp->lookupPure(cx->names().lastIndex)->slot() ==
+ RegExpObject::lastIndexSlot());
+
+ return regexp;
+}
+
+/* MatchPairs */
+
+bool VectorMatchPairs::initArrayFrom(VectorMatchPairs& copyFrom) {
+ MOZ_ASSERT(copyFrom.pairCount() > 0);
+
+ if (!allocOrExpandArray(copyFrom.pairCount())) {
+ return false;
+ }
+
+ PodCopy(pairs_, copyFrom.pairs_, pairCount_);
+
+ return true;
+}
+
+bool VectorMatchPairs::allocOrExpandArray(size_t pairCount) {
+ if (!vec_.resizeUninitialized(pairCount)) {
+ return false;
+ }
+
+ pairs_ = &vec_[0];
+ pairCount_ = pairCount;
+ return true;
+}
+
+/* RegExpObject */
+
+/* static */
+RegExpShared* RegExpObject::getShared(JSContext* cx,
+ Handle<RegExpObject*> regexp) {
+ if (regexp->hasShared()) {
+ return regexp->getShared();
+ }
+
+ return createShared(cx, regexp);
+}
+
+/* static */
+bool RegExpObject::isOriginalFlagGetter(JSNative native, RegExpFlags* mask) {
+ if (native == regexp_hasIndices) {
+ *mask = RegExpFlag::HasIndices;
+ return true;
+ }
+ if (native == regexp_global) {
+ *mask = RegExpFlag::Global;
+ return true;
+ }
+ if (native == regexp_ignoreCase) {
+ *mask = RegExpFlag::IgnoreCase;
+ return true;
+ }
+ if (native == regexp_multiline) {
+ *mask = RegExpFlag::Multiline;
+ return true;
+ }
+ if (native == regexp_dotAll) {
+ *mask = RegExpFlag::DotAll;
+ return true;
+ }
+ if (native == regexp_sticky) {
+ *mask = RegExpFlag::Sticky;
+ return true;
+ }
+ if (native == regexp_unicode) {
+ *mask = RegExpFlag::Unicode;
+ return true;
+ }
+
+ return false;
+}
+
+static bool FinishRegExpClassInit(JSContext* cx, JS::HandleObject ctor,
+ JS::HandleObject proto) {
+#ifdef DEBUG
+ // Assert RegExp.prototype.exec is usually stored in a dynamic slot. The
+ // optimization in InlinableNativeIRGenerator::tryAttachIntrinsicRegExpExec
+ // depends on this.
+ Handle<NativeObject*> nproto = proto.as<NativeObject>();
+ auto prop = nproto->lookupPure(cx->names().exec);
+ MOZ_ASSERT(prop->isDataProperty());
+ MOZ_ASSERT(!nproto->isFixedSlot(prop->slot()));
+#endif
+ return true;
+}
+
+static const ClassSpec RegExpObjectClassSpec = {
+ GenericCreateConstructor<js::regexp_construct, 2, gc::AllocKind::FUNCTION>,
+ GenericCreatePrototype<RegExpObject>,
+ nullptr,
+ js::regexp_static_props,
+ js::regexp_methods,
+ js::regexp_properties,
+ FinishRegExpClassInit};
+
+const JSClass RegExpObject::class_ = {
+ js_RegExp_str,
+ JSCLASS_HAS_RESERVED_SLOTS(RegExpObject::RESERVED_SLOTS) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_RegExp),
+ JS_NULL_CLASS_OPS, &RegExpObjectClassSpec};
+
+const JSClass RegExpObject::protoClass_ = {
+ "RegExp.prototype", JSCLASS_HAS_CACHED_PROTO(JSProto_RegExp),
+ JS_NULL_CLASS_OPS, &RegExpObjectClassSpec};
+
+template <typename CharT>
+RegExpObject* RegExpObject::create(JSContext* cx, const CharT* chars,
+ size_t length, RegExpFlags flags,
+ NewObjectKind newKind) {
+ static_assert(std::is_same_v<CharT, char16_t>,
+ "this code may need updating if/when CharT encodes UTF-8");
+
+ Rooted<JSAtom*> source(cx, AtomizeChars(cx, chars, length));
+ if (!source) {
+ return nullptr;
+ }
+
+ return create(cx, source, flags, newKind);
+}
+
+template RegExpObject* RegExpObject::create(JSContext* cx,
+ const char16_t* chars,
+ size_t length, RegExpFlags flags,
+ NewObjectKind newKind);
+
+RegExpObject* RegExpObject::createSyntaxChecked(JSContext* cx,
+ Handle<JSAtom*> source,
+ RegExpFlags flags,
+ NewObjectKind newKind) {
+ Rooted<RegExpObject*> regexp(cx, RegExpAlloc(cx, newKind));
+ if (!regexp) {
+ return nullptr;
+ }
+
+ regexp->initAndZeroLastIndex(source, flags, cx);
+
+ return regexp;
+}
+
+RegExpObject* RegExpObject::create(JSContext* cx, Handle<JSAtom*> source,
+ RegExpFlags flags, NewObjectKind newKind) {
+ Rooted<RegExpObject*> regexp(cx);
+ {
+ AutoReportFrontendContext fc(cx);
+ CompileOptions dummyOptions(cx);
+ frontend::DummyTokenStream dummyTokenStream(&fc, dummyOptions);
+
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+ if (!irregexp::CheckPatternSyntax(cx, cx->stackLimitForCurrentPrincipal(),
+ dummyTokenStream, source, flags)) {
+ return nullptr;
+ }
+
+ regexp = RegExpAlloc(cx, newKind);
+ if (!regexp) {
+ return nullptr;
+ }
+
+ regexp->initAndZeroLastIndex(source, flags, cx);
+
+ MOZ_ASSERT(!regexp->hasShared());
+ }
+ return regexp;
+}
+
+/* static */
+RegExpShared* RegExpObject::createShared(JSContext* cx,
+ Handle<RegExpObject*> regexp) {
+ MOZ_ASSERT(!regexp->hasShared());
+ Rooted<JSAtom*> source(cx, regexp->getSource());
+ RegExpShared* shared =
+ cx->zone()->regExps().get(cx, source, regexp->getFlags());
+ if (!shared) {
+ return nullptr;
+ }
+
+ regexp->setShared(shared);
+
+ MOZ_ASSERT(regexp->hasShared());
+
+ return shared;
+}
+
+SharedShape* RegExpObject::assignInitialShape(JSContext* cx,
+ Handle<RegExpObject*> self) {
+ MOZ_ASSERT(self->empty());
+
+ static_assert(LAST_INDEX_SLOT == 0);
+
+ /* The lastIndex property alone is writable but non-configurable. */
+ if (!NativeObject::addPropertyInReservedSlot(cx, self, cx->names().lastIndex,
+ LAST_INDEX_SLOT,
+ {PropertyFlag::Writable})) {
+ return nullptr;
+ }
+
+ return self->sharedShape();
+}
+
+void RegExpObject::initIgnoringLastIndex(JSAtom* source, RegExpFlags flags) {
+ // If this is a re-initialization with an existing RegExpShared, 'flags'
+ // may not match getShared()->flags, so forget the RegExpShared.
+ clearShared();
+
+ setSource(source);
+ setFlags(flags);
+}
+
+void RegExpObject::initAndZeroLastIndex(JSAtom* source, RegExpFlags flags,
+ JSContext* cx) {
+ initIgnoringLastIndex(source, flags);
+ zeroLastIndex(cx);
+}
+
+static MOZ_ALWAYS_INLINE bool IsRegExpLineTerminator(const JS::Latin1Char c) {
+ return c == '\n' || c == '\r';
+}
+
+static MOZ_ALWAYS_INLINE bool IsRegExpLineTerminator(const char16_t c) {
+ return c == '\n' || c == '\r' || c == 0x2028 || c == 0x2029;
+}
+
+static MOZ_ALWAYS_INLINE bool AppendEscapedLineTerminator(
+ StringBuffer& sb, const JS::Latin1Char c) {
+ switch (c) {
+ case '\n':
+ if (!sb.append('n')) {
+ return false;
+ }
+ break;
+ case '\r':
+ if (!sb.append('r')) {
+ return false;
+ }
+ break;
+ default:
+ MOZ_CRASH("Bad LineTerminator");
+ }
+ return true;
+}
+
+static MOZ_ALWAYS_INLINE bool AppendEscapedLineTerminator(StringBuffer& sb,
+ const char16_t c) {
+ switch (c) {
+ case '\n':
+ if (!sb.append('n')) {
+ return false;
+ }
+ break;
+ case '\r':
+ if (!sb.append('r')) {
+ return false;
+ }
+ break;
+ case 0x2028:
+ if (!sb.append("u2028")) {
+ return false;
+ }
+ break;
+ case 0x2029:
+ if (!sb.append("u2029")) {
+ return false;
+ }
+ break;
+ default:
+ MOZ_CRASH("Bad LineTerminator");
+ }
+ return true;
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE bool SetupBuffer(StringBuffer& sb,
+ const CharT* oldChars, size_t oldLen,
+ const CharT* it) {
+ if constexpr (std::is_same_v<CharT, char16_t>) {
+ if (!sb.ensureTwoByteChars()) {
+ return false;
+ }
+ }
+
+ if (!sb.reserve(oldLen + 1)) {
+ return false;
+ }
+
+ sb.infallibleAppend(oldChars, size_t(it - oldChars));
+ return true;
+}
+
+// Note: leaves the string buffer empty if no escaping need be performed.
+template <typename CharT>
+static bool EscapeRegExpPattern(StringBuffer& sb, const CharT* oldChars,
+ size_t oldLen) {
+ bool inBrackets = false;
+ bool previousCharacterWasBackslash = false;
+
+ for (const CharT* it = oldChars; it < oldChars + oldLen; ++it) {
+ CharT ch = *it;
+ if (!previousCharacterWasBackslash) {
+ if (inBrackets) {
+ if (ch == ']') {
+ inBrackets = false;
+ }
+ } else if (ch == '/') {
+ // There's a forward slash that needs escaping.
+ if (sb.empty()) {
+ // This is the first char we've seen that needs escaping,
+ // copy everything up to this point.
+ if (!SetupBuffer(sb, oldChars, oldLen, it)) {
+ return false;
+ }
+ }
+ if (!sb.append('\\')) {
+ return false;
+ }
+ } else if (ch == '[') {
+ inBrackets = true;
+ }
+ }
+
+ if (IsRegExpLineTerminator(ch)) {
+      // There's a LineTerminator that needs escaping.
+ if (sb.empty()) {
+ // This is the first char we've seen that needs escaping,
+ // copy everything up to this point.
+ if (!SetupBuffer(sb, oldChars, oldLen, it)) {
+ return false;
+ }
+ }
+ if (!previousCharacterWasBackslash) {
+ if (!sb.append('\\')) {
+ return false;
+ }
+ }
+ if (!AppendEscapedLineTerminator(sb, ch)) {
+ return false;
+ }
+ } else if (!sb.empty()) {
+ if (!sb.append(ch)) {
+ return false;
+ }
+ }
+
+ if (previousCharacterWasBackslash) {
+ previousCharacterWasBackslash = false;
+ } else if (ch == '\\') {
+ previousCharacterWasBackslash = true;
+ }
+ }
+
+ return true;
+}
+
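+// Illustrative examples (not from the source): the pattern "a/b" escapes to
+// "a\/b" and a raw newline becomes "\n", while "[/]" is returned unchanged,
+// because '/' inside a character class needs no escape and the buffer is
+// never populated.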
+// ES6 draft rev32 21.2.3.2.4.
+JSLinearString* js::EscapeRegExpPattern(JSContext* cx, Handle<JSAtom*> src) {
+ // Step 2.
+ if (src->length() == 0) {
+ return cx->names().emptyRegExp;
+ }
+
+ // We may never need to use |sb|. Start using it lazily.
+ JSStringBuilder sb(cx);
+ bool escapeFailed = false;
+ if (src->hasLatin1Chars()) {
+ JS::AutoCheckCannotGC nogc;
+ escapeFailed =
+ !::EscapeRegExpPattern(sb, src->latin1Chars(nogc), src->length());
+ } else {
+ JS::AutoCheckCannotGC nogc;
+ escapeFailed =
+ !::EscapeRegExpPattern(sb, src->twoByteChars(nogc), src->length());
+ }
+ if (escapeFailed) {
+ return nullptr;
+ }
+
+ // Step 3.
+ if (sb.empty()) {
+ return src;
+ }
+ return sb.finishString();
+}
+
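+// Illustrative example: a RegExp with source "a+" and the global and sticky
+// flags stringifies as "/a+/gy". Flags are appended in the spec's "dgimsuy"
+// order, matching the RegExp.prototype.flags getter.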
+// ES6 draft rev32 21.2.5.14. Optimized for RegExpObject.
+JSLinearString* RegExpObject::toString(JSContext* cx,
+ Handle<RegExpObject*> obj) {
+ // Steps 3-4.
+ Rooted<JSAtom*> src(cx, obj->getSource());
+ if (!src) {
+ return nullptr;
+ }
+  Rooted<JSLinearString*> escapedSrc(cx, EscapeRegExpPattern(cx, src));
+  if (!escapedSrc) {
+    return nullptr;
+  }
+
+ // Step 7.
+ JSStringBuilder sb(cx);
+ size_t len = escapedSrc->length();
+ if (!sb.reserve(len + 2)) {
+ return nullptr;
+ }
+ sb.infallibleAppend('/');
+ if (!sb.append(escapedSrc)) {
+ return nullptr;
+ }
+ sb.infallibleAppend('/');
+
+ // Steps 5-7.
+ if (obj->hasIndices() && !sb.append('d')) {
+ return nullptr;
+ }
+ if (obj->global() && !sb.append('g')) {
+ return nullptr;
+ }
+ if (obj->ignoreCase() && !sb.append('i')) {
+ return nullptr;
+ }
+ if (obj->multiline() && !sb.append('m')) {
+ return nullptr;
+ }
+ if (obj->dotAll() && !sb.append('s')) {
+ return nullptr;
+ }
+ if (obj->unicode() && !sb.append('u')) {
+ return nullptr;
+ }
+ if (obj->sticky() && !sb.append('y')) {
+ return nullptr;
+ }
+
+ return sb.finishString();
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE bool IsRegExpMetaChar(CharT ch) {
+ switch (ch) {
+ /* ES 2016 draft Mar 25, 2016 21.2.1 SyntaxCharacter. */
+ case '^':
+ case '$':
+ case '\\':
+ case '.':
+ case '*':
+ case '+':
+ case '?':
+ case '(':
+ case ')':
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ case '|':
+ return true;
+ default:
+ return false;
+ }
+}
+
+template <typename CharT>
+bool js::HasRegExpMetaChars(const CharT* chars, size_t length) {
+ for (size_t i = 0; i < length; ++i) {
+ if (IsRegExpMetaChar<CharT>(chars[i])) {
+ return true;
+ }
+ }
+ return false;
+}
+
+template bool js::HasRegExpMetaChars<Latin1Char>(const Latin1Char* chars,
+ size_t length);
+
+template bool js::HasRegExpMetaChars<char16_t>(const char16_t* chars,
+ size_t length);
+
+bool js::StringHasRegExpMetaChars(JSLinearString* str) {
+ AutoCheckCannotGC nogc;
+ if (str->hasLatin1Chars()) {
+ return HasRegExpMetaChars(str->latin1Chars(nogc), str->length());
+ }
+
+ return HasRegExpMetaChars(str->twoByteChars(nogc), str->length());
+}
+
+/* RegExpShared */
+
+RegExpShared::RegExpShared(JSAtom* source, RegExpFlags flags)
+ : CellWithTenuredGCPointer(source), pairCount_(0), flags(flags) {}
+
+void RegExpShared::traceChildren(JSTracer* trc) {
+ TraceNullableCellHeaderEdge(trc, this, "RegExpShared source");
+ if (kind() == RegExpShared::Kind::Atom) {
+ TraceNullableEdge(trc, &patternAtom_, "RegExpShared pattern atom");
+ } else {
+ for (auto& comp : compilationArray) {
+ TraceNullableEdge(trc, &comp.jitCode, "RegExpShared code");
+ }
+ TraceNullableEdge(trc, &groupsTemplate_, "RegExpShared groups template");
+ }
+}
+
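+// Drops compiled jitcode and the tables it references (bytecode survives);
+// typically invoked when the zone discards JIT code, e.g. during GC. Later
+// executions recompile on demand via compileIfNecessary().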
+void RegExpShared::discardJitCode() {
+ for (auto& comp : compilationArray) {
+ comp.jitCode = nullptr;
+ }
+
+ // We can also purge the tables used by JIT code.
+ tables.clearAndFree();
+}
+
+void RegExpShared::finalize(JS::GCContext* gcx) {
+ for (auto& comp : compilationArray) {
+ if (comp.byteCode) {
+ size_t length = comp.byteCodeLength();
+ gcx->free_(this, comp.byteCode, length, MemoryUse::RegExpSharedBytecode);
+ }
+ }
+ if (namedCaptureIndices_) {
+ size_t length = numNamedCaptures() * sizeof(uint32_t);
+ gcx->free_(this, namedCaptureIndices_, length,
+ MemoryUse::RegExpSharedNamedCaptureData);
+ }
+ tables.~JitCodeTables();
+}
+
+/* static */
+bool RegExpShared::compileIfNecessary(JSContext* cx,
+ MutableHandleRegExpShared re,
+ Handle<JSLinearString*> input,
+ RegExpShared::CodeKind codeKind) {
+ if (codeKind == RegExpShared::CodeKind::Any) {
+ // We start by interpreting regexps, then compile them once they are
+ // sufficiently hot. For very long input strings, we tier up eagerly.
+ codeKind = RegExpShared::CodeKind::Bytecode;
+ if (re->markedForTierUp() || input->length() > 1000) {
+ codeKind = RegExpShared::CodeKind::Jitcode;
+ }
+ }
+
+ // Fall back to bytecode if native codegen is not available.
+ if (!IsNativeRegExpEnabled() && codeKind == RegExpShared::CodeKind::Jitcode) {
+ codeKind = RegExpShared::CodeKind::Bytecode;
+ }
+
+ bool needsCompile = false;
+ if (re->kind() == RegExpShared::Kind::Unparsed) {
+ needsCompile = true;
+ }
+ if (re->kind() == RegExpShared::Kind::RegExp) {
+ if (!re->isCompiled(input->hasLatin1Chars(), codeKind)) {
+ needsCompile = true;
+ }
+ }
+ if (needsCompile) {
+ return irregexp::CompilePattern(cx, re, input, codeKind);
+ }
+ return true;
+}
+
+/* static */
+RegExpRunStatus RegExpShared::execute(JSContext* cx,
+ MutableHandleRegExpShared re,
+ Handle<JSLinearString*> input,
+ size_t start, VectorMatchPairs* matches) {
+ MOZ_ASSERT(matches);
+
+ // TODO: Add tracelogger support
+
+ /* Compile the code at point-of-use. */
+ if (!compileIfNecessary(cx, re, input, RegExpShared::CodeKind::Any)) {
+ return RegExpRunStatus_Error;
+ }
+
+ /*
+ * Ensure sufficient memory for output vector.
+ * No need to initialize it. The RegExp engine fills them in on a match.
+ */
+ if (!matches->allocOrExpandArray(re->pairCount())) {
+ ReportOutOfMemory(cx);
+ return RegExpRunStatus_Error;
+ }
+
+ if (re->kind() == RegExpShared::Kind::Atom) {
+ return RegExpShared::executeAtom(re, input, start, matches);
+ }
+
+ uint32_t interruptRetries = 0;
+ const uint32_t maxInterruptRetries = 4;
+ do {
+ DebugOnly<bool> alreadyThrowing = cx->isExceptionPending();
+ RegExpRunStatus result = irregexp::Execute(cx, re, input, start, matches);
+#ifdef DEBUG
+    // Check whether we need to simulate an interrupt.
+ if (js::irregexp::IsolateShouldSimulateInterrupt(cx->isolate)) {
+ js::irregexp::IsolateClearShouldSimulateInterrupt(cx->isolate);
+ cx->requestInterrupt(InterruptReason::CallbackUrgent);
+ }
+#endif
+ if (result == RegExpRunStatus_Error) {
+ /* Execute can return RegExpRunStatus_Error:
+ *
+ * 1. If the native stack overflowed
+ * 2. If the backtrack stack overflowed
+ * 3. If an interrupt was requested during execution.
+ *
+ * In the first two cases, we want to throw an error. In the
+ * third case, we want to handle the interrupt and try again.
+ * We cap the number of times we will retry.
+ */
+ if (cx->isExceptionPending()) {
+ // If this regexp is being executed by recovery instructions
+ // while bailing out to handle an exception, there may already
+ // be an exception pending. If so, just return that exception
+ // instead of reporting a new one.
+ MOZ_ASSERT(alreadyThrowing);
+ return RegExpRunStatus_Error;
+ }
+ if (cx->hasAnyPendingInterrupt()) {
+ if (!CheckForInterrupt(cx)) {
+ return RegExpRunStatus_Error;
+ }
+ if (interruptRetries++ < maxInterruptRetries) {
+ // The initial execution may have been interpreted, or the
+ // interrupt may have triggered a GC that discarded jitcode.
+ // To maximize the chance of succeeding before being
+ // interrupted again, we want to ensure we are compiled.
+ if (!compileIfNecessary(cx, re, input,
+ RegExpShared::CodeKind::Jitcode)) {
+ return RegExpRunStatus_Error;
+ }
+ continue;
+ }
+ }
+ // If we have run out of retries, this regexp takes too long to execute.
+ ReportOverRecursed(cx);
+ return RegExpRunStatus_Error;
+ }
+
+ MOZ_ASSERT(result == RegExpRunStatus_Success ||
+ result == RegExpRunStatus_Success_NotFound);
+
+ return result;
+ } while (true);
+
+ MOZ_CRASH("Unreachable");
+}
+
+void RegExpShared::useAtomMatch(Handle<JSAtom*> pattern) {
+ MOZ_ASSERT(kind() == RegExpShared::Kind::Unparsed);
+ kind_ = RegExpShared::Kind::Atom;
+ patternAtom_ = pattern;
+ pairCount_ = 1;
+}
+
+void RegExpShared::useRegExpMatch(size_t pairCount) {
+ MOZ_ASSERT(kind() == RegExpShared::Kind::Unparsed);
+ kind_ = RegExpShared::Kind::RegExp;
+ pairCount_ = pairCount;
+ ticks_ = jit::JitOptions.regexpWarmUpThreshold;
+}
+
+/* static */
+void RegExpShared::InitializeNamedCaptures(JSContext* cx, HandleRegExpShared re,
+ uint32_t numNamedCaptures,
+ Handle<PlainObject*> templateObject,
+ uint32_t* captureIndices) {
+ MOZ_ASSERT(!re->groupsTemplate_);
+ MOZ_ASSERT(!re->namedCaptureIndices_);
+
+ re->numNamedCaptures_ = numNamedCaptures;
+ re->groupsTemplate_ = templateObject;
+ re->namedCaptureIndices_ = captureIndices;
+
+ uint32_t arraySize = numNamedCaptures * sizeof(uint32_t);
+ js::AddCellMemory(re, arraySize, MemoryUse::RegExpSharedNamedCaptureData);
+}
+
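+// Tier-up accounting: useRegExpMatch() seeds |ticks_| with
+// jit::JitOptions.regexpWarmUpThreshold, callers tick it down once per
+// execution, and once the counter reaches zero markedForTierUp() makes the
+// next compileIfNecessary() request jitcode.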
+void RegExpShared::tierUpTick() {
+ MOZ_ASSERT(kind() == RegExpShared::Kind::RegExp);
+ if (ticks_ > 0) {
+ ticks_--;
+ }
+}
+
+bool RegExpShared::markedForTierUp() const {
+ if (!IsNativeRegExpEnabled()) {
+ return false;
+ }
+ if (kind() != RegExpShared::Kind::RegExp) {
+ return false;
+ }
+ return ticks_ == 0;
+}
+
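+// Fast path for Kind::Atom regexps (patterns with no metacharacters, see
+// useAtomMatch): a plain substring search replaces the regexp engine. Sticky
+// patterns must match exactly at |start|; otherwise we search forward from it.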
+static RegExpRunStatus ExecuteAtomImpl(RegExpShared* re, JSLinearString* input,
+ size_t start, MatchPairs* matches) {
+ MOZ_ASSERT(re->pairCount() == 1);
+ size_t length = input->length();
+ size_t searchLength = re->patternAtom()->length();
+
+ if (re->sticky()) {
+ // First part checks size_t overflow.
+ if (searchLength + start < searchLength || searchLength + start > length) {
+ return RegExpRunStatus_Success_NotFound;
+ }
+ if (!HasSubstringAt(input, re->patternAtom(), start)) {
+ return RegExpRunStatus_Success_NotFound;
+ }
+
+ (*matches)[0].start = start;
+ (*matches)[0].limit = start + searchLength;
+ matches->checkAgainst(input->length());
+ return RegExpRunStatus_Success;
+ }
+
+ int res = StringFindPattern(input, re->patternAtom(), start);
+ if (res == -1) {
+ return RegExpRunStatus_Success_NotFound;
+ }
+
+ (*matches)[0].start = res;
+ (*matches)[0].limit = res + searchLength;
+ matches->checkAgainst(input->length());
+ return RegExpRunStatus_Success;
+}
+
+RegExpRunStatus js::ExecuteRegExpAtomRaw(RegExpShared* re,
+ JSLinearString* input, size_t start,
+ MatchPairs* matchPairs) {
+ AutoUnsafeCallWithABI unsafe;
+ return ExecuteAtomImpl(re, input, start, matchPairs);
+}
+
+/* static */
+RegExpRunStatus RegExpShared::executeAtom(MutableHandleRegExpShared re,
+ Handle<JSLinearString*> input,
+ size_t start,
+ VectorMatchPairs* matches) {
+ return ExecuteAtomImpl(re, input, start, matches);
+}
+
+size_t RegExpShared::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ size_t n = 0;
+
+ for (const auto& compilation : compilationArray) {
+ if (compilation.byteCode) {
+ n += mallocSizeOf(compilation.byteCode);
+ }
+ }
+
+ n += tables.sizeOfExcludingThis(mallocSizeOf);
+ for (size_t i = 0; i < tables.length(); i++) {
+ n += mallocSizeOf(tables[i].get());
+ }
+
+ return n;
+}
+
+/* RegExpRealm */
+
+RegExpRealm::RegExpRealm()
+ : optimizableRegExpPrototypeShape_(nullptr),
+ optimizableRegExpInstanceShape_(nullptr) {
+ for (auto& templateObj : matchResultTemplateObjects_) {
+ templateObj = nullptr;
+ }
+}
+
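+// Builds the template object whose shape is shared by all match results of
+// the given kind. The asserts below pin each property to its
+// MatchResultObject*Slot constant, which (judging by the offsetOf* accessors)
+// JIT-generated code reads directly.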
+ArrayObject* RegExpRealm::createMatchResultTemplateObject(
+ JSContext* cx, ResultTemplateKind kind) {
+ MOZ_ASSERT(!matchResultTemplateObjects_[kind]);
+
+ /* Create template array object */
+ Rooted<ArrayObject*> templateObject(
+ cx,
+ NewDenseUnallocatedArray(cx, RegExpObject::MaxPairCount, TenuredObject));
+ if (!templateObject) {
+ return nullptr;
+ }
+
+ if (kind == ResultTemplateKind::Indices) {
+ /* The |indices| array only has a |groups| property. */
+ RootedValue groupsVal(cx, UndefinedValue());
+ if (!NativeDefineDataProperty(cx, templateObject, cx->names().groups,
+ groupsVal, JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(templateObject->getLastProperty().slot() == IndicesGroupsSlot);
+
+ matchResultTemplateObjects_[kind].set(templateObject);
+ return matchResultTemplateObjects_[kind];
+ }
+
+ /* Set dummy index property */
+ RootedValue index(cx, Int32Value(0));
+ if (!NativeDefineDataProperty(cx, templateObject, cx->names().index, index,
+ JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(templateObject->getLastProperty().slot() ==
+ MatchResultObjectIndexSlot);
+
+ /* Set dummy input property */
+ RootedValue inputVal(cx, StringValue(cx->runtime()->emptyString));
+ if (!NativeDefineDataProperty(cx, templateObject, cx->names().input, inputVal,
+ JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(templateObject->getLastProperty().slot() ==
+ MatchResultObjectInputSlot);
+
+ /* Set dummy groups property */
+ RootedValue groupsVal(cx, UndefinedValue());
+ if (!NativeDefineDataProperty(cx, templateObject, cx->names().groups,
+ groupsVal, JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(templateObject->getLastProperty().slot() ==
+ MatchResultObjectGroupsSlot);
+
+ if (kind == ResultTemplateKind::WithIndices) {
+ /* Set dummy indices property */
+ RootedValue indicesVal(cx, UndefinedValue());
+ if (!NativeDefineDataProperty(cx, templateObject, cx->names().indices,
+ indicesVal, JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(templateObject->getLastProperty().slot() ==
+ MatchResultObjectIndicesSlot);
+ }
+
+ matchResultTemplateObjects_[kind].set(templateObject);
+
+ return matchResultTemplateObjects_[kind];
+}
+
+void RegExpRealm::traceWeak(JSTracer* trc) {
+ for (auto& templateObject : matchResultTemplateObjects_) {
+ TraceWeakEdge(trc, &templateObject,
+ "RegExpRealm::matchResultTemplateObject_");
+ }
+
+ TraceWeakEdge(trc, &optimizableRegExpPrototypeShape_,
+ "RegExpRealm::optimizableRegExpPrototypeShape_");
+
+ TraceWeakEdge(trc, &optimizableRegExpInstanceShape_,
+ "RegExpRealm::optimizableRegExpInstanceShape_");
+}
+
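+// Looks up (source, flags) in the zone's weak set, allocating and registering
+// a fresh RegExpShared on a miss. DependentAddPtr revalidates the table
+// pointer across newCell(), which can GC and rehash or move the set's storage.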
+RegExpShared* RegExpZone::get(JSContext* cx, Handle<JSAtom*> source,
+ RegExpFlags flags) {
+ DependentAddPtr<Set> p(cx, set_, Key(source, flags));
+ if (p) {
+ return *p;
+ }
+
+ auto* shared = cx->newCell<RegExpShared>(source, flags);
+ if (!shared) {
+ return nullptr;
+ }
+
+ if (!p.add(cx, set_, Key(source, flags), shared)) {
+ return nullptr;
+ }
+
+ return shared;
+}
+
+size_t RegExpZone::sizeOfIncludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + set_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+RegExpZone::RegExpZone(Zone* zone) : set_(zone, zone) {}
+
+/* Functions */
+
+JSObject* js::CloneRegExpObject(JSContext* cx, Handle<RegExpObject*> regex) {
+  // Unlike RegExpAlloc, all clones must use |regex|'s proto.
+ Rooted<TaggedProto> proto(cx, regex->staticPrototype());
+ Rooted<RegExpObject*> clone(
+ cx, NewObjectWithGivenTaggedProto<RegExpObject>(cx, proto));
+ if (!clone) {
+ return nullptr;
+ }
+
+ clone->clearShared();
+
+ clone->setShape(regex->shape());
+
+ RegExpShared* shared = RegExpObject::getShared(cx, regex);
+ if (!shared) {
+ return nullptr;
+ }
+
+ clone->initAndZeroLastIndex(shared->getSource(), shared->getFlags(), cx);
+ clone->setShared(shared);
+
+ return clone;
+}
+
+template <typename CharT>
+static bool ParseRegExpFlags(const CharT* chars, size_t length,
+ RegExpFlags* flagsOut, char16_t* invalidFlag) {
+ *flagsOut = RegExpFlag::NoFlags;
+
+ for (size_t i = 0; i < length; i++) {
+ uint8_t flag;
+ switch (chars[i]) {
+ case 'd':
+ flag = RegExpFlag::HasIndices;
+ break;
+ case 'g':
+ flag = RegExpFlag::Global;
+ break;
+ case 'i':
+ flag = RegExpFlag::IgnoreCase;
+ break;
+ case 'm':
+ flag = RegExpFlag::Multiline;
+ break;
+ case 's':
+ flag = RegExpFlag::DotAll;
+ break;
+ case 'u':
+ flag = RegExpFlag::Unicode;
+ break;
+ case 'y':
+ flag = RegExpFlag::Sticky;
+ break;
+ default:
+ *invalidFlag = chars[i];
+ return false;
+ }
+ if (*flagsOut & flag) {
+ *invalidFlag = chars[i];
+ return false;
+ }
+ *flagsOut |= flag;
+ }
+
+ return true;
+}
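+
+// For example (illustrative):
+//   RegExpFlags flags;
+//   char16_t bad;
+//   ParseRegExpFlags(chars, len, &flags, &bad);  // "gi" -> Global|IgnoreCase
+// A repeated flag ("gg") or an unknown one ("x") fails and reports the
+// offending character through |bad|.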
+
+bool js::ParseRegExpFlags(JSContext* cx, JSString* flagStr,
+ RegExpFlags* flagsOut) {
+ JSLinearString* linear = flagStr->ensureLinear(cx);
+ if (!linear) {
+ return false;
+ }
+
+ size_t len = linear->length();
+
+ bool ok;
+ char16_t invalidFlag;
+ if (linear->hasLatin1Chars()) {
+ AutoCheckCannotGC nogc;
+ ok = ::ParseRegExpFlags(linear->latin1Chars(nogc), len, flagsOut,
+ &invalidFlag);
+ } else {
+ AutoCheckCannotGC nogc;
+ ok = ::ParseRegExpFlags(linear->twoByteChars(nogc), len, flagsOut,
+ &invalidFlag);
+ }
+
+ if (!ok) {
+ JS::TwoByteChars range(&invalidFlag, 1);
+ UniqueChars utf8(JS::CharsToNewUTF8CharsZ(cx, range).c_str());
+ if (!utf8) {
+ return false;
+ }
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_REGEXP_FLAG, utf8.get());
+ return false;
+ }
+
+ return true;
+}
+
+JS::ubi::Node::Size JS::ubi::Concrete<RegExpShared>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return js::gc::Arena::thingSize(gc::AllocKind::REGEXP_SHARED) +
+ get().sizeOfExcludingThis(mallocSizeOf);
+}
+
+/*
+ * Regular Expressions.
+ */
+JS_PUBLIC_API JSObject* JS::NewRegExpObject(JSContext* cx, const char* bytes,
+ size_t length, RegExpFlags flags) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ UniqueTwoByteChars chars(InflateString(cx, bytes, length));
+ if (!chars) {
+ return nullptr;
+ }
+
+ return RegExpObject::create(cx, chars.get(), length, flags, GenericObject);
+}
+
+JS_PUBLIC_API JSObject* JS::NewUCRegExpObject(JSContext* cx,
+ const char16_t* chars,
+ size_t length,
+ RegExpFlags flags) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ return RegExpObject::create(cx, chars, length, flags, GenericObject);
+}
+
+JS_PUBLIC_API bool JS::SetRegExpInput(JSContext* cx, HandleObject obj,
+ HandleString input) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(input);
+
+ Handle<GlobalObject*> global = obj.as<GlobalObject>();
+ RegExpStatics* res = GlobalObject::getRegExpStatics(cx, global);
+ if (!res) {
+ return false;
+ }
+
+ res->reset(input);
+ return true;
+}
+
+JS_PUBLIC_API bool JS::ClearRegExpStatics(JSContext* cx, HandleObject obj) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_ASSERT(obj);
+
+ Handle<GlobalObject*> global = obj.as<GlobalObject>();
+ RegExpStatics* res = GlobalObject::getRegExpStatics(cx, global);
+ if (!res) {
+ return false;
+ }
+
+ res->clear();
+ return true;
+}
+
+JS_PUBLIC_API bool JS::ExecuteRegExp(JSContext* cx, HandleObject obj,
+ HandleObject reobj, const char16_t* chars,
+ size_t length, size_t* indexp, bool test,
+ MutableHandleValue rval) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ Handle<GlobalObject*> global = obj.as<GlobalObject>();
+ RegExpStatics* res = GlobalObject::getRegExpStatics(cx, global);
+ if (!res) {
+ return false;
+ }
+
+ Rooted<JSLinearString*> input(cx, NewStringCopyN<CanGC>(cx, chars, length));
+ if (!input) {
+ return false;
+ }
+
+ return ExecuteRegExpLegacy(cx, res, reobj.as<RegExpObject>(), input, indexp,
+ test, rval);
+}
+
+JS_PUBLIC_API bool JS::ExecuteRegExpNoStatics(JSContext* cx, HandleObject obj,
+ const char16_t* chars,
+ size_t length, size_t* indexp,
+ bool test,
+ MutableHandleValue rval) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ Rooted<JSLinearString*> input(cx, NewStringCopyN<CanGC>(cx, chars, length));
+ if (!input) {
+ return false;
+ }
+
+ return ExecuteRegExpLegacy(cx, nullptr, obj.as<RegExpObject>(), input, indexp,
+ test, rval);
+}
+
+JS_PUBLIC_API bool JS::ObjectIsRegExp(JSContext* cx, HandleObject obj,
+ bool* isRegExp) {
+ cx->check(obj);
+
+ ESClass cls;
+ if (!GetBuiltinClass(cx, obj, &cls)) {
+ return false;
+ }
+
+ *isRegExp = cls == ESClass::RegExp;
+ return true;
+}
+
+JS_PUBLIC_API RegExpFlags JS::GetRegExpFlags(JSContext* cx, HandleObject obj) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ RegExpShared* shared = RegExpToShared(cx, obj);
+ if (!shared) {
+ return RegExpFlag::NoFlags;
+ }
+ return shared->getFlags();
+}
+
+JS_PUBLIC_API JSString* JS::GetRegExpSource(JSContext* cx, HandleObject obj) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ RegExpShared* shared = RegExpToShared(cx, obj);
+ if (!shared) {
+ return nullptr;
+ }
+ return shared->getSource();
+}
+
+JS_PUBLIC_API bool JS::CheckRegExpSyntax(JSContext* cx, const char16_t* chars,
+ size_t length, RegExpFlags flags,
+ MutableHandleValue error) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ AutoReportFrontendContext fc(cx);
+ CompileOptions dummyOptions(cx);
+ frontend::DummyTokenStream dummyTokenStream(&fc, dummyOptions);
+
+ LifoAllocScope allocScope(&cx->tempLifoAlloc());
+
+ mozilla::Range<const char16_t> source(chars, length);
+ bool success = irregexp::CheckPatternSyntax(
+ cx->tempLifoAlloc(), cx->stackLimitForCurrentPrincipal(),
+ dummyTokenStream, source, flags);
+ error.set(UndefinedValue());
+ if (!success) {
+ if (!fc.convertToRuntimeErrorAndClear()) {
+ return false;
+ }
+ // We can fail because of OOM or over-recursion even if the syntax is valid.
+ if (cx->isThrowingOutOfMemory() || cx->isThrowingOverRecursed()) {
+ return false;
+ }
+
+ if (!cx->getPendingException(error)) {
+ return false;
+ }
+ cx->clearPendingException();
+ }
+ return true;
+}
diff --git a/js/src/vm/RegExpObject.h b/js/src/vm/RegExpObject.h
new file mode 100644
index 0000000000..a47294e727
--- /dev/null
+++ b/js/src/vm/RegExpObject.h
@@ -0,0 +1,223 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JavaScript RegExp objects. */
+
+#ifndef vm_RegExpObject_h
+#define vm_RegExpObject_h
+
+#include "builtin/SelfHostingDefines.h"
+#include "js/RegExpFlags.h"
+#include "proxy/Proxy.h"
+#include "vm/JSAtomState.h"
+#include "vm/JSContext.h"
+#include "vm/RegExpShared.h"
+#include "vm/Shape.h"
+
+/*
+ * JavaScript Regular Expressions
+ *
+ * There are several engine concepts associated with a single logical regexp:
+ *
+ * RegExpObject:
+ * The JS-visible object whose .[[Class]] equals "RegExp".
+ * RegExpShared:
+ * The compiled representation of the regexp (lazily created, cleared
+ * during some forms of GC).
+ * RegExpZone:
+ * Owns all RegExpShared instances in a zone.
+ */
+namespace js {
+
+extern RegExpObject* RegExpAlloc(JSContext* cx, NewObjectKind newKind,
+ HandleObject proto = nullptr);
+
+extern JSObject* CloneRegExpObject(JSContext* cx, Handle<RegExpObject*> regex);
+
+class RegExpObject : public NativeObject {
+ static const unsigned LAST_INDEX_SLOT = 0;
+ static const unsigned SOURCE_SLOT = 1;
+ static const unsigned FLAGS_SLOT = 2;
+
+ static_assert(RegExpObject::FLAGS_SLOT == REGEXP_FLAGS_SLOT,
+ "FLAGS_SLOT values should be in sync with self-hosted JS");
+
+ static RegExpObject* create(JSContext* cx, Handle<JSAtom*> source,
+ NewObjectKind newKind);
+
+ public:
+ static const unsigned SHARED_SLOT = 3;
+ static const unsigned RESERVED_SLOTS = 4;
+
+ static const JSClass class_;
+ static const JSClass protoClass_;
+
+ // The maximum number of pairs a MatchResult can have, without having to
+ // allocate a bigger MatchResult.
+ static const size_t MaxPairCount = 14;
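+  // (RegExpRealm::createMatchResultTemplateObject sizes its template arrays
+  // to this bound.)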
+
+ template <typename CharT>
+ static RegExpObject* create(JSContext* cx, const CharT* chars, size_t length,
+ JS::RegExpFlags flags, NewObjectKind newKind);
+
+  // This variant assumes that the characters have already been
+  // syntax checked.
+ static RegExpObject* createSyntaxChecked(JSContext* cx,
+ Handle<JSAtom*> source,
+ JS::RegExpFlags flags,
+ NewObjectKind newKind);
+
+ static RegExpObject* create(JSContext* cx, Handle<JSAtom*> source,
+ JS::RegExpFlags flags, NewObjectKind newKind);
+
+ /*
+ * Compute the initial shape to associate with fresh RegExp objects,
+ * encoding their initial properties. Return the shape after
+ * changing |obj|'s last property to it.
+ */
+ static SharedShape* assignInitialShape(JSContext* cx,
+ Handle<RegExpObject*> obj);
+
+ /* Accessors. */
+
+ static constexpr size_t lastIndexSlot() { return LAST_INDEX_SLOT; }
+
+ static constexpr size_t offsetOfLastIndex() {
+ return getFixedSlotOffset(lastIndexSlot());
+ }
+
+ static bool isInitialShape(RegExpObject* rx) {
+ // RegExpObject has a non-configurable lastIndex property, so there must be
+ // at least one property. Even though lastIndex is non-configurable, it can
+ // be made non-writable, so we have to check if it's still writable.
+ MOZ_ASSERT(!rx->empty());
+ PropertyInfoWithKey prop = rx->getLastProperty();
+ return prop.isDataProperty() && prop.slot() == LAST_INDEX_SLOT &&
+ prop.writable();
+ }
+
+ const Value& getLastIndex() const { return getReservedSlot(LAST_INDEX_SLOT); }
+
+ void setLastIndex(JSContext* cx, int32_t lastIndex) {
+ MOZ_ASSERT(lastIndex >= 0);
+ MOZ_ASSERT(lookupPure(cx->names().lastIndex)->writable(),
+ "can't infallibly set a non-writable lastIndex on a "
+ "RegExp that's been exposed to script");
+ setReservedSlot(LAST_INDEX_SLOT, Int32Value(lastIndex));
+ }
+ void zeroLastIndex(JSContext* cx) { setLastIndex(cx, 0); }
+
+ static JSLinearString* toString(JSContext* cx, Handle<RegExpObject*> obj);
+
+ JSAtom* getSource() const {
+ return &getReservedSlot(SOURCE_SLOT).toString()->asAtom();
+ }
+
+ void setSource(JSAtom* source) {
+ setReservedSlot(SOURCE_SLOT, StringValue(source));
+ }
+
+ /* Flags. */
+
+ static constexpr size_t flagsSlot() { return FLAGS_SLOT; }
+
+ static constexpr size_t offsetOfFlags() {
+ return getFixedSlotOffset(flagsSlot());
+ }
+
+ JS::RegExpFlags getFlags() const {
+ return JS::RegExpFlags(getFixedSlot(FLAGS_SLOT).toInt32());
+ }
+ void setFlags(JS::RegExpFlags flags) {
+ setFixedSlot(FLAGS_SLOT, Int32Value(flags.value()));
+ }
+
+ bool hasIndices() const { return getFlags().hasIndices(); }
+ bool global() const { return getFlags().global(); }
+ bool ignoreCase() const { return getFlags().ignoreCase(); }
+ bool multiline() const { return getFlags().multiline(); }
+ bool dotAll() const { return getFlags().dotAll(); }
+ bool unicode() const { return getFlags().unicode(); }
+ bool sticky() const { return getFlags().sticky(); }
+
+ bool isGlobalOrSticky() const {
+ JS::RegExpFlags flags = getFlags();
+ return flags.global() || flags.sticky();
+ }
+
+ static bool isOriginalFlagGetter(JSNative native, JS::RegExpFlags* mask);
+
+ static RegExpShared* getShared(JSContext* cx, Handle<RegExpObject*> regexp);
+
+ bool hasShared() const { return !getFixedSlot(SHARED_SLOT).isUndefined(); }
+
+ RegExpShared* getShared() const {
+ return static_cast<RegExpShared*>(getFixedSlot(SHARED_SLOT).toGCThing());
+ }
+
+ void setShared(RegExpShared* shared) {
+ MOZ_ASSERT(shared);
+ setFixedSlot(SHARED_SLOT, PrivateGCThingValue(shared));
+ }
+
+ void clearShared() { setFixedSlot(SHARED_SLOT, UndefinedValue()); }
+
+ void initIgnoringLastIndex(JSAtom* source, JS::RegExpFlags flags);
+
+ // NOTE: This method is *only* safe to call on RegExps that haven't been
+ // exposed to script, because it requires that the "lastIndex"
+ // property be writable.
+ void initAndZeroLastIndex(JSAtom* source, JS::RegExpFlags flags,
+ JSContext* cx);
+
+#ifdef DEBUG
+ [[nodiscard]] static bool dumpBytecode(JSContext* cx,
+ Handle<RegExpObject*> regexp,
+ Handle<JSLinearString*> input);
+#endif
+
+ private:
+ /*
+ * Precondition: the syntax for |source| has already been validated.
+ * Side effect: sets the private field.
+ */
+ static RegExpShared* createShared(JSContext* cx,
+ Handle<RegExpObject*> regexp);
+
+ /* Call setShared in preference to setPrivate. */
+ void setPrivate(void* priv) = delete;
+};
+
+/*
+ * Parse regexp flags. Report an error and return false if an invalid
+ * sequence of flags is encountered (repeat/invalid flag).
+ *
+ * N.B. flagStr must be rooted.
+ */
+bool ParseRegExpFlags(JSContext* cx, JSString* flagStr,
+ JS::RegExpFlags* flagsOut);
+
+// Assuming GetBuiltinClass(obj) is ESClass::RegExp, return a RegExpShared for
+// obj.
+inline RegExpShared* RegExpToShared(JSContext* cx, HandleObject obj) {
+ if (obj->is<RegExpObject>()) {
+ return RegExpObject::getShared(cx, obj.as<RegExpObject>());
+ }
+
+ return Proxy::regexp_toShared(cx, obj);
+}
+
+/* Escape all slashes and newlines in the given string. */
+extern JSLinearString* EscapeRegExpPattern(JSContext* cx, Handle<JSAtom*> src);
+
+template <typename CharT>
+extern bool HasRegExpMetaChars(const CharT* chars, size_t length);
+
+extern bool StringHasRegExpMetaChars(JSLinearString* str);
+
+} /* namespace js */
+
+#endif /* vm_RegExpObject_h */
diff --git a/js/src/vm/RegExpShared.h b/js/src/vm/RegExpShared.h
new file mode 100644
index 0000000000..5b6111c557
--- /dev/null
+++ b/js/src/vm/RegExpShared.h
@@ -0,0 +1,449 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * The compiled representation of a RegExp, potentially shared among RegExp
+ * instances created during separate evaluations of a single RegExp literal in
+ * source code.
+ */
+
+#ifndef vm_RegExpShared_h
+#define vm_RegExpShared_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "gc/Barrier.h"
+#include "gc/Policy.h"
+#include "gc/ZoneAllocator.h"
+#include "irregexp/RegExpTypes.h"
+#include "jit/JitCode.h"
+#include "jit/JitOptions.h"
+#include "js/AllocPolicy.h"
+#include "js/RegExpFlags.h" // JS::RegExpFlag, JS::RegExpFlags
+#include "js/UbiNode.h"
+#include "js/Vector.h"
+#include "vm/ArrayObject.h"
+
+namespace js {
+
+class ArrayObject;
+class PlainObject;
+class RegExpRealm;
+class RegExpShared;
+class RegExpStatics;
+class VectorMatchPairs;
+
+using RootedRegExpShared = JS::Rooted<RegExpShared*>;
+using HandleRegExpShared = JS::Handle<RegExpShared*>;
+using MutableHandleRegExpShared = JS::MutableHandle<RegExpShared*>;
+
+enum RegExpRunStatus : int32_t {
+ RegExpRunStatus_Error = -1,
+ RegExpRunStatus_Success = 1,
+ RegExpRunStatus_Success_NotFound = 0,
+};
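+
+// Note: the explicit int32_t values matter; helpers such as
+// ExecuteRegExpAtomRaw() hand this status across the JIT/C++ ABI boundary as
+// a raw int32.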
+
+inline bool IsNativeRegExpEnabled() {
+ return jit::HasJitBackend() && jit::JitOptions.nativeRegExp;
+}
+
+/*
+ * A RegExpShared is the compiled representation of a regexp. A RegExpShared is
+ * potentially pointed to by multiple RegExpObjects. Additionally, C++ code may
+ * have pointers to RegExpShareds on the stack. The RegExpShareds are kept in a
+ * table so that they can be reused when compiling the same regex string.
+ *
+ * To save memory, a RegExpShared is not created for a RegExpObject until it is
+ * needed for execution. When a RegExpShared needs to be created, it is looked
+ * up in a per-compartment table to allow reuse between objects.
+ *
+ * During a GC, RegExpShared instances are marked and swept like GC things.
+ * Usually, RegExpObjects clear their pointers to their RegExpShareds rather
+ * than explicitly tracing them, so that the RegExpShared and any jitcode can
+ * be reclaimed quicker. However, the RegExpShareds are traced through by
+ * objects when we are preserving jitcode in their zone, to avoid the same
+ * recompilation inefficiencies as normal Ion and baseline compilation.
+ */
+class RegExpShared
+ : public gc::CellWithTenuredGCPointer<gc::TenuredCell, JSAtom> {
+ friend class js::gc::CellAllocator;
+
+ public:
+ enum class Kind { Unparsed, Atom, RegExp };
+ enum class CodeKind { Bytecode, Jitcode, Any };
+
+ using ByteCode = js::irregexp::ByteArrayData;
+ using JitCodeTable = js::irregexp::ByteArray;
+ using JitCodeTables = Vector<JitCodeTable, 0, SystemAllocPolicy>;
+
+ private:
+ friend class RegExpStatics;
+ friend class RegExpZone;
+
+ struct RegExpCompilation {
+ HeapPtr<jit::JitCode*> jitCode;
+ ByteCode* byteCode = nullptr;
+
+ bool compiled(CodeKind kind = CodeKind::Any) const {
+ switch (kind) {
+ case CodeKind::Bytecode:
+ return !!byteCode;
+ case CodeKind::Jitcode:
+ return !!jitCode;
+ case CodeKind::Any:
+ return !!byteCode || !!jitCode;
+ }
+ MOZ_CRASH("Unreachable");
+ }
+
+ size_t byteCodeLength() const {
+ MOZ_ASSERT(byteCode);
+ return byteCode->length;
+ }
+ };
+
+ public:
+ /* Source to the RegExp, for lazy compilation. Stored in the cell header. */
+ JSAtom* getSource() const { return headerPtr(); }
+
+ private:
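+  // One compilation per character width (Latin-1 and two-byte), indexed via
+  // CompilationIndex(); each holds independent bytecode and jitcode.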
+ RegExpCompilation compilationArray[2];
+
+ uint32_t pairCount_;
+ JS::RegExpFlags flags;
+
+ RegExpShared::Kind kind_ = Kind::Unparsed;
+ GCPtr<JSAtom*> patternAtom_;
+ uint32_t maxRegisters_ = 0;
+ uint32_t ticks_ = 0;
+
+ uint32_t numNamedCaptures_ = {};
+ uint32_t* namedCaptureIndices_ = {};
+ GCPtr<PlainObject*> groupsTemplate_ = {};
+
+ static int CompilationIndex(bool latin1) { return latin1 ? 0 : 1; }
+
+ // Tables referenced by JIT code.
+ JitCodeTables tables;
+
+ /* Internal functions. */
+ RegExpShared(JSAtom* source, JS::RegExpFlags flags);
+
+ const RegExpCompilation& compilation(bool latin1) const {
+ return compilationArray[CompilationIndex(latin1)];
+ }
+
+ RegExpCompilation& compilation(bool latin1) {
+ return compilationArray[CompilationIndex(latin1)];
+ }
+
+ public:
+ ~RegExpShared() = delete;
+
+ static bool compileIfNecessary(JSContext* cx, MutableHandleRegExpShared res,
+ Handle<JSLinearString*> input, CodeKind code);
+
+ static RegExpRunStatus executeAtom(MutableHandleRegExpShared re,
+ Handle<JSLinearString*> input,
+ size_t start, VectorMatchPairs* matches);
+
+ // Execute this RegExp on input starting from searchIndex, filling in matches.
+ static RegExpRunStatus execute(JSContext* cx, MutableHandleRegExpShared res,
+ Handle<JSLinearString*> input,
+ size_t searchIndex, VectorMatchPairs* matches);
+
+ // Register a table with this RegExpShared, and take ownership.
+ bool addTable(JitCodeTable table) { return tables.append(std::move(table)); }
+
+ /* Accessors */
+
+ size_t pairCount() const {
+ MOZ_ASSERT(kind() != Kind::Unparsed);
+ return pairCount_;
+ }
+
+ RegExpShared::Kind kind() const { return kind_; }
+
+ // Use simple string matching for this regexp.
+ void useAtomMatch(Handle<JSAtom*> pattern);
+
+ // Use the regular expression engine for this regexp.
+ void useRegExpMatch(size_t parenCount);
+
+ static void InitializeNamedCaptures(JSContext* cx, HandleRegExpShared re,
+ uint32_t numNamedCaptures,
+ Handle<PlainObject*> templateObject,
+ uint32_t* captureIndices);
+ PlainObject* getGroupsTemplate() { return groupsTemplate_; }
+
+ void tierUpTick();
+ bool markedForTierUp() const;
+
+ void setByteCode(ByteCode* code, bool latin1) {
+ compilation(latin1).byteCode = code;
+ }
+ ByteCode* getByteCode(bool latin1) const {
+ return compilation(latin1).byteCode;
+ }
+ void setJitCode(jit::JitCode* code, bool latin1) {
+ compilation(latin1).jitCode = code;
+ }
+ jit::JitCode* getJitCode(bool latin1) const {
+ return compilation(latin1).jitCode;
+ }
+ uint32_t getMaxRegisters() const { return maxRegisters_; }
+ void updateMaxRegisters(uint32_t numRegisters) {
+ maxRegisters_ = std::max(maxRegisters_, numRegisters);
+ }
+
+ uint32_t numNamedCaptures() const { return numNamedCaptures_; }
+ int32_t getNamedCaptureIndex(uint32_t idx) const {
+ MOZ_ASSERT(idx < numNamedCaptures());
+ MOZ_ASSERT(namedCaptureIndices_);
+ return namedCaptureIndices_[idx];
+ }
+
+ JSAtom* patternAtom() const { return patternAtom_; }
+
+ JS::RegExpFlags getFlags() const { return flags; }
+
+ bool hasIndices() const { return flags.hasIndices(); }
+ bool global() const { return flags.global(); }
+ bool ignoreCase() const { return flags.ignoreCase(); }
+ bool multiline() const { return flags.multiline(); }
+ bool dotAll() const { return flags.dotAll(); }
+ bool unicode() const { return flags.unicode(); }
+ bool sticky() const { return flags.sticky(); }
+
+ bool isCompiled(bool latin1, CodeKind codeKind = CodeKind::Any) const {
+ return compilation(latin1).compiled(codeKind);
+ }
+ bool isCompiled() const { return isCompiled(true) || isCompiled(false); }
+
+ void traceChildren(JSTracer* trc);
+ void discardJitCode();
+ void finalize(JS::GCContext* gcx);
+
+ static size_t offsetOfSource() { return offsetOfHeaderPtr(); }
+
+ static size_t offsetOfPatternAtom() {
+ return offsetof(RegExpShared, patternAtom_);
+ }
+
+ static size_t offsetOfFlags() { return offsetof(RegExpShared, flags); }
+
+ static size_t offsetOfPairCount() {
+ return offsetof(RegExpShared, pairCount_);
+ }
+
+ static size_t offsetOfJitCode(bool latin1) {
+ return offsetof(RegExpShared, compilationArray) +
+ (CompilationIndex(latin1) * sizeof(RegExpCompilation)) +
+ offsetof(RegExpCompilation, jitCode);
+ }
+
+ static size_t offsetOfGroupsTemplate() {
+ return offsetof(RegExpShared, groupsTemplate_);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+#ifdef DEBUG
+ static bool dumpBytecode(JSContext* cx, MutableHandleRegExpShared res,
+ Handle<JSLinearString*> input);
+#endif
+
+ public:
+ static const JS::TraceKind TraceKind = JS::TraceKind::RegExpShared;
+};
+
+class RegExpZone {
+ struct Key {
+ JSAtom* atom = nullptr;
+ JS::RegExpFlags flags = JS::RegExpFlag::NoFlags;
+
+ Key() = default;
+ Key(JSAtom* atom, JS::RegExpFlags flags) : atom(atom), flags(flags) {}
+ MOZ_IMPLICIT Key(const WeakHeapPtr<RegExpShared*>& shared)
+ : atom(shared.unbarrieredGet()->getSource()),
+ flags(shared.unbarrieredGet()->getFlags()) {}
+
+ using Lookup = Key;
+ static HashNumber hash(const Lookup& l) {
+ HashNumber hash = DefaultHasher<JSAtom*>::hash(l.atom);
+ return mozilla::AddToHash(hash, l.flags.value());
+ }
+ static bool match(Key l, Key r) {
+ return l.atom == r.atom && l.flags == r.flags;
+ }
+ };
+
+ /*
+ * The set of all RegExpShareds in the zone. On every GC, every RegExpShared
+ * that was not marked is deleted and removed from the set.
+ */
+ using Set = JS::WeakCache<
+ JS::GCHashSet<WeakHeapPtr<RegExpShared*>, Key, ZoneAllocPolicy>>;
+ Set set_;
+
+ public:
+ explicit RegExpZone(Zone* zone);
+
+ ~RegExpZone() { MOZ_ASSERT(set_.empty()); }
+
+ bool empty() const { return set_.empty(); }
+
+ RegExpShared* maybeGet(JSAtom* source, JS::RegExpFlags flags) const {
+ Set::Ptr p = set_.lookup(Key(source, flags));
+ return p ? *p : nullptr;
+ }
+
+ RegExpShared* get(JSContext* cx, Handle<JSAtom*> source,
+ JS::RegExpFlags flags);
+
+#ifdef DEBUG
+ void clear() { set_.clear(); }
+#endif
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+class RegExpRealm {
+ public:
+ enum ResultTemplateKind { Normal, WithIndices, Indices, NumKinds };
+
+ private:
+ /*
+ * The template objects that the result of re.exec() is based on, if
+ * there is a result. These are used in CreateRegExpMatchResult.
+ * There are three template objects, each of which is an ArrayObject
+ * with some additional properties. We decide which to use based on
+ * the |hasIndices| (/d) flag.
+ *
+ * Normal: Has |index|, |input|, and |groups| properties.
+ * Used for the result object if |hasIndices| is not set.
+ *
+ * WithIndices: Has |index|, |input|, |groups|, and |indices| properties.
+ * Used for the result object if |hasIndices| is set.
+ *
+ * Indices: Has a |groups| property. If |hasIndices| is set, used
+ * for the |.indices| property of the result object.
+ */
+ WeakHeapPtr<ArrayObject*>
+ matchResultTemplateObjects_[ResultTemplateKind::NumKinds];
+
+ /*
+ * The shape of RegExp.prototype object that satisfies following:
+ * * RegExp.prototype.flags getter is not modified
+ * * RegExp.prototype.global getter is not modified
+ * * RegExp.prototype.ignoreCase getter is not modified
+ * * RegExp.prototype.multiline getter is not modified
+ * * RegExp.prototype.dotAll getter is not modified
+ * * RegExp.prototype.sticky getter is not modified
+ * * RegExp.prototype.unicode getter is not modified
+ * * RegExp.prototype.exec is an own data property
+ * * RegExp.prototype[@@match] is an own data property
+ * * RegExp.prototype[@@search] is an own data property
+ */
+ WeakHeapPtr<Shape*> optimizableRegExpPrototypeShape_;
+
+ /*
+ * The shape of RegExp instance that satisfies following:
+ * * lastProperty is lastIndex
+ * * prototype is RegExp.prototype
+ */
+ WeakHeapPtr<Shape*> optimizableRegExpInstanceShape_;
+
+ ArrayObject* createMatchResultTemplateObject(JSContext* cx,
+ ResultTemplateKind kind);
+
+ public:
+ explicit RegExpRealm();
+
+ void traceWeak(JSTracer* trc);
+
+ static const size_t MatchResultObjectIndexSlot = 0;
+ static const size_t MatchResultObjectInputSlot = 1;
+ static const size_t MatchResultObjectGroupsSlot = 2;
+ static const size_t MatchResultObjectIndicesSlot = 3;
+
+ static const size_t IndicesGroupsSlot = 0;
+
+ static size_t offsetOfMatchResultObjectIndexSlot() {
+ return sizeof(Value) * MatchResultObjectIndexSlot;
+ }
+ static size_t offsetOfMatchResultObjectInputSlot() {
+ return sizeof(Value) * MatchResultObjectInputSlot;
+ }
+ static size_t offsetOfMatchResultObjectGroupsSlot() {
+ return sizeof(Value) * MatchResultObjectGroupsSlot;
+ }
+ static size_t offsetOfMatchResultObjectIndicesSlot() {
+ return sizeof(Value) * MatchResultObjectIndicesSlot;
+ }
+
+ /* Get or create template object used to base the result of .exec() on. */
+ ArrayObject* getOrCreateMatchResultTemplateObject(
+ JSContext* cx, ResultTemplateKind kind = ResultTemplateKind::Normal) {
+ if (matchResultTemplateObjects_[kind]) {
+ return matchResultTemplateObjects_[kind];
+ }
+ return createMatchResultTemplateObject(cx, kind);
+ }
+
+ Shape* getOptimizableRegExpPrototypeShape() {
+ return optimizableRegExpPrototypeShape_;
+ }
+ void setOptimizableRegExpPrototypeShape(Shape* shape) {
+ optimizableRegExpPrototypeShape_ = shape;
+ }
+ Shape* getOptimizableRegExpInstanceShape() {
+ return optimizableRegExpInstanceShape_;
+ }
+ void setOptimizableRegExpInstanceShape(Shape* shape) {
+ optimizableRegExpInstanceShape_ = shape;
+ }
+
+ static size_t offsetOfOptimizableRegExpPrototypeShape() {
+ return offsetof(RegExpRealm, optimizableRegExpPrototypeShape_);
+ }
+ static size_t offsetOfOptimizableRegExpInstanceShape() {
+ return offsetof(RegExpRealm, optimizableRegExpInstanceShape_);
+ }
+};
+
+RegExpRunStatus ExecuteRegExpAtomRaw(RegExpShared* re, JSLinearString* input,
+ size_t start, MatchPairs* matchPairs);
+
+} /* namespace js */
+
+namespace JS {
+namespace ubi {
+
+template <>
+class Concrete<js::RegExpShared> : TracerConcrete<js::RegExpShared> {
+ protected:
+ explicit Concrete(js::RegExpShared* ptr)
+ : TracerConcrete<js::RegExpShared>(ptr) {}
+
+ public:
+ static void construct(void* storage, js::RegExpShared* ptr) {
+ new (storage) Concrete(ptr);
+ }
+
+ CoarseType coarseType() const final { return CoarseType::Other; }
+
+ Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+ const char16_t* typeName() const override { return concreteTypeName; }
+ static const char16_t concreteTypeName[];
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif /* vm_RegExpShared_h */
diff --git a/js/src/vm/RegExpStatics.cpp b/js/src/vm/RegExpStatics.cpp
new file mode 100644
index 0000000000..e32d18cc97
--- /dev/null
+++ b/js/src/vm/RegExpStatics.cpp
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/RegExpStatics.h"
+
+#include "gc/Zone.h"
+#include "vm/RegExpShared.h"
+
+using namespace js;
+
+// static
+UniquePtr<RegExpStatics> RegExpStatics::create(JSContext* cx) {
+ return cx->make_unique<RegExpStatics>();
+}
+
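+// Re-materializes match results on demand: while |pendingLazyEvaluation| is
+// set, only (source, flags, input, index) are recorded, and the regexp is
+// re-run here the first time a consumer actually needs the match pairs.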
+bool RegExpStatics::executeLazy(JSContext* cx) {
+ if (!pendingLazyEvaluation) {
+ return true;
+ }
+
+ MOZ_ASSERT(lazySource);
+ MOZ_ASSERT(matchesInput);
+ MOZ_ASSERT(lazyIndex != size_t(-1));
+
+ /* Retrieve or create the RegExpShared in this zone. */
+ Rooted<JSAtom*> source(cx, lazySource);
+ RootedRegExpShared shared(cx,
+ cx->zone()->regExps().get(cx, source, lazyFlags));
+ if (!shared) {
+ return false;
+ }
+
+ /*
+ * It is not necessary to call aboutToWrite(): evaluation of
+ * implicit copies is safe.
+ */
+
+ /* Execute the full regular expression. */
+ Rooted<JSLinearString*> input(cx, matchesInput);
+ RegExpRunStatus status =
+ RegExpShared::execute(cx, &shared, input, lazyIndex, &this->matches);
+ if (status == RegExpRunStatus_Error) {
+ return false;
+ }
+
+ /*
+ * RegExpStatics are only updated on successful (matching) execution.
+ * Re-running the same expression must therefore produce a matching result.
+ */
+ MOZ_ASSERT(status == RegExpRunStatus_Success);
+
+ /* Unset lazy state and remove rooted values that now have no use. */
+ pendingLazyEvaluation = false;
+ lazySource = nullptr;
+ lazyIndex = size_t(-1);
+
+ return true;
+}
diff --git a/js/src/vm/RegExpStatics.h b/js/src/vm/RegExpStatics.h
new file mode 100644
index 0000000000..f34ae2dc9a
--- /dev/null
+++ b/js/src/vm/RegExpStatics.h
@@ -0,0 +1,307 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_RegExpStatics_h
+#define vm_RegExpStatics_h
+
+#include "js/RegExpFlags.h"
+#include "vm/JSContext.h"
+#include "vm/MatchPairs.h"
+#include "vm/Runtime.h"
+
+namespace js {
+
+class RegExpStatics {
+ /* The latest RegExp output, set after execution. */
+ VectorMatchPairs matches;
+ HeapPtr<JSLinearString*> matchesInput;
+
+ /*
+ * The previous RegExp input, used to resolve lazy state.
+ * A raw RegExpShared cannot be stored because it may be in
+ * a different compartment via evalcx().
+ */
+ HeapPtr<JSAtom*> lazySource;
+ JS::RegExpFlags lazyFlags;
+ size_t lazyIndex;
+
+ /* The latest RegExp input, set before execution. */
+ HeapPtr<JSString*> pendingInput;
+
+ /*
+ * If non-zero, |matchesInput| and the |lazy*| fields may be used
+ * to replay the last executed RegExp, and |matches| is invalid.
+ */
+ int32_t pendingLazyEvaluation;
+
+ public:
+ RegExpStatics() { clear(); }
+ static UniquePtr<RegExpStatics> create(JSContext* cx);
+
+ private:
+ bool executeLazy(JSContext* cx);
+
+ inline void checkInvariants();
+
+ /*
+ * Check whether a match for pair |pairNum| occurred. If so, allocate and
+ * store the match string in |*out|; otherwise place |undefined| in |*out|.
+ */
+ bool makeMatch(JSContext* cx, size_t pairNum, MutableHandleValue out);
+ bool createDependent(JSContext* cx, size_t start, size_t end,
+ MutableHandleValue out);
+
+ public:
+ /* Mutators. */
+ inline bool updateFromMatchPairs(JSContext* cx, JSLinearString* input,
+ VectorMatchPairs& newPairs);
+
+ inline void clear();
+
+ /* Corresponds to JSAPI functionality to set the pending RegExp input. */
+ void reset(JSString* newInput) {
+ clear();
+ pendingInput = newInput;
+ checkInvariants();
+ }
+
+ inline void setPendingInput(JSString* newInput);
+
+ public:
+ void trace(JSTracer* trc) {
+ /*
+ * Changes to this function must also be reflected in
+ * RegExpStatics::AutoRooter::trace().
+ */
+ TraceNullableEdge(trc, &matchesInput, "res->matchesInput");
+ TraceNullableEdge(trc, &lazySource, "res->lazySource");
+ TraceNullableEdge(trc, &pendingInput, "res->pendingInput");
+ }
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this) + matches.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ /* Value creators. */
+
+ bool createPendingInput(JSContext* cx, MutableHandleValue out);
+ bool createLastMatch(JSContext* cx, MutableHandleValue out);
+ bool createLastParen(JSContext* cx, MutableHandleValue out);
+ bool createParen(JSContext* cx, size_t pairNum, MutableHandleValue out);
+ bool createLeftContext(JSContext* cx, MutableHandleValue out);
+ bool createRightContext(JSContext* cx, MutableHandleValue out);
+
+ static size_t offsetOfPendingInput() {
+ return offsetof(RegExpStatics, pendingInput);
+ }
+
+ static size_t offsetOfMatchesInput() {
+ return offsetof(RegExpStatics, matchesInput);
+ }
+
+ static size_t offsetOfLazySource() {
+ return offsetof(RegExpStatics, lazySource);
+ }
+
+ static size_t offsetOfLazyFlags() {
+ return offsetof(RegExpStatics, lazyFlags);
+ }
+
+ static size_t offsetOfLazyIndex() {
+ return offsetof(RegExpStatics, lazyIndex);
+ }
+
+ static size_t offsetOfPendingLazyEvaluation() {
+ return offsetof(RegExpStatics, pendingLazyEvaluation);
+ }
+};
+
+inline bool RegExpStatics::createDependent(JSContext* cx, size_t start,
+ size_t end, MutableHandleValue out) {
+ /* Private function: caller must perform lazy evaluation. */
+ MOZ_ASSERT(!pendingLazyEvaluation);
+
+ MOZ_ASSERT(start <= end);
+ MOZ_ASSERT(end <= matchesInput->length());
+ JSString* str = NewDependentString(cx, matchesInput, start, end - start);
+ if (!str) {
+ return false;
+ }
+ out.setString(str);
+ return true;
+}
+
+inline bool RegExpStatics::createPendingInput(JSContext* cx,
+ MutableHandleValue out) {
+ /* Lazy evaluation need not be resolved to return the input. */
+ out.setString(pendingInput ? pendingInput.get()
+ : cx->runtime()->emptyString.ref());
+ return true;
+}
+
+inline bool RegExpStatics::makeMatch(JSContext* cx, size_t pairNum,
+ MutableHandleValue out) {
+ /* Private function: caller must perform lazy evaluation. */
+ MOZ_ASSERT(!pendingLazyEvaluation);
+
+ if (matches.empty() || pairNum >= matches.pairCount() ||
+ matches[pairNum].isUndefined()) {
+ out.setUndefined();
+ return true;
+ }
+
+ const MatchPair& pair = matches[pairNum];
+ return createDependent(cx, pair.start, pair.limit, out);
+}
+
+inline bool RegExpStatics::createLastMatch(JSContext* cx,
+ MutableHandleValue out) {
+ if (!executeLazy(cx)) {
+ return false;
+ }
+ return makeMatch(cx, 0, out);
+}
+
+inline bool RegExpStatics::createLastParen(JSContext* cx,
+ MutableHandleValue out) {
+ if (!executeLazy(cx)) {
+ return false;
+ }
+
+ if (matches.empty() || matches.pairCount() == 1) {
+ out.setString(cx->runtime()->emptyString);
+ return true;
+ }
+ const MatchPair& pair = matches[matches.pairCount() - 1];
+ if (pair.start == -1) {
+ out.setString(cx->runtime()->emptyString);
+ return true;
+ }
+ MOZ_ASSERT(pair.start >= 0 && pair.limit >= 0);
+ MOZ_ASSERT(pair.limit >= pair.start);
+ return createDependent(cx, pair.start, pair.limit, out);
+}
+
+inline bool RegExpStatics::createParen(JSContext* cx, size_t pairNum,
+ MutableHandleValue out) {
+ MOZ_ASSERT(pairNum >= 1);
+ if (!executeLazy(cx)) {
+ return false;
+ }
+
+ if (matches.empty() || pairNum >= matches.pairCount()) {
+ out.setString(cx->runtime()->emptyString);
+ return true;
+ }
+ return makeMatch(cx, pairNum, out);
+}
+
+inline bool RegExpStatics::createLeftContext(JSContext* cx,
+ MutableHandleValue out) {
+ if (!executeLazy(cx)) {
+ return false;
+ }
+
+ if (matches.empty()) {
+ out.setString(cx->runtime()->emptyString);
+ return true;
+ }
+ if (matches[0].start < 0) {
+ out.setUndefined();
+ return true;
+ }
+ return createDependent(cx, 0, matches[0].start, out);
+}
+
+inline bool RegExpStatics::createRightContext(JSContext* cx,
+ MutableHandleValue out) {
+ if (!executeLazy(cx)) {
+ return false;
+ }
+
+ if (matches.empty()) {
+ out.setString(cx->runtime()->emptyString);
+ return true;
+ }
+ if (matches[0].limit < 0) {
+ out.setUndefined();
+ return true;
+ }
+ return createDependent(cx, matches[0].limit, matchesInput->length(), out);
+}
+
+inline bool RegExpStatics::updateFromMatchPairs(JSContext* cx,
+ JSLinearString* input,
+ VectorMatchPairs& newPairs) {
+ MOZ_ASSERT(input);
+
+ /* Unset all lazy state. */
+ pendingLazyEvaluation = false;
+ this->lazySource = nullptr;
+ this->lazyIndex = size_t(-1);
+
+ BarrieredSetPair<JSString, JSLinearString>(cx->zone(), pendingInput, input,
+ matchesInput, input);
+
+ if (!matches.initArrayFrom(newPairs)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+inline void RegExpStatics::clear() {
+ matches.forgetArray();
+ matchesInput = nullptr;
+ lazySource = nullptr;
+ lazyFlags = JS::RegExpFlag::NoFlags;
+ lazyIndex = size_t(-1);
+ pendingInput = nullptr;
+ pendingLazyEvaluation = false;
+}
+
+inline void RegExpStatics::setPendingInput(JSString* newInput) {
+ pendingInput = newInput;
+}
+
+inline void RegExpStatics::checkInvariants() {
+#ifdef DEBUG
+ if (pendingLazyEvaluation) {
+ MOZ_ASSERT(lazySource);
+ MOZ_ASSERT(matchesInput);
+ MOZ_ASSERT(lazyIndex != size_t(-1));
+ return;
+ }
+
+ if (matches.empty()) {
+ MOZ_ASSERT(!matchesInput);
+ return;
+ }
+
+ /* Pair count is non-zero, so there must be match pairs input. */
+ MOZ_ASSERT(matchesInput);
+ size_t mpiLen = matchesInput->length();
+
+ /* Both members of the first pair must be non-negative. */
+ MOZ_ASSERT(!matches[0].isUndefined());
+ MOZ_ASSERT(matches[0].limit >= 0);
+
+ /* Present pairs must be valid. */
+ for (size_t i = 0; i < matches.pairCount(); i++) {
+ if (matches[i].isUndefined()) {
+ continue;
+ }
+ const MatchPair& pair = matches[i];
+ MOZ_ASSERT(mpiLen >= size_t(pair.limit) && pair.limit >= pair.start &&
+ pair.start >= 0);
+ }
+#endif /* DEBUG */
+}
+
+} /* namespace js */
+
+#endif /* vm_RegExpStatics_h */
diff --git a/js/src/vm/Runtime.cpp b/js/src/vm/Runtime.cpp
new file mode 100644
index 0000000000..eaa3052ff8
--- /dev/null
+++ b/js/src/vm/Runtime.cpp
@@ -0,0 +1,847 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Runtime.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/DebugOnly.h"
+#if JS_HAS_INTL_API
+# include "mozilla/intl/Locale.h"
+#endif
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/ThreadLocal.h"
+
+#include <locale.h>
+#include <string.h>
+
+#include "jsfriendapi.h"
+#include "jsmath.h"
+
+#include "frontend/CompilationStencil.h"
+#include "frontend/ParserAtom.h" // frontend::WellKnownParserAtoms
+#include "gc/GC.h"
+#include "gc/PublicIterators.h"
+#include "jit/IonCompileTask.h"
+#include "jit/JitRuntime.h"
+#include "jit/Simulator.h"
+#include "js/AllocationLogging.h" // JS_COUNT_CTOR, JS_COUNT_DTOR
+#include "js/experimental/JSStencil.h"
+#include "js/experimental/SourceHook.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/Interrupt.h"
+#include "js/MemoryMetrics.h"
+#include "js/Stack.h" // JS::NativeStackLimitMin
+#include "js/Wrapper.h"
+#include "js/WrapperCallbacks.h"
+#include "vm/DateTime.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "vm/PromiseObject.h" // js::PromiseObject
+#include "vm/SharedImmutableStringsCache.h"
+#include "vm/Warnings.h" // js::WarnNumberUC
+#include "wasm/WasmSignalHandlers.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "gc/ArenaList-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+
+using mozilla::Atomic;
+using mozilla::DebugOnly;
+using mozilla::NegativeInfinity;
+using mozilla::PositiveInfinity;
+
+/* static */ MOZ_THREAD_LOCAL(JSContext*) js::TlsContext;
+/* static */
+Atomic<size_t> JSRuntime::liveRuntimesCount;
+Atomic<JS::LargeAllocationFailureCallback> js::OnLargeAllocationFailure;
+
+JS::FilenameValidationCallback js::gFilenameValidationCallback = nullptr;
+
+namespace js {
+
+#ifndef __wasi__
+bool gCanUseExtraThreads = true;
+#else
+bool gCanUseExtraThreads = false;
+#endif
+} // namespace js
+
+void js::DisableExtraThreads() { gCanUseExtraThreads = false; }
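+
+// Note: gCanUseExtraThreads defaults to false on WASI above because that
+// target has no thread support; other embedders can opt out explicitly via
+// DisableExtraThreads().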
+
+const JSSecurityCallbacks js::NullSecurityCallbacks = {};
+
+static const JSWrapObjectCallbacks DefaultWrapObjectCallbacks = {
+ TransparentObjectWrapper, nullptr};
+
+extern bool DefaultHostEnsureCanAddPrivateElementCallback(JSContext* cx,
+ HandleValue val);
+
+static size_t ReturnZeroSize(const void* p) { return 0; }
+
+JSRuntime::JSRuntime(JSRuntime* parentRuntime)
+ : parentRuntime(parentRuntime),
+#ifdef DEBUG
+ updateChildRuntimeCount(parentRuntime),
+ initialized_(false),
+#endif
+ mainContext_(nullptr),
+ profilerSampleBufferRangeStart_(0),
+ telemetryCallback(nullptr),
+ consumeStreamCallback(nullptr),
+ reportStreamErrorCallback(nullptr),
+ hadOutOfMemory(false),
+ allowRelazificationForTesting(false),
+ destroyCompartmentCallback(nullptr),
+ sizeOfIncludingThisCompartmentCallback(nullptr),
+ destroyRealmCallback(nullptr),
+ realmNameCallback(nullptr),
+ securityCallbacks(&NullSecurityCallbacks),
+ DOMcallbacks(nullptr),
+ destroyPrincipals(nullptr),
+ readPrincipals(nullptr),
+ canAddPrivateElement(&DefaultHostEnsureCanAddPrivateElementCallback),
+ warningReporter(nullptr),
+ selfHostedLazyScript(),
+ geckoProfiler_(thisFromCtor()),
+ trustedPrincipals_(nullptr),
+ wrapObjectCallbacks(&DefaultWrapObjectCallbacks),
+ preserveWrapperCallback(nullptr),
+ scriptEnvironmentPreparer(nullptr),
+ ctypesActivityCallback(nullptr),
+ windowProxyClass_(nullptr),
+ numRealms(0),
+ numDebuggeeRealms_(0),
+ numDebuggeeRealmsObservingCoverage_(0),
+ localeCallbacks(nullptr),
+ defaultLocale(nullptr),
+ profilingScripts(false),
+ scriptAndCountsVector(nullptr),
+ watchtowerTestingLog(nullptr),
+ lcovOutput_(),
+ jitRuntime_(nullptr),
+ gc(thisFromCtor()),
+ emptyString(nullptr),
+#if !JS_HAS_INTL_API
+ thousandsSeparator(nullptr),
+ decimalSeparator(nullptr),
+ numGrouping(nullptr),
+#endif
+ beingDestroyed_(false),
+ allowContentJS_(true),
+ atoms_(nullptr),
+ permanentAtoms_(nullptr),
+ staticStrings(nullptr),
+ commonNames(nullptr),
+ wellKnownSymbols(nullptr),
+ scriptDataTableHolder_(SharedScriptDataTableHolder::NeedsLock::No),
+ liveSABs(0),
+ beforeWaitCallback(nullptr),
+ afterWaitCallback(nullptr),
+ offthreadIonCompilationEnabled_(true),
+ parallelParsingEnabled_(true),
+ autoWritableJitCodeActive_(false),
+ oomCallback(nullptr),
+ debuggerMallocSizeOf(ReturnZeroSize),
+ stackFormat_(parentRuntime ? js::StackFormat::Default
+ : js::StackFormat::SpiderMonkey),
+ wasmInstances(mutexid::WasmRuntimeInstances),
+ moduleAsyncEvaluatingPostOrder(ASYNC_EVALUATING_POST_ORDER_INIT),
+ moduleResolveHook(),
+ moduleMetadataHook(),
+ moduleDynamicImportHook(),
+ scriptPrivateAddRefHook(),
+ scriptPrivateReleaseHook() {
+ JS_COUNT_CTOR(JSRuntime);
+ liveRuntimesCount++;
+
+#ifndef __wasi__
+ // See function comment for why we call this now, not in JS_Init().
+ wasm::EnsureEagerProcessSignalHandlers();
+#endif // __wasi__
+}
+
+JSRuntime::~JSRuntime() {
+ JS_COUNT_DTOR(JSRuntime);
+ MOZ_ASSERT(!initialized_);
+
+ DebugOnly<size_t> oldCount = liveRuntimesCount--;
+ MOZ_ASSERT(oldCount > 0);
+
+ MOZ_ASSERT(wasmInstances.lock()->empty());
+
+ MOZ_ASSERT(numRealms == 0);
+ MOZ_ASSERT(numDebuggeeRealms_ == 0);
+ MOZ_ASSERT(numDebuggeeRealmsObservingCoverage_ == 0);
+}
+
+bool JSRuntime::init(JSContext* cx, uint32_t maxbytes) {
+#ifdef DEBUG
+ MOZ_ASSERT(!initialized_);
+ initialized_ = true;
+#endif
+
+ if (CanUseExtraThreads() && !EnsureHelperThreadsInitialized()) {
+ return false;
+ }
+
+ mainContext_ = cx;
+
+ if (!gc.init(maxbytes)) {
+ return false;
+ }
+
+ if (!InitRuntimeNumberState(this)) {
+ return false;
+ }
+
+ // As a hack, we clear our timezone cache every time we create a new runtime.
+ // Also see the comment in JS::Realm::init().
+ js::ResetTimeZoneInternal(ResetTimeZoneMode::DontResetIfOffsetUnchanged);
+
+ caches().megamorphicSetPropCache = MakeUnique<MegamorphicSetPropCache>();
+ if (!caches().megamorphicSetPropCache) {
+ return false;
+ }
+
+ return true;
+}
+
+void JSRuntime::destroyRuntime() {
+ MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
+ MOZ_ASSERT(childRuntimeCount == 0);
+ MOZ_ASSERT(initialized_);
+
+#ifdef JS_HAS_INTL_API
+ sharedIntlData.ref().destroyInstance();
+#endif
+
+ watchtowerTestingLog.ref().reset();
+
+  // Caches might hold onto ScriptData saved in the ScriptDataTable. Clear
+  // all stencils from the caches to remove those ScriptDataTable entries.
+ caches().purgeStencils();
+
+ if (gc.wasInitialized()) {
+ /*
+ * Finish any in-progress GCs first.
+ */
+ JSContext* cx = mainContextFromOwnThread();
+ if (JS::IsIncrementalGCInProgress(cx)) {
+ gc::FinishGC(cx);
+ }
+
+ /* Free source hook early, as its destructor may want to delete roots. */
+ sourceHook = nullptr;
+
+ /*
+ * Cancel any pending, in progress or completed Ion compilations and
+ * parse tasks. Waiting for wasm and compression tasks is done
+ * synchronously (on the main thread or during parse tasks), so no
+ * explicit canceling is needed for these.
+ */
+ CancelOffThreadIonCompile(this);
+ CancelOffThreadParses(this);
+ CancelOffThreadDelazify(this);
+ CancelOffThreadCompressions(this);
+
+ /*
+ * Flag us as being destroyed. This allows the GC to free things like
+ * interned atoms and Ion trampolines.
+ */
+ beingDestroyed_ = true;
+
+ /* Remove persistent GC roots. */
+ gc.finishRoots();
+
+ /* Allow the GC to release scripts that were being profiled. */
+ profilingScripts = false;
+
+ JS::PrepareForFullGC(cx);
+ gc.gc(JS::GCOptions::Shutdown, JS::GCReason::DESTROY_RUNTIME);
+ }
+
+ AutoNoteSingleThreadedRegion anstr;
+
+ MOZ_ASSERT(scriptDataTableHolder().getWithoutLock().empty());
+
+#if !JS_HAS_INTL_API
+ FinishRuntimeNumberState(this);
+#endif
+
+ gc.finish();
+
+ defaultLocale = nullptr;
+ js_delete(jitRuntime_.ref());
+
+#ifdef DEBUG
+ initialized_ = false;
+#endif
+}
+
+void JSRuntime::addTelemetry(JSMetric id, uint32_t sample) {
+ if (telemetryCallback) {
+ (*telemetryCallback)(id, sample);
+ }
+}
+
+void JSRuntime::setTelemetryCallback(
+ JSRuntime* rt, JSAccumulateTelemetryDataCallback callback) {
+ rt->telemetryCallback = callback;
+}
+
+void JSRuntime::setUseCounter(JSObject* obj, JSUseCounter counter) {
+ if (useCounterCallback) {
+ (*useCounterCallback)(obj, counter);
+ }
+}
+
+void JSRuntime::setUseCounterCallback(JSRuntime* rt,
+ JSSetUseCounterCallback callback) {
+ rt->useCounterCallback = callback;
+}
+
+void JSRuntime::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::RuntimeSizes* rtSizes) {
+ rtSizes->object += mallocSizeOf(this);
+
+ rtSizes->atomsTable += atoms().sizeOfIncludingThis(mallocSizeOf);
+ rtSizes->gc.marker += gc.markers.sizeOfExcludingThis(mallocSizeOf);
+ for (auto& marker : gc.markers) {
+ rtSizes->gc.marker += marker->sizeOfIncludingThis(mallocSizeOf);
+ }
+
+ if (!parentRuntime) {
+ rtSizes->atomsTable += mallocSizeOf(staticStrings);
+ rtSizes->atomsTable += mallocSizeOf(commonNames);
+ rtSizes->atomsTable += permanentAtoms()->sizeOfIncludingThis(mallocSizeOf);
+
+ rtSizes->selfHostStencil =
+ selfHostStencilInput_->sizeOfIncludingThis(mallocSizeOf) +
+ selfHostStencil_->sizeOfIncludingThis(mallocSizeOf) +
+ selfHostScriptMap.ref().shallowSizeOfExcludingThis(mallocSizeOf);
+ }
+
+ JSContext* cx = mainContextFromAnyThread();
+ rtSizes->contexts += cx->sizeOfIncludingThis(mallocSizeOf);
+ rtSizes->temporary += cx->tempLifoAlloc().sizeOfExcludingThis(mallocSizeOf);
+ rtSizes->interpreterStack +=
+ cx->interpreterStack().sizeOfExcludingThis(mallocSizeOf);
+ rtSizes->uncompressedSourceCache +=
+ caches().uncompressedSourceCache.sizeOfExcludingThis(mallocSizeOf);
+
+ rtSizes->gc.nurseryCommitted += gc.nursery().committed();
+ rtSizes->gc.nurseryMallocedBuffers +=
+ gc.nursery().sizeOfMallocedBuffers(mallocSizeOf);
+ gc.storeBuffer().addSizeOfExcludingThis(mallocSizeOf, &rtSizes->gc);
+
+ rtSizes->gc.nurseryMallocedBlockCache +=
+ gc.nursery().sizeOfMallocedBlockCache(mallocSizeOf);
+ rtSizes->gc.nurseryTrailerBlockSets +=
+ gc.nursery().sizeOfTrailerBlockSets(mallocSizeOf);
+
+ if (isMainRuntime()) {
+ rtSizes->sharedImmutableStringsCache +=
+ js::SharedImmutableStringsCache::getSingleton().sizeOfExcludingThis(
+ mallocSizeOf);
+ rtSizes->atomsTable +=
+ js::frontend::WellKnownParserAtoms::getSingleton().sizeOfExcludingThis(
+ mallocSizeOf);
+ }
+
+#ifdef JS_HAS_INTL_API
+ rtSizes->sharedIntlData +=
+ sharedIntlData.ref().sizeOfExcludingThis(mallocSizeOf);
+#endif
+
+ {
+ auto& table = scriptDataTableHolder().getWithoutLock();
+
+ rtSizes->scriptData += table.shallowSizeOfExcludingThis(mallocSizeOf);
+ for (SharedImmutableScriptDataTable::Range r = table.all(); !r.empty();
+ r.popFront()) {
+ rtSizes->scriptData += r.front()->sizeOfIncludingThis(mallocSizeOf);
+ }
+ }
+
+ if (isMainRuntime()) {
+ AutoLockGlobalScriptData lock;
+
+ auto& table = js::globalSharedScriptDataTableHolder.get(lock);
+
+ rtSizes->scriptData += table.shallowSizeOfExcludingThis(mallocSizeOf);
+ for (SharedImmutableScriptDataTable::Range r = table.all(); !r.empty();
+ r.popFront()) {
+ rtSizes->scriptData += r.front()->sizeOfIncludingThis(mallocSizeOf);
+ }
+ }
+
+ if (jitRuntime_) {
+ // Sizes of the IonCompileTasks we are holding for lazy linking
+ for (auto* task : jitRuntime_->ionLazyLinkList(this)) {
+ rtSizes->jitLazyLink += task->sizeOfExcludingThis(mallocSizeOf);
+ }
+ }
+
+ rtSizes->wasmRuntime +=
+ wasmInstances.lock()->sizeOfExcludingThis(mallocSizeOf);
+}
+
+static bool HandleInterrupt(JSContext* cx, bool invokeCallback) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+
+ cx->runtime()->gc.gcIfRequested();
+
+ // A worker thread may have requested an interrupt after finishing an Ion
+ // compilation.
+ jit::AttachFinishedCompilations(cx);
+
+ // Don't call the interrupt callback if we only interrupted for GC or Ion.
+ if (!invokeCallback) {
+ return true;
+ }
+
+ // Important: Additional callbacks can occur inside the callback handler
+ // if it re-enters the JS engine. The embedding must ensure that the
+ // callback is disconnected before attempting such re-entry.
+ if (cx->interruptCallbackDisabled) {
+ return true;
+ }
+
+ bool stop = false;
+ for (JSInterruptCallback cb : cx->interruptCallbacks()) {
+ if (!cb(cx)) {
+ stop = true;
+ }
+ }
+
+ if (!stop) {
+ // Debugger treats invoking the interrupt callback as a "step", so
+ // invoke the onStep handler.
+ if (cx->realm()->isDebuggee()) {
+ ScriptFrameIter iter(cx);
+ if (!iter.done() && cx->compartment() == iter.compartment() &&
+ DebugAPI::stepModeEnabled(iter.script())) {
+ if (!DebugAPI::onSingleStep(cx)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ // No need to set aside any pending exception here: ComputeStackString
+ // already does that.
+ JSString* stack = ComputeStackString(cx);
+
+ UniqueTwoByteChars stringChars;
+ if (stack) {
+ stringChars = JS_CopyStringCharsZ(cx, stack);
+ if (!stringChars) {
+ cx->recoverFromOutOfMemory();
+ }
+ }
+
+ const char16_t* chars;
+ if (stringChars) {
+ chars = stringChars.get();
+ } else {
+ chars = u"(stack not available)";
+ }
+ WarnNumberUC(cx, JSMSG_TERMINATED, chars);
+ return false;
+}
+
+void JSContext::requestInterrupt(InterruptReason reason) {
+ interruptBits_ |= uint32_t(reason);
+ jitStackLimit = JS::NativeStackLimitMin;
+
+ if (reason == InterruptReason::CallbackUrgent) {
+ // If this interrupt is urgent (slow script dialog for instance), take
+ // additional steps to interrupt corner cases where the above fields are
+ // not regularly polled.
+ FutexThread::lock();
+ if (fx.isWaiting()) {
+ fx.notify(FutexThread::NotifyForJSInterrupt);
+ }
+ fx.unlock();
+ wasm::InterruptRunningCode(this);
+ }
+}
+
+bool JSContext::handleInterrupt() {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
+ if (hasAnyPendingInterrupt() || jitStackLimit == JS::NativeStackLimitMin) {
+ bool invokeCallback =
+ hasPendingInterrupt(InterruptReason::CallbackUrgent) ||
+ hasPendingInterrupt(InterruptReason::CallbackCanWait);
+ interruptBits_ = 0;
+ resetJitStackLimit();
+ return HandleInterrupt(this, invokeCallback);
+ }
+ return true;
+}
+
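+// Illustrative sketch (not part of the original file): an embedding usually
+// pairs the machinery above with a watchdog. The callback and flag below are
+// hypothetical names; JS_AddInterruptCallback and JS_RequestInterruptCallback
+// are the public entry points this assumes (see js/Interrupt.h).
+//
+//   static mozilla::Atomic<bool> gWatchdogFired;
+//
+//   static bool MyInterruptCallback(JSContext* cx) {
+//     // Returning false terminates the running script with JSMSG_TERMINATED
+//     // via HandleInterrupt above.
+//     return !gWatchdogFired;
+//   }
+//
+//   // At startup: JS_AddInterruptCallback(cx, MyInterruptCallback);
+//   // From the watchdog thread: JS_RequestInterruptCallback(cx);
+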
+bool JSRuntime::setDefaultLocale(const char* locale) {
+ if (!locale) {
+ return false;
+ }
+
+ UniqueChars newLocale = DuplicateString(mainContextFromOwnThread(), locale);
+ if (!newLocale) {
+ return false;
+ }
+
+ defaultLocale.ref() = std::move(newLocale);
+ return true;
+}
+
+void JSRuntime::resetDefaultLocale() { defaultLocale = nullptr; }
+
+const char* JSRuntime::getDefaultLocale() {
+ if (defaultLocale.ref()) {
+ return defaultLocale.ref().get();
+ }
+
+  // Use ICU, if available, to retrieve the default locale; this ensures
+  // ICU's default locale matches our default locale.
+#if JS_HAS_INTL_API
+ const char* locale = mozilla::intl::Locale::GetDefaultLocale();
+#else
+ const char* locale = setlocale(LC_ALL, nullptr);
+#endif
+
+  // Convert to a well-formed BCP 47 language tag.
+ if (!locale || !strcmp(locale, "C")) {
+ locale = "und";
+ }
+
+ UniqueChars lang = DuplicateString(mainContextFromOwnThread(), locale);
+ if (!lang) {
+ return nullptr;
+ }
+
+ char* p;
+ if ((p = strchr(lang.get(), '.'))) {
+ *p = '\0';
+ }
+ while ((p = strchr(lang.get(), '_'))) {
+ *p = '-';
+ }
+
+ defaultLocale.ref() = std::move(lang);
+ return defaultLocale.ref().get();
+}
+
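+// Worked example for the normalization above: a POSIX locale string such as
+// "en_US.UTF-8" is truncated at the '.' and has '_' replaced by '-',
+// yielding the BCP 47 tag "en-US"; a plain "C" locale (or no locale at all)
+// maps to "und" (undetermined).
+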
+#ifdef JS_HAS_INTL_API
+void JSRuntime::traceSharedIntlData(JSTracer* trc) {
+ sharedIntlData.ref().trace(trc);
+}
+#endif
+
+SharedScriptDataTableHolder& JSRuntime::scriptDataTableHolder() {
+  // NOTE: Assert that this is the main thread; the per-runtime table holder
+  // must not be accessed from helper (worker) threads.
+ MOZ_ASSERT(CurrentThreadIsMainThread());
+ return scriptDataTableHolder_;
+}
+
+GlobalObject* JSRuntime::getIncumbentGlobal(JSContext* cx) {
+ MOZ_ASSERT(cx->jobQueue);
+
+ JSObject* obj = cx->jobQueue->getIncumbentGlobal(cx);
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->is<GlobalObject>(),
+ "getIncumbentGlobalCallback must return a global!");
+ return &obj->as<GlobalObject>();
+}
+
+bool JSRuntime::enqueuePromiseJob(JSContext* cx, HandleFunction job,
+ HandleObject promise,
+ Handle<GlobalObject*> incumbentGlobal) {
+ MOZ_ASSERT(cx->jobQueue,
+ "Must select a JobQueue implementation using JS::JobQueue "
+ "or js::UseInternalJobQueues before using Promises");
+
+ RootedObject allocationSite(cx);
+ if (promise) {
+#ifdef DEBUG
+ AssertSameCompartment(job, promise);
+#endif
+
+ RootedObject unwrappedPromise(cx, promise);
+ // While the job object is guaranteed to be unwrapped, the promise
+ // might be wrapped. See the comments in EnqueuePromiseReactionJob in
+ // builtin/Promise.cpp for details.
+ if (IsWrapper(promise)) {
+ unwrappedPromise = UncheckedUnwrap(promise);
+ }
+ if (unwrappedPromise->is<PromiseObject>()) {
+ allocationSite = JS::GetPromiseAllocationSite(unwrappedPromise);
+ }
+ }
+ return cx->jobQueue->enqueuePromiseJob(cx, promise, job, allocationSite,
+ incumbentGlobal);
+}
+
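+// Embedding sketch for the queue used above: before creating promises, a
+// shell-style embedder either installs its own JS::JobQueue or opts into the
+// internal queue. js::UseInternalJobQueues and js::RunJobs are assumed from
+// the error message in enqueuePromiseJob; treat this as illustrative only.
+//
+//   if (!js::UseInternalJobQueues(cx)) {
+//     return false;  // OOM while setting up the queue
+//   }
+//   // ...evaluate scripts that create promises; reaction jobs are enqueued
+//   // through JSRuntime::enqueuePromiseJob...
+//   js::RunJobs(cx);  // drain the internal job queue
+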
+void JSRuntime::addUnhandledRejectedPromise(JSContext* cx,
+ js::HandleObject promise) {
+ MOZ_ASSERT(promise->is<PromiseObject>());
+ if (!cx->promiseRejectionTrackerCallback) {
+ return;
+ }
+
+ bool mutedErrors = false;
+ if (JSScript* script = cx->currentScript()) {
+ mutedErrors = script->mutedErrors();
+ }
+
+ void* data = cx->promiseRejectionTrackerCallbackData;
+ cx->promiseRejectionTrackerCallback(
+ cx, mutedErrors, promise, JS::PromiseRejectionHandlingState::Unhandled,
+ data);
+}
+
+void JSRuntime::removeUnhandledRejectedPromise(JSContext* cx,
+ js::HandleObject promise) {
+ MOZ_ASSERT(promise->is<PromiseObject>());
+ if (!cx->promiseRejectionTrackerCallback) {
+ return;
+ }
+
+ bool mutedErrors = false;
+ if (JSScript* script = cx->currentScript()) {
+ mutedErrors = script->mutedErrors();
+ }
+
+ void* data = cx->promiseRejectionTrackerCallbackData;
+ cx->promiseRejectionTrackerCallback(
+ cx, mutedErrors, promise, JS::PromiseRejectionHandlingState::Handled,
+ data);
+}
+
+mozilla::non_crypto::XorShift128PlusRNG& JSRuntime::randomKeyGenerator() {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(this));
+ if (randomKeyGenerator_.isNothing()) {
+ mozilla::Array<uint64_t, 2> seed;
+ GenerateXorShift128PlusSeed(seed);
+ randomKeyGenerator_.emplace(seed[0], seed[1]);
+ }
+ return randomKeyGenerator_.ref();
+}
+
+mozilla::HashCodeScrambler JSRuntime::randomHashCodeScrambler() {
+ auto& rng = randomKeyGenerator();
+ return mozilla::HashCodeScrambler(rng.next(), rng.next());
+}
+
+mozilla::non_crypto::XorShift128PlusRNG JSRuntime::forkRandomKeyGenerator() {
+ auto& rng = randomKeyGenerator();
+ return mozilla::non_crypto::XorShift128PlusRNG(rng.next(), rng.next());
+}
+
+js::HashNumber JSRuntime::randomHashCode() {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(this));
+
+ if (randomHashCodeGenerator_.isNothing()) {
+ mozilla::Array<uint64_t, 2> seed;
+ GenerateXorShift128PlusSeed(seed);
+ randomHashCodeGenerator_.emplace(seed[0], seed[1]);
+ }
+
+ return HashNumber(randomHashCodeGenerator_->next());
+}
+
+JS_PUBLIC_API void* JSRuntime::onOutOfMemory(AllocFunction allocFunc,
+ arena_id_t arena, size_t nbytes,
+ void* reallocPtr,
+ JSContext* maybecx) {
+ MOZ_ASSERT_IF(allocFunc != AllocFunction::Realloc, !reallocPtr);
+
+ if (JS::RuntimeHeapIsBusy()) {
+ return nullptr;
+ }
+
+ if (!oom::IsSimulatedOOMAllocation()) {
+ /*
+ * Retry when we are done with the background sweeping and have stopped
+ * all the allocations and released the empty GC chunks.
+ */
+ gc.onOutOfMallocMemory();
+ void* p;
+ switch (allocFunc) {
+ case AllocFunction::Malloc:
+ p = js_arena_malloc(arena, nbytes);
+ break;
+ case AllocFunction::Calloc:
+ p = js_arena_calloc(arena, nbytes, 1);
+ break;
+ case AllocFunction::Realloc:
+ p = js_arena_realloc(arena, reallocPtr, nbytes);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ if (p) {
+ return p;
+ }
+ }
+
+ if (maybecx) {
+ ReportOutOfMemory(maybecx);
+ }
+ return nullptr;
+}
+
+void* JSRuntime::onOutOfMemoryCanGC(AllocFunction allocFunc, arena_id_t arena,
+ size_t bytes, void* reallocPtr) {
+ if (OnLargeAllocationFailure && bytes >= LARGE_ALLOCATION) {
+ OnLargeAllocationFailure();
+ }
+ return onOutOfMemory(allocFunc, arena, bytes, reallocPtr);
+}
+
+bool JSRuntime::activeGCInAtomsZone() {
+ Zone* zone = unsafeAtomsZone();
+ return (zone->needsIncrementalBarrier() &&
+ !gc.isVerifyPreBarriersEnabled()) ||
+ zone->wasGCStarted();
+}
+
+void JSRuntime::incrementNumDebuggeeRealms() {
+ if (numDebuggeeRealms_ == 0) {
+ jitRuntime()->baselineInterpreter().toggleDebuggerInstrumentation(true);
+ }
+
+ numDebuggeeRealms_++;
+ MOZ_ASSERT(numDebuggeeRealms_ <= numRealms);
+}
+
+void JSRuntime::decrementNumDebuggeeRealms() {
+ MOZ_ASSERT(numDebuggeeRealms_ > 0);
+ numDebuggeeRealms_--;
+
+ // Note: if we had shutdown leaks we can end up here while destroying the
+ // runtime. It's not safe to access JitRuntime trampolines because they're no
+ // longer traced.
+ if (numDebuggeeRealms_ == 0 && !isBeingDestroyed()) {
+ jitRuntime()->baselineInterpreter().toggleDebuggerInstrumentation(false);
+ }
+}
+
+void JSRuntime::incrementNumDebuggeeRealmsObservingCoverage() {
+ if (numDebuggeeRealmsObservingCoverage_ == 0) {
+ jit::BaselineInterpreter& interp = jitRuntime()->baselineInterpreter();
+ interp.toggleCodeCoverageInstrumentation(true);
+ }
+
+ numDebuggeeRealmsObservingCoverage_++;
+ MOZ_ASSERT(numDebuggeeRealmsObservingCoverage_ <= numRealms);
+}
+
+void JSRuntime::decrementNumDebuggeeRealmsObservingCoverage() {
+ MOZ_ASSERT(numDebuggeeRealmsObservingCoverage_ > 0);
+ numDebuggeeRealmsObservingCoverage_--;
+
+ // Note: if we had shutdown leaks we can end up here while destroying the
+ // runtime. It's not safe to access JitRuntime trampolines because they're no
+ // longer traced.
+ if (numDebuggeeRealmsObservingCoverage_ == 0 && !isBeingDestroyed()) {
+ jit::BaselineInterpreter& interp = jitRuntime()->baselineInterpreter();
+ interp.toggleCodeCoverageInstrumentation(false);
+ }
+}
+
+bool js::CurrentThreadCanAccessRuntime(const JSRuntime* rt) {
+ return rt->mainContextFromAnyThread() == TlsContext.get();
+}
+
+bool js::CurrentThreadCanAccessZone(Zone* zone) {
+ return CurrentThreadCanAccessRuntime(zone->runtime_);
+}
+
+#ifdef DEBUG
+bool js::CurrentThreadIsMainThread() {
+ JSContext* cx = TlsContext.get();
+ return cx && cx->isMainThreadContext();
+}
+#endif
+
+JS_PUBLIC_API void JS::SetJSContextProfilerSampleBufferRangeStart(
+ JSContext* cx, uint64_t rangeStart) {
+ cx->runtime()->setProfilerSampleBufferRangeStart(rangeStart);
+}
+
+JS_PUBLIC_API bool JS::IsProfilingEnabledForContext(JSContext* cx) {
+ MOZ_ASSERT(cx);
+ return cx->runtime()->geckoProfiler().enabled();
+}
+
+JS_PUBLIC_API void JS::EnableRecordingAllocations(
+ JSContext* cx, JS::RecordAllocationsCallback callback, double probability) {
+ MOZ_ASSERT(cx);
+ MOZ_ASSERT(cx->isMainThreadContext());
+ cx->runtime()->startRecordingAllocations(probability, callback);
+}
+
+JS_PUBLIC_API void JS::DisableRecordingAllocations(JSContext* cx) {
+ MOZ_ASSERT(cx);
+ MOZ_ASSERT(cx->isMainThreadContext());
+ cx->runtime()->stopRecordingAllocations();
+}
+
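+// Usage sketch for the recording API above. OnAllocation stands in for an
+// embedder-provided JS::RecordAllocationsCallback (see
+// js/AllocationRecording.h for the real signature):
+//
+//   JS::EnableRecordingAllocations(cx, OnAllocation,
+//                                  /* probability = */ 0.01);
+//   // ...run the workload; roughly 1% of allocations are reported...
+//   JS::DisableRecordingAllocations(cx);
+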
+JS_PUBLIC_API void JS::shadow::RegisterWeakCache(
+ JSRuntime* rt, detail::WeakCacheBase* cachep) {
+ rt->registerWeakCache(cachep);
+}
+
+void JSRuntime::startRecordingAllocations(
+ double probability, JS::RecordAllocationsCallback callback) {
+ allocationSamplingProbability = probability;
+ recordAllocationCallback = callback;
+
+ // Go through all of the existing realms, and turn on allocation tracking.
+ for (RealmsIter realm(this); !realm.done(); realm.next()) {
+ realm->setAllocationMetadataBuilder(&SavedStacks::metadataBuilder);
+ realm->chooseAllocationSamplingProbability();
+ }
+}
+
+void JSRuntime::stopRecordingAllocations() {
+ recordAllocationCallback = nullptr;
+  // Go through all of the existing realms, and turn off allocation tracking
+  // wherever no debugger still requires it.
+ for (RealmsIter realm(this); !realm.done(); realm.next()) {
+ js::GlobalObject* global = realm->maybeGlobal();
+ if (!realm->isDebuggee() || !global ||
+ !DebugAPI::isObservedByDebuggerTrackingAllocations(*global)) {
+ // Only remove the allocation metadata builder if no Debuggers are
+ // tracking allocations.
+ realm->forgetAllocationMetadataBuilder();
+ }
+ }
+}
+
+// This function is run to ensure that newly created realms have allocation
+// logging turned on while runtime-wide recording is active.
+void JSRuntime::ensureRealmIsRecordingAllocations(
+ Handle<GlobalObject*> global) {
+ if (recordAllocationCallback) {
+ if (!global->realm()->isRecordingAllocations()) {
+ // This is a new realm, turn on allocations for it.
+ global->realm()->setAllocationMetadataBuilder(
+ &SavedStacks::metadataBuilder);
+ }
+ // Ensure the probability is up to date with the current combination of
+ // debuggers and runtime profiling.
+ global->realm()->chooseAllocationSamplingProbability();
+ }
+}
diff --git a/js/src/vm/Runtime.h b/js/src/vm/Runtime.h
new file mode 100644
index 0000000000..0ceca19042
--- /dev/null
+++ b/js/src/vm/Runtime.h
@@ -0,0 +1,1144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Runtime_h
+#define vm_Runtime_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DoublyLinkedList.h"
+#include "mozilla/LinkedList.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/XorShift128PlusRNG.h"
+
+#include <algorithm>
+
+#ifdef JS_HAS_INTL_API
+# include "builtin/intl/SharedIntlData.h"
+#endif
+#include "frontend/ScriptIndex.h"
+#include "gc/GCRuntime.h"
+#include "js/AllocationRecording.h"
+#include "js/BuildId.h" // JS::BuildIdOp
+#include "js/Context.h"
+#include "js/experimental/CTypes.h" // JS::CTypesActivityCallback
+#include "js/friend/StackLimits.h" // js::ReportOverRecursed
+#include "js/friend/UsageStatistics.h" // JSAccumulateTelemetryDataCallback
+#include "js/GCVector.h"
+#include "js/HashTable.h"
+#include "js/Initialization.h"
+#include "js/MemoryCallbacks.h"
+#include "js/Modules.h" // JS::Module{DynamicImport,Metadata,Resolve}Hook
+#include "js/ScriptPrivate.h"
+#include "js/shadow/Zone.h"
+#include "js/ShadowRealmCallbacks.h"
+#include "js/Stack.h"
+#include "js/StreamConsumer.h"
+#include "js/Symbol.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "js/WaitCallbacks.h"
+#include "js/Warnings.h" // JS::WarningReporter
+#include "js/Zone.h"
+#include "vm/Caches.h" // js::RuntimeCaches
+#include "vm/CodeCoverage.h"
+#include "vm/GeckoProfiler.h"
+#include "vm/JSScript.h"
+#include "vm/OffThreadPromiseRuntimeState.h" // js::OffThreadPromiseRuntimeState
+#include "vm/SharedScriptDataTableHolder.h" // js::SharedScriptDataTableHolder
+#include "vm/Stack.h"
+#include "wasm/WasmTypeDecls.h"
+
+struct JSAtomState;
+struct JSClass;
+struct JSErrorInterceptor;
+struct JSWrapObjectCallbacks;
+
+namespace js {
+
+class AutoAssertNoContentJS;
+class Debugger;
+class EnterDebuggeeNoExecute;
+class FrontendContext;
+class PlainObject;
+class StaticStrings;
+
+} // namespace js
+
+struct DtoaState;
+struct JSLocaleCallbacks;
+
+#ifdef JS_SIMULATOR_ARM64
+namespace vixl {
+class Simulator;
+}
+#endif
+
+namespace js {
+
+extern MOZ_COLD void ReportOutOfMemory(JSContext* cx);
+extern MOZ_COLD void ReportAllocationOverflow(JSContext* maybecx);
+extern MOZ_COLD void ReportAllocationOverflow(FrontendContext* fc);
+extern MOZ_COLD void ReportOversizedAllocation(JSContext* cx,
+ const unsigned errorNumber);
+
+class Activation;
+class ActivationIterator;
+class Shape;
+class SourceHook;
+
+namespace jit {
+class JitRuntime;
+class JitActivation;
+struct PcScriptCache;
+class CompileRuntime;
+
+#ifdef JS_SIMULATOR_ARM64
+typedef vixl::Simulator Simulator;
+#elif defined(JS_SIMULATOR)
+class Simulator;
+#endif
+} // namespace jit
+
+namespace frontend {
+struct CompilationInput;
+struct CompilationStencil;
+} // namespace frontend
+
+// [SMDOC] JS Engine Threading
+//
+// Threads interacting with a runtime are divided into two categories:
+//
+// - The main thread is capable of running JS. There's at most one main thread
+// per runtime.
+//
+// - Helper threads do not run JS, and are controlled or triggered by activity
+// on the main thread (or main threads, since all runtimes in a process share
+// helper threads). Helper threads may have exclusive access to zones created
+// for them, for parsing and similar tasks, but their activities do not cause
+// observable changes in script behaviors. Activity on helper threads may be
+// referred to as happening 'off thread' or on a background thread in some
+// parts of the VM.
+
+} /* namespace js */
+
+namespace JS {
+struct RuntimeSizes;
+} // namespace JS
+
+namespace js {
+
+/*
+ * Storage for well-known symbols. It's a separate struct from the Runtime so
+ * that it can be shared across multiple runtimes. As in JSAtomState, each
+ * field is a smart pointer that's immutable once initialized.
+ * `rt->wellKnownSymbols->iterator` is convertible to Handle<Symbol*>.
+ *
+ * Well-known symbols are never GC'd. The description() of each well-known
+ * symbol is a permanent atom.
+ */
+struct WellKnownSymbols {
+#define DECLARE_SYMBOL(name) ImmutableTenuredPtr<JS::Symbol*> name;
+ JS_FOR_EACH_WELL_KNOWN_SYMBOL(DECLARE_SYMBOL)
+#undef DECLARE_SYMBOL
+
+ const ImmutableTenuredPtr<JS::Symbol*>& get(size_t u) const {
+ MOZ_ASSERT(u < JS::WellKnownSymbolLimit);
+ const ImmutableTenuredPtr<JS::Symbol*>* symbols =
+ reinterpret_cast<const ImmutableTenuredPtr<JS::Symbol*>*>(this);
+ return symbols[u];
+ }
+
+ const ImmutableTenuredPtr<JS::Symbol*>& get(JS::SymbolCode code) const {
+ return get(size_t(code));
+ }
+
+ WellKnownSymbols() = default;
+ WellKnownSymbols(const WellKnownSymbols&) = delete;
+ WellKnownSymbols& operator=(const WellKnownSymbols&) = delete;
+};
+
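+// Minimal usage sketch, assuming an initialized runtime `rt` and relying on
+// ImmutableTenuredPtr's implicit conversion to JS::Symbol*: the indexed
+// accessor and the named field denote the same immutable symbol.
+//
+//   JS::Symbol* a = rt->wellKnownSymbols->iterator;
+//   JS::Symbol* b = rt->wellKnownSymbols->get(JS::SymbolCode::iterator);
+//   MOZ_ASSERT(a == b);
+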
+// There are several coarse locks in the enum below. These may be either
+// per-runtime or per-process. When acquiring more than one of these locks,
+// the acquisition must be done in the order below to avoid deadlocks.
+enum RuntimeLock { HelperThreadStateLock, GCLock };
+
+inline bool CanUseExtraThreads() {
+ extern bool gCanUseExtraThreads;
+ return gCanUseExtraThreads;
+}
+
+void DisableExtraThreads();
+
+using ScriptAndCountsVector = GCVector<ScriptAndCounts, 0, SystemAllocPolicy>;
+
+class AutoLockScriptData;
+
+// Self-hosted lazy functions do not maintain a BaseScript as we can clone from
+// the copy in the self-hosting zone. To allow these functions to be called by
+// the JITs, we need a minimal script object. There is one instance per runtime.
+struct SelfHostedLazyScript {
+ SelfHostedLazyScript() = default;
+
+  // Pointer to the interpreter trampoline. This field is stored at the same
+  // location as BaseScript::jitCodeRaw_.
+ uint8_t* jitCodeRaw_ = nullptr;
+
+ // Warm-up count of zero. This field is stored at the same offset as
+ // BaseScript::warmUpData_.
+ ScriptWarmUpData warmUpData_ = {};
+
+ static constexpr size_t offsetOfJitCodeRaw() {
+ return offsetof(SelfHostedLazyScript, jitCodeRaw_);
+ }
+ static constexpr size_t offsetOfWarmUpData() {
+ return offsetof(SelfHostedLazyScript, warmUpData_);
+ }
+};
+
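+// The two offset accessors above exist so that JIT code can load the code
+// pointer and warm-up data without knowing whether a JSFunction holds a
+// BaseScript or the shared SelfHostedLazyScript. Sketch of the invariant
+// being relied on (the BaseScript accessor name is assumed here):
+//
+//   SelfHostedLazyScript::offsetOfJitCodeRaw() ==
+//       BaseScript::offsetOfJitCodeRaw()
+//   SelfHostedLazyScript::offsetOfWarmUpData() ==
+//       the offset of BaseScript::warmUpData_
+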
+// An interface for reporting telemetry from within SpiderMonkey. Reporting data
+// to this interface will forward it to the embedding if a telemetry callback
+// was registered. It is the embedding's responsibility to store and/or combine
+// repeated samples for each metric.
+class Metrics {
+ private:
+ JSRuntime* rt_;
+
+ public:
+ explicit Metrics(JSRuntime* rt) : rt_(rt) {}
+
+  // Records a TimeDuration metric. These are converted to integers when
+  // recorded, so choose an appropriate scale. In the future these will be
+  // Glean Timing Distribution metrics.
+ struct TimeDuration_S {
+ using SourceType = mozilla::TimeDuration;
+ static uint32_t convert(SourceType td) { return uint32_t(td.ToSeconds()); }
+ };
+ struct TimeDuration_MS {
+ using SourceType = mozilla::TimeDuration;
+ static uint32_t convert(SourceType td) {
+ return uint32_t(td.ToMilliseconds());
+ }
+ };
+ struct TimeDuration_US {
+ using SourceType = mozilla::TimeDuration;
+ static uint32_t convert(SourceType td) {
+ return uint32_t(td.ToMicroseconds());
+ }
+ };
+
+ // Record a metric in bytes. In the future these will be Glean Memory
+ // Distribution metrics.
+ struct MemoryDistribution {
+ using SourceType = size_t;
+ static uint32_t convert(SourceType sz) {
+ return static_cast<uint32_t>(std::min(sz, size_t(UINT32_MAX)));
+ }
+ };
+
+  // Record a metric for a quantity of items. This doesn't currently have a
+  // Glean analogue, and we avoid reusing MemoryDistribution directly to
+  // prevent confusion about units.
+ using QuantityDistribution = MemoryDistribution;
+
+ // Record the distribution of boolean values. In the future this will be a
+ // Glean Rate metric.
+ struct Boolean {
+ using SourceType = bool;
+ static uint32_t convert(SourceType sample) {
+ return static_cast<uint32_t>(sample);
+ }
+ };
+
+  // Record the distribution of an enumeration value. This records integer
+  // values, so take care not to change the values of existing enumerators.
+  // In the future, these should become Glean Labeled Counter metrics.
+ struct Enumeration {
+ using SourceType = int;
+ static uint32_t convert(SourceType sample) {
+ MOZ_ASSERT(sample >= 0 && sample <= 100);
+ return static_cast<uint32_t>(sample);
+ }
+ };
+
+  // Record a percentage distribution: an integer in the range 0 to 100.
+ // In the future, this will be a Glean Custom Distribution unless they add a
+ // better match.
+ struct Percentage {
+ using SourceType = int;
+ static uint32_t convert(SourceType sample) {
+ MOZ_ASSERT(sample >= 0 && sample <= 100);
+ return static_cast<uint32_t>(sample);
+ }
+ };
+
+ // Record an unsigned integer.
+ struct Integer {
+ using SourceType = uint32_t;
+ static uint32_t convert(SourceType sample) { return sample; }
+ };
+
+ inline void addTelemetry(JSMetric id, uint32_t sample);
+
+#define DECLARE_METRIC_HELPER(NAME, TY) \
+ void NAME(TY::SourceType sample) { \
+ addTelemetry(JSMetric::NAME, TY::convert(sample)); \
+ }
+ FOR_EACH_JS_METRIC(DECLARE_METRIC_HELPER)
+#undef DECLARE_METRIC_HELPER
+};
+
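+// Usage sketch for the generated helpers: given a FOR_EACH_JS_METRIC entry
+// of the form (GC_MS, TimeDuration_MS) (an illustrative name, not
+// necessarily a real metric), the macro above expands to
+// `void GC_MS(mozilla::TimeDuration sample)`, so a caller writes:
+//
+//   rt->metrics().GC_MS(sliceDuration);  // recorded as uint32_t milliseconds
+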
+} // namespace js
+
+struct JSRuntime {
+ private:
+ friend class js::Activation;
+ friend class js::ActivationIterator;
+ friend class js::jit::JitActivation;
+ friend class js::jit::CompileRuntime;
+
+ /* Space for interpreter frames. */
+ js::MainThreadData<js::InterpreterStack> interpreterStack_;
+
+ public:
+ js::InterpreterStack& interpreterStack() { return interpreterStack_.ref(); }
+
+ /*
+ * If non-null, another runtime guaranteed to outlive this one and whose
+ * permanent data may be used by this one where possible.
+ */
+ JSRuntime* const parentRuntime;
+
+ bool isMainRuntime() const { return !parentRuntime; }
+
+#ifdef DEBUG
+ /* The number of child runtimes that have this runtime as their parent. */
+ mozilla::Atomic<size_t> childRuntimeCount;
+
+ class AutoUpdateChildRuntimeCount {
+ JSRuntime* parent_;
+
+ public:
+ explicit AutoUpdateChildRuntimeCount(JSRuntime* parent) : parent_(parent) {
+ if (parent_) {
+ parent_->childRuntimeCount++;
+ }
+ }
+
+ ~AutoUpdateChildRuntimeCount() {
+ if (parent_) {
+ parent_->childRuntimeCount--;
+ }
+ }
+ };
+
+ AutoUpdateChildRuntimeCount updateChildRuntimeCount;
+#endif
+
+ private:
+#ifdef DEBUG
+ js::WriteOnceData<bool> initialized_;
+#endif
+
+ // The JSContext* for the runtime's main thread. Immutable after this is set
+ // in JSRuntime::init.
+ JSContext* mainContext_;
+
+ public:
+ JSContext* mainContextFromAnyThread() const { return mainContext_; }
+ const void* addressOfMainContext() { return &mainContext_; }
+ js::Fprinter parserWatcherFile;
+
+ inline JSContext* mainContextFromOwnThread();
+
+ js::Metrics metrics() { return js::Metrics(this); }
+
+ /*
+ * The start of the range stored in the profiler sample buffer, as measured
+ * after the most recent sample.
+ * All JitcodeGlobalTable entries referenced from a given sample are
+ * assigned the buffer position of the START of the sample. The buffer
+ * entries that reference the JitcodeGlobalTable entries will only ever be
+ * read from the buffer while the entire sample is still inside the buffer;
+ * if some buffer entries at the start of the sample have left the buffer,
+ * the entire sample will be considered inaccessible.
+ * This means that, once profilerSampleBufferRangeStart_ advances beyond
+ * the sample position that's stored on a JitcodeGlobalTable entry, the
+ * buffer entries that reference this JitcodeGlobalTable entry will be
+   * considered inaccessible, and that JitcodeGlobalTable entry can be
+   * disposed of.
+ */
+ mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire>
+ profilerSampleBufferRangeStart_;
+
+ mozilla::Maybe<uint64_t> profilerSampleBufferRangeStart() {
+ if (beingDestroyed_ || !geckoProfiler().enabled()) {
+ return mozilla::Nothing();
+ }
+ uint64_t rangeStart = profilerSampleBufferRangeStart_;
+ return mozilla::Some(rangeStart);
+ }
+ void setProfilerSampleBufferRangeStart(uint64_t rangeStart) {
+ profilerSampleBufferRangeStart_ = rangeStart;
+ }
+
+ /* Call this to accumulate telemetry data. May be called from any thread; the
+ * embedder is responsible for locking. */
+ JSAccumulateTelemetryDataCallback telemetryCallback;
+
+ /* Call this to accumulate use counter data. */
+ js::MainThreadData<JSSetUseCounterCallback> useCounterCallback;
+
+ public:
+ // Accumulates data for Firefox telemetry.
+ void addTelemetry(JSMetric id, uint32_t sample);
+
+ void setTelemetryCallback(JSRuntime* rt,
+ JSAccumulateTelemetryDataCallback callback);
+
+  // Sets the use counter for a specific feature, measuring the presence or
+  // absence of that feature's usage on the specific web page and document to
+  // which the passed JSObject belongs.
+ void setUseCounter(JSObject* obj, JSUseCounter counter);
+
+ void setUseCounterCallback(JSRuntime* rt, JSSetUseCounterCallback callback);
+
+ public:
+ js::UnprotectedData<js::OffThreadPromiseRuntimeState> offThreadPromiseState;
+ js::UnprotectedData<JS::ConsumeStreamCallback> consumeStreamCallback;
+ js::UnprotectedData<JS::ReportStreamErrorCallback> reportStreamErrorCallback;
+
+ js::GlobalObject* getIncumbentGlobal(JSContext* cx);
+ bool enqueuePromiseJob(JSContext* cx, js::HandleFunction job,
+ js::HandleObject promise,
+ js::Handle<js::GlobalObject*> incumbentGlobal);
+ void addUnhandledRejectedPromise(JSContext* cx, js::HandleObject promise);
+ void removeUnhandledRejectedPromise(JSContext* cx, js::HandleObject promise);
+
+ /* Had an out-of-memory error which did not populate an exception. */
+ mozilla::Atomic<bool, mozilla::SequentiallyConsistent> hadOutOfMemory;
+
+ /*
+ * Allow relazifying functions in compartments that are active. This is
+ * only used by the relazifyFunctions() testing function.
+ */
+ js::MainThreadData<bool> allowRelazificationForTesting;
+
+ /* Zone destroy callback. */
+ js::MainThreadData<JSDestroyZoneCallback> destroyZoneCallback;
+
+ /* Compartment destroy callback. */
+ js::MainThreadData<JSDestroyCompartmentCallback> destroyCompartmentCallback;
+
+ /* Compartment memory reporting callback. */
+ js::MainThreadData<JSSizeOfIncludingThisCompartmentCallback>
+ sizeOfIncludingThisCompartmentCallback;
+
+ /* Callback for creating ubi::Nodes representing DOM node objects. Set by
+ * JS::ubi::SetConstructUbiNodeForDOMObjectCallback. Refer to
+ * js/public/UbiNode.h.
+ */
+ void (*constructUbiNodeForDOMObjectCallback)(void*, JSObject*) = nullptr;
+
+ /* Realm destroy callback. */
+ js::MainThreadData<JS::DestroyRealmCallback> destroyRealmCallback;
+
+ /* Call this to get the name of a realm. */
+ js::MainThreadData<JS::RealmNameCallback> realmNameCallback;
+
+ js::MainThreadData<mozilla::UniquePtr<js::SourceHook>> sourceHook;
+
+ js::MainThreadData<const JSSecurityCallbacks*> securityCallbacks;
+ js::MainThreadData<const js::DOMCallbacks*> DOMcallbacks;
+ js::MainThreadData<JSDestroyPrincipalsOp> destroyPrincipals;
+ js::MainThreadData<JSReadPrincipalsOp> readPrincipals;
+
+ js::MainThreadData<JS::EnsureCanAddPrivateElementOp> canAddPrivateElement;
+
+ /* Optional warning reporter. */
+ js::MainThreadData<JS::WarningReporter> warningReporter;
+
+ // Lazy self-hosted functions use a shared SelfHostedLazyScript instance
+  // instead of a BaseScript. This contains the minimal pointers to
+ // trampolines for the scripts to support direct jitCodeRaw calls.
+ js::UnprotectedData<js::SelfHostedLazyScript> selfHostedLazyScript;
+
+ private:
+ // The self-hosted JS code is compiled as a Stencil which is then attached to
+ // the Runtime. This is used to instantiate functions into realms on demand.
+ js::WriteOnceData<js::frontend::CompilationInput*> selfHostStencilInput_;
+ js::WriteOnceData<js::frontend::CompilationStencil*> selfHostStencil_;
+
+ public:
+ // The self-hosted stencil is immutable once attached to the runtime, so
+ // worker runtimes directly use the stencil on the parent runtime.
+ js::frontend::CompilationInput& selfHostStencilInput() {
+ MOZ_ASSERT(hasSelfHostStencil());
+ return *selfHostStencilInput_.ref();
+ }
+ js::frontend::CompilationStencil& selfHostStencil() {
+ MOZ_ASSERT(hasSelfHostStencil());
+ return *selfHostStencil_.ref();
+ }
+ bool hasSelfHostStencil() const { return bool(selfHostStencil_.ref()); }
+
+  // A mapping from the name of a self-hosted function to the ScriptIndex
+  // range of that function and its inner functions within the self-hosted
+  // stencil.
+ js::MainThreadData<
+ JS::GCHashMap<js::PreBarriered<JSAtom*>, js::frontend::ScriptIndexRange,
+ js::DefaultHasher<JSAtom*>, js::SystemAllocPolicy>>
+ selfHostScriptMap;
+
+ private:
+ /* Gecko profiling metadata */
+ js::UnprotectedData<js::GeckoProfilerRuntime> geckoProfiler_;
+
+ public:
+ js::GeckoProfilerRuntime& geckoProfiler() { return geckoProfiler_.ref(); }
+
+ // Heap GC roots for PersistentRooted pointers.
+ js::MainThreadData<
+ mozilla::EnumeratedArray<JS::RootKind, JS::RootKind::Limit,
+ mozilla::LinkedList<js::PersistentRootedBase>>>
+ heapRoots;
+
+ void tracePersistentRoots(JSTracer* trc);
+ void finishPersistentRoots();
+
+ void finishRoots();
+
+ private:
+ js::UnprotectedData<const JSPrincipals*> trustedPrincipals_;
+
+ public:
+ void setTrustedPrincipals(const JSPrincipals* p) { trustedPrincipals_ = p; }
+ const JSPrincipals* trustedPrincipals() const { return trustedPrincipals_; }
+
+ js::MainThreadData<const JSWrapObjectCallbacks*> wrapObjectCallbacks;
+ js::MainThreadData<js::PreserveWrapperCallback> preserveWrapperCallback;
+ js::MainThreadData<js::HasReleasedWrapperCallback> hasReleasedWrapperCallback;
+
+ js::MainThreadData<js::ScriptEnvironmentPreparer*> scriptEnvironmentPreparer;
+
+ js::MainThreadData<JS::CTypesActivityCallback> ctypesActivityCallback;
+
+ private:
+ js::WriteOnceData<const JSClass*> windowProxyClass_;
+
+ public:
+ const JSClass* maybeWindowProxyClass() const { return windowProxyClass_; }
+ void setWindowProxyClass(const JSClass* clasp) { windowProxyClass_ = clasp; }
+
+ private:
+ // List of non-ephemeron weak containers to sweep during
+ // beginSweepingSweepGroup.
+ js::MainThreadData<mozilla::LinkedList<JS::detail::WeakCacheBase>>
+ weakCaches_;
+
+ public:
+ mozilla::LinkedList<JS::detail::WeakCacheBase>& weakCaches() {
+ return weakCaches_.ref();
+ }
+ void registerWeakCache(JS::detail::WeakCacheBase* cachep) {
+ weakCaches().insertBack(cachep);
+ }
+
+ template <typename T>
+ struct GlobalObjectWatchersLinkAccess {
+ static mozilla::DoublyLinkedListElement<T>& Get(T* aThis) {
+ return aThis->onNewGlobalObjectWatchersLink;
+ }
+ };
+
+ template <typename T>
+ struct GarbageCollectionWatchersLinkAccess {
+ static mozilla::DoublyLinkedListElement<T>& Get(T* aThis) {
+ return aThis->onGarbageCollectionWatchersLink;
+ }
+ };
+
+ using OnNewGlobalWatchersList =
+ mozilla::DoublyLinkedList<js::Debugger,
+ GlobalObjectWatchersLinkAccess<js::Debugger>>;
+ using OnGarbageCollectionWatchersList = mozilla::DoublyLinkedList<
+ js::Debugger, GarbageCollectionWatchersLinkAccess<js::Debugger>>;
+
+ private:
+ /*
+ * List of all enabled Debuggers that have onNewGlobalObject handler
+ * methods established.
+ */
+ js::MainThreadData<OnNewGlobalWatchersList> onNewGlobalObjectWatchers_;
+
+ /*
+ * List of all enabled Debuggers that have onGarbageCollection handler
+ * methods established.
+ */
+ js::MainThreadData<OnGarbageCollectionWatchersList>
+ onGarbageCollectionWatchers_;
+
+ public:
+ OnNewGlobalWatchersList& onNewGlobalObjectWatchers() {
+ return onNewGlobalObjectWatchers_.ref();
+ }
+
+ OnGarbageCollectionWatchersList& onGarbageCollectionWatchers() {
+ return onGarbageCollectionWatchers_.ref();
+ }
+
+ private:
+ /* Linked list of all Debugger objects in the runtime. */
+ js::MainThreadData<mozilla::LinkedList<js::Debugger>> debuggerList_;
+
+ public:
+ mozilla::LinkedList<js::Debugger>& debuggerList() {
+ return debuggerList_.ref();
+ }
+
+ public:
+ JS::HeapState heapState() const { return gc.heapState(); }
+
+ // How many realms there are across all zones. This number includes
+ // off-thread context realms, so it isn't necessarily equal to the
+ // number of realms visited by RealmsIter.
+ js::MainThreadData<size_t> numRealms;
+
+ // The Gecko Profiler may want to sample the allocations happening across the
+ // browser. This callback can be registered to record the allocation.
+ js::MainThreadData<JS::RecordAllocationsCallback> recordAllocationCallback;
+ js::MainThreadData<double> allocationSamplingProbability;
+
+ private:
+ // Number of debuggee realms in the runtime.
+ js::MainThreadData<size_t> numDebuggeeRealms_;
+
+ // Number of debuggee realms in the runtime observing code coverage.
+ js::MainThreadData<size_t> numDebuggeeRealmsObservingCoverage_;
+
+ public:
+ void incrementNumDebuggeeRealms();
+ void decrementNumDebuggeeRealms();
+
+ size_t numDebuggeeRealms() const { return numDebuggeeRealms_; }
+
+ void incrementNumDebuggeeRealmsObservingCoverage();
+ void decrementNumDebuggeeRealmsObservingCoverage();
+
+ void startRecordingAllocations(double probability,
+ JS::RecordAllocationsCallback callback);
+ void stopRecordingAllocations();
+ void ensureRealmIsRecordingAllocations(JS::Handle<js::GlobalObject*> global);
+
+ /* Locale-specific callbacks for string conversion. */
+ js::MainThreadData<const JSLocaleCallbacks*> localeCallbacks;
+
+ /* Default locale for Internationalization API */
+ js::MainThreadData<js::UniqueChars> defaultLocale;
+
+ /* If true, new scripts must be created with PC counter information. */
+ js::MainThreadOrIonCompileData<bool> profilingScripts;
+
+ /* Strong references on scripts held for PCCount profiling API. */
+ js::MainThreadData<JS::PersistentRooted<js::ScriptAndCountsVector>*>
+ scriptAndCountsVector;
+
+ using RootedPlainObjVec = JS::PersistentRooted<
+ JS::GCVector<js::PlainObject*, 0, js::SystemAllocPolicy>>;
+ js::MainThreadData<js::UniquePtr<RootedPlainObjVec>> watchtowerTestingLog;
+
+ private:
+ /* Code coverage output. */
+ js::UnprotectedData<js::coverage::LCovRuntime> lcovOutput_;
+
+ public:
+ js::coverage::LCovRuntime& lcovOutput() { return lcovOutput_.ref(); }
+
+ private:
+ js::UnprotectedData<js::jit::JitRuntime*> jitRuntime_;
+
+ public:
+ mozilla::Maybe<js::frontend::ScriptIndexRange> getSelfHostedScriptIndexRange(
+ js::PropertyName* name);
+
+ [[nodiscard]] bool createJitRuntime(JSContext* cx);
+ js::jit::JitRuntime* jitRuntime() const { return jitRuntime_.ref(); }
+ bool hasJitRuntime() const { return !!jitRuntime_; }
+
+ private:
+ // Used to generate random keys for hash tables.
+ mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG> randomKeyGenerator_;
+ mozilla::non_crypto::XorShift128PlusRNG& randomKeyGenerator();
+
+ // Used to generate random hash codes for symbols.
+ mozilla::Maybe<mozilla::non_crypto::XorShift128PlusRNG>
+ randomHashCodeGenerator_;
+
+ public:
+ mozilla::HashCodeScrambler randomHashCodeScrambler();
+ mozilla::non_crypto::XorShift128PlusRNG forkRandomKeyGenerator();
+
+ js::HashNumber randomHashCode();
+
+ //-------------------------------------------------------------------------
+ // Self-hosting support
+ //-------------------------------------------------------------------------
+
+ bool hasInitializedSelfHosting() const { return hasSelfHostStencil(); }
+
+ bool initSelfHostingStencil(JSContext* cx, JS::SelfHostedCache xdrCache,
+ JS::SelfHostedWriter xdrWriter);
+ bool initSelfHostingFromStencil(JSContext* cx);
+ void finishSelfHosting();
+ void traceSelfHostingStencil(JSTracer* trc);
+ js::GeneratorKind getSelfHostedFunctionGeneratorKind(js::PropertyName* name);
+ bool delazifySelfHostedFunction(JSContext* cx,
+ js::Handle<js::PropertyName*> name,
+ js::Handle<JSFunction*> targetFun);
+ bool getSelfHostedValue(JSContext* cx, js::Handle<js::PropertyName*> name,
+ js::MutableHandleValue vp);
+ void assertSelfHostedFunctionHasCanonicalName(
+ JS::Handle<js::PropertyName*> name);
+
+ private:
+ void setSelfHostingStencil(
+ JS::MutableHandle<js::UniquePtr<js::frontend::CompilationInput>> input,
+ RefPtr<js::frontend::CompilationStencil>&& stencil);
+
+ //-------------------------------------------------------------------------
+ // Locale information
+ //-------------------------------------------------------------------------
+
+ public:
+ /*
+ * Set the default locale for the ECMAScript Internationalization API
+ * (Intl.Collator, Intl.NumberFormat, Intl.DateTimeFormat).
+ * Note that the Internationalization API encourages clients to
+ * specify their own locales.
+ * The locale string remains owned by the caller.
+ */
+ bool setDefaultLocale(const char* locale);
+
+ /* Reset the default locale to OS defaults. */
+ void resetDefaultLocale();
+
+ /* Gets current default locale. String remains owned by context. */
+ const char* getDefaultLocale();
+
+ /* Garbage collector state. */
+ js::gc::GCRuntime gc;
+
+ bool hasZealMode(js::gc::ZealMode mode) { return gc.hasZealMode(mode); }
+
+ void lockGC() { gc.lockGC(); }
+
+ void unlockGC() { gc.unlockGC(); }
+
+ js::WriteOnceData<js::PropertyName*> emptyString;
+
+ public:
+ JS::GCContext* gcContext() { return &gc.mainThreadContext.ref(); }
+
+#if !JS_HAS_INTL_API
+ /* Number localization, used by jsnum.cpp. */
+ js::WriteOnceData<const char*> thousandsSeparator;
+ js::WriteOnceData<const char*> decimalSeparator;
+ js::WriteOnceData<const char*> numGrouping;
+#endif
+
+ private:
+ js::WriteOnceData<bool> beingDestroyed_;
+
+ public:
+ bool isBeingDestroyed() const { return beingDestroyed_; }
+
+ private:
+ bool allowContentJS_;
+
+ public:
+ bool allowContentJS() const { return allowContentJS_; }
+
+ friend class js::AutoAssertNoContentJS;
+
+ private:
+ // Table of all atoms other than those in permanentAtoms and staticStrings.
+ js::WriteOnceData<js::AtomsTable*> atoms_;
+
+ // Set of all live symbols produced by Symbol.for(). All such symbols are
+ // allocated in the atoms zone. Reading or writing the symbol registry
+ // can only be done from the main thread.
+ js::MainThreadOrGCTaskData<js::SymbolRegistry> symbolRegistry_;
+
+ js::WriteOnceData<js::FrozenAtomSet*> permanentAtoms_;
+
+ public:
+ bool initializeAtoms(JSContext* cx);
+ void finishAtoms();
+ bool atomsAreFinished() const { return !atoms_; }
+
+ js::AtomsTable* atomsForSweeping() {
+ MOZ_ASSERT(JS::RuntimeHeapIsCollecting());
+ return atoms_;
+ }
+
+ js::AtomsTable& atoms() {
+ MOZ_ASSERT(atoms_);
+ return *atoms_;
+ }
+
+ JS::Zone* atomsZone() {
+ MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(this));
+ return unsafeAtomsZone();
+ }
+ JS::Zone* unsafeAtomsZone() { return gc.atomsZone(); }
+
+#ifdef DEBUG
+ bool isAtomsZone(const JS::Zone* zone) const {
+ return JS::shadow::Zone::from(zone)->isAtomsZone();
+ }
+#endif
+
+ bool activeGCInAtomsZone();
+
+ js::SymbolRegistry& symbolRegistry() { return symbolRegistry_.ref(); }
+
+ // Permanent atoms are fixed during initialization of the runtime and are
+ // not modified or collected until the runtime is destroyed. These may be
+  // shared with another, longer-living runtime through |parentRuntime| and
+ // can be freely accessed with no locking necessary.
+
+ // Permanent atoms pre-allocated for general use.
+ js::WriteOnceData<js::StaticStrings*> staticStrings;
+
+ // Cached pointers to various permanent property names.
+ js::WriteOnceData<JSAtomState*> commonNames;
+
+ // All permanent atoms in the runtime, other than those in staticStrings.
+ // Access to this does not require a lock because it is frozen and thus
+ // read-only.
+ const js::FrozenAtomSet* permanentAtoms() const {
+ MOZ_ASSERT(permanentAtomsPopulated());
+ return permanentAtoms_.ref();
+ }
+
+ // The permanent atoms table is populated during initialization.
+ bool permanentAtomsPopulated() const { return permanentAtoms_; }
+
+ // Cached well-known symbols (ES6 rev 24 6.1.5.1). Like permanent atoms,
+ // these are shared with the parentRuntime, if any.
+ js::WriteOnceData<js::WellKnownSymbols*> wellKnownSymbols;
+
+#ifdef JS_HAS_INTL_API
+ /* Shared Intl data for this runtime. */
+ js::MainThreadData<js::intl::SharedIntlData> sharedIntlData;
+
+ void traceSharedIntlData(JSTracer* trc);
+#endif
+
+ private:
+ js::SharedScriptDataTableHolder scriptDataTableHolder_;
+
+ public:
+ // Returns the runtime's local script data table holder.
+ js::SharedScriptDataTableHolder& scriptDataTableHolder();
+
+ private:
+ static mozilla::Atomic<size_t> liveRuntimesCount;
+
+ public:
+ static bool hasLiveRuntimes() { return liveRuntimesCount > 0; }
+ static bool hasSingleLiveRuntime() { return liveRuntimesCount == 1; }
+
+ explicit JSRuntime(JSRuntime* parentRuntime);
+ ~JSRuntime();
+
+  // destroyRuntime is used instead of a destructor to ensure that the
+  // downcast to JSContext remains valid. The final GC triggered here depends
+  // on this.
+ void destroyRuntime();
+
+ bool init(JSContext* cx, uint32_t maxbytes);
+
+ JSRuntime* thisFromCtor() { return this; }
+
+ private:
+ // Number of live SharedArrayBuffer objects, including those in Wasm shared
+ // memories. uint64_t to avoid any risk of overflow.
+ js::MainThreadData<uint64_t> liveSABs;
+
+ public:
+ void incSABCount() {
+ MOZ_RELEASE_ASSERT(liveSABs != UINT64_MAX);
+ liveSABs++;
+ }
+
+ void decSABCount() {
+ MOZ_RELEASE_ASSERT(liveSABs > 0);
+ liveSABs--;
+ }
+
+ bool hasLiveSABs() const { return liveSABs > 0; }
+
+ public:
+ js::MainThreadData<JS::BeforeWaitCallback> beforeWaitCallback;
+ js::MainThreadData<JS::AfterWaitCallback> afterWaitCallback;
+
+ public:
+ void reportAllocationOverflow() {
+ js::ReportAllocationOverflow(static_cast<JSContext*>(nullptr));
+ }
+
+ /*
+ * This should be called after system malloc/calloc/realloc returns nullptr
+   * to try to recover some memory or to report an error. For realloc, the
+ * original pointer must be passed as reallocPtr.
+ *
+ * The function must be called outside the GC lock.
+ */
+ JS_PUBLIC_API void* onOutOfMemory(js::AllocFunction allocator,
+ arena_id_t arena, size_t nbytes,
+ void* reallocPtr = nullptr,
+ JSContext* maybecx = nullptr);
+
+ /* onOutOfMemory but can call OnLargeAllocationFailure. */
+ JS_PUBLIC_API void* onOutOfMemoryCanGC(js::AllocFunction allocator,
+ arena_id_t arena, size_t nbytes,
+ void* reallocPtr = nullptr);
+
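+  // Usage sketch, mirroring the implementation in Runtime.cpp: retry a
+  // failed system allocation after the GC has had a chance to release
+  // memory.
+  //
+  //   void* p = js_arena_malloc(arena, nbytes);
+  //   if (!p) {
+  //     p = rt->onOutOfMemory(js::AllocFunction::Malloc, arena, nbytes,
+  //                           /* reallocPtr = */ nullptr, cx);
+  //   }
+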
+ static const unsigned LARGE_ALLOCATION = 25 * 1024 * 1024;
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::RuntimeSizes* rtSizes);
+
+ private:
+ // Settings for how helper threads can be used.
+ mozilla::Atomic<bool, mozilla::SequentiallyConsistent>
+ offthreadIonCompilationEnabled_;
+ mozilla::Atomic<bool, mozilla::SequentiallyConsistent>
+ parallelParsingEnabled_;
+
+ js::MainThreadData<bool> autoWritableJitCodeActive_;
+
+ public:
+ // Note: these values may be toggled dynamically (in response to about:config
+ // prefs changing).
+ void setOffthreadIonCompilationEnabled(bool value) {
+ offthreadIonCompilationEnabled_ = value;
+ }
+ bool canUseOffthreadIonCompilation() const {
+ return offthreadIonCompilationEnabled_;
+ }
+ void setParallelParsingEnabled(bool value) {
+ parallelParsingEnabled_ = value;
+ }
+ bool canUseParallelParsing() const { return parallelParsingEnabled_; }
+
+ void toggleAutoWritableJitCodeActive(bool b) {
+ MOZ_ASSERT(autoWritableJitCodeActive_ != b,
+ "AutoWritableJitCode should not be nested.");
+ autoWritableJitCodeActive_ = b;
+ }
+
+ /* See comment for JS::SetOutOfMemoryCallback in js/MemoryCallbacks.h. */
+ js::MainThreadData<JS::OutOfMemoryCallback> oomCallback;
+ js::MainThreadData<void*> oomCallbackData;
+
+ /*
+ * Debugger.Memory functions like takeCensus use this embedding-provided
+ * function to assess the size of malloc'd blocks of memory.
+ */
+ js::MainThreadData<mozilla::MallocSizeOf> debuggerMallocSizeOf;
+
+ /* Last time at which an animation was played for this runtime. */
+ js::MainThreadData<mozilla::TimeStamp> lastAnimationTime;
+
+ private:
+ /* The stack format for the current runtime. Only valid on non-child
+ * runtimes. */
+ mozilla::Atomic<js::StackFormat, mozilla::ReleaseAcquire> stackFormat_;
+
+ public:
+ js::StackFormat stackFormat() const {
+ const JSRuntime* rt = this;
+ while (rt->parentRuntime) {
+ MOZ_ASSERT(rt->stackFormat_ == js::StackFormat::Default);
+ rt = rt->parentRuntime;
+ }
+ MOZ_ASSERT(rt->stackFormat_ != js::StackFormat::Default);
+ return rt->stackFormat_;
+ }
+ void setStackFormat(js::StackFormat format) {
+ MOZ_ASSERT(!parentRuntime);
+ MOZ_ASSERT(format != js::StackFormat::Default);
+ stackFormat_ = format;
+ }
+
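+  // Worked example for the walk in stackFormat(): a worker runtime W created
+  // with parent P keeps StackFormat::Default, setStackFormat is called only
+  // on P, and W's stackFormat() follows parentRuntime links up to P before
+  // returning P's format.
+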
+ private:
+  // Warning: no data in these caches may be accessed from another thread.
+  // Ion does need to take addresses inside here, which is safe because the
+  // actual cache lookups are performed on the main thread, through jitted
+  // code.
+ js::MainThreadOrParseOrIonCompileData<js::RuntimeCaches> caches_;
+
+ public:
+ js::RuntimeCaches& caches() { return caches_.ref(); }
+
+ // List of all the live wasm::Instances in the runtime. Equal to the union
+ // of all instances registered in all JS::Realms. Accessed from watchdog
+ // threads for purposes of wasm::InterruptRunningCode().
+ js::ExclusiveData<js::wasm::InstanceVector> wasmInstances;
+
+ // A counter used when recording the order in which modules had their
+ // AsyncEvaluation field set to true. This is used to order queued
+ // evaluations. This is reset when the last module that was async evaluating
+ // is finished.
+ //
+ // See https://tc39.es/ecma262/#sec-async-module-execution-fulfilled step 10
+ // for use.
+ js::MainThreadData<uint32_t> moduleAsyncEvaluatingPostOrder;
+
+ // The implementation-defined abstract operation HostResolveImportedModule.
+ js::MainThreadData<JS::ModuleResolveHook> moduleResolveHook;
+
+ // A hook that implements the abstract operations
+ // HostGetImportMetaProperties and HostFinalizeImportMeta.
+ js::MainThreadData<JS::ModuleMetadataHook> moduleMetadataHook;
+
+ // A hook that implements the abstract operation
+ // HostImportModuleDynamically. This is also used to enable/disable dynamic
+  // module import and can be accessed by off-thread parsing.
+ mozilla::Atomic<JS::ModuleDynamicImportHook> moduleDynamicImportHook;
+
+ // The supported module import assertions.
+ // https://tc39.es/proposal-import-assertions/#sec-hostgetsupportedimportassertions
+ js::MainThreadOrParseData<JS::ImportAssertionVector>
+ supportedImportAssertions;
+
+ // Hooks called when script private references are created and destroyed.
+ js::MainThreadData<JS::ScriptPrivateReferenceHook> scriptPrivateAddRefHook;
+ js::MainThreadData<JS::ScriptPrivateReferenceHook> scriptPrivateReleaseHook;
+
+ void addRefScriptPrivate(const JS::Value& value) {
+ if (!value.isUndefined() && scriptPrivateAddRefHook) {
+ scriptPrivateAddRefHook(value);
+ }
+ }
+
+ void releaseScriptPrivate(const JS::Value& value) {
+ if (!value.isUndefined() && scriptPrivateReleaseHook) {
+ scriptPrivateReleaseHook(value);
+ }
+ }
+
+ public:
+#if defined(NIGHTLY_BUILD)
+ // Support for informing the embedding of any error thrown.
+ // This mechanism is designed to let the embedding
+ // log/report/fail in case certain errors are thrown
+ // (e.g. SyntaxError, ReferenceError or TypeError
+ // in critical code).
+ struct ErrorInterceptionSupport {
+ ErrorInterceptionSupport() : isExecuting(false), interceptor(nullptr) {}
+
+ // true if the error interceptor is currently executing,
+ // false otherwise. Used to avoid infinite loops.
+ bool isExecuting;
+
+    // If non-null, any call to `setPendingException`
+    // in this runtime will trigger a call to `interceptor`.
+ JSErrorInterceptor* interceptor;
+ };
+ ErrorInterceptionSupport errorInterception;
+#endif // defined(NIGHTLY_BUILD)
+
+ public:
+ JS::GlobalInitializeCallback getShadowRealmInitializeGlobalCallback() {
+ return shadowRealmInitializeGlobalCallback;
+ }
+
+ JS::GlobalCreationCallback getShadowRealmGlobalCreationCallback() {
+ return shadowRealmGlobalCreationCallback;
+ }
+
+ js::MainThreadData<JS::GlobalInitializeCallback>
+ shadowRealmInitializeGlobalCallback;
+
+ js::MainThreadData<JS::GlobalCreationCallback>
+ shadowRealmGlobalCreationCallback;
+};
+
+namespace js {
+
+void Metrics::addTelemetry(JSMetric id, uint32_t sample) {
+ rt_->addTelemetry(id, sample);
+}
+
+static MOZ_ALWAYS_INLINE void MakeRangeGCSafe(Value* vec, size_t len) {
+ // Don't PodZero here because JS::Value is non-trivial.
+ for (size_t i = 0; i < len; i++) {
+ vec[i].setDouble(+0.0);
+ }
+}
+
+static MOZ_ALWAYS_INLINE void MakeRangeGCSafe(Value* beg, Value* end) {
+ MakeRangeGCSafe(beg, end - beg);
+}
+
+static MOZ_ALWAYS_INLINE void MakeRangeGCSafe(jsid* beg, jsid* end) {
+ std::fill(beg, end, PropertyKey::Int(0));
+}
+
+static MOZ_ALWAYS_INLINE void MakeRangeGCSafe(jsid* vec, size_t len) {
+ MakeRangeGCSafe(vec, vec + len);
+}
+
+static MOZ_ALWAYS_INLINE void MakeRangeGCSafe(Shape** beg, Shape** end) {
+ std::fill(beg, end, nullptr);
+}
+
+static MOZ_ALWAYS_INLINE void MakeRangeGCSafe(Shape** vec, size_t len) {
+ MakeRangeGCSafe(vec, vec + len);
+}
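+
+// Illustrative sketch (a hypothetical caller, not taken from this patch):
+// after allocating raw Value storage, poison it before any GC can observe
+// it, e.g.:
+//
+//   Value* vals = static_cast<Value*>(js_malloc(n * sizeof(Value)));
+//   if (vals) {
+//     MakeRangeGCSafe(vals, n);  // every slot now holds the double +0.0
+//   }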
+
+static MOZ_ALWAYS_INLINE void SetValueRangeToUndefined(Value* beg, Value* end) {
+ for (Value* v = beg; v != end; ++v) {
+ v->setUndefined();
+ }
+}
+
+static MOZ_ALWAYS_INLINE void SetValueRangeToUndefined(Value* vec, size_t len) {
+ SetValueRangeToUndefined(vec, vec + len);
+}
+
+static MOZ_ALWAYS_INLINE void SetValueRangeToNull(Value* beg, Value* end) {
+ for (Value* v = beg; v != end; ++v) {
+ v->setNull();
+ }
+}
+
+static MOZ_ALWAYS_INLINE void SetValueRangeToNull(Value* vec, size_t len) {
+ SetValueRangeToNull(vec, vec + len);
+}
+
+extern const JSSecurityCallbacks NullSecurityCallbacks;
+
+// This callback is set by JS::SetProcessLargeAllocationFailureCallback
+// and may be null. See comment in jsapi.h.
+extern mozilla::Atomic<JS::LargeAllocationFailureCallback>
+ OnLargeAllocationFailure;
+
+// This callback is set by JS::SetBuildIdOp and may be null. See comment in
+// jsapi.h.
+extern mozilla::Atomic<JS::BuildIdOp> GetBuildId;
+
+extern JS::FilenameValidationCallback gFilenameValidationCallback;
+
+} /* namespace js */
+
+#endif /* vm_Runtime_h */
diff --git a/js/src/vm/SavedFrame.h b/js/src/vm/SavedFrame.h
new file mode 100644
index 0000000000..a67594fec9
--- /dev/null
+++ b/js/src/vm/SavedFrame.h
@@ -0,0 +1,297 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SavedFrame_h
+#define vm_SavedFrame_h
+
+#include "mozilla/Attributes.h"
+
+#include "gc/Policy.h"
+#include "js/GCHashTable.h"
+#include "js/Principals.h"
+#include "js/UbiNode.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class SavedFrame : public NativeObject {
+ friend class SavedStacks;
+ friend struct ::JSStructuredCloneReader;
+
+ static const ClassSpec classSpec_;
+
+ public:
+ static const JSClass class_;
+ static const JSClass protoClass_;
+ static const JSPropertySpec protoAccessors[];
+ static const JSFunctionSpec protoFunctions[];
+ static const JSFunctionSpec staticFunctions[];
+
+ // Prototype methods and properties to be exposed to JS.
+ static bool construct(JSContext* cx, unsigned argc, Value* vp);
+ static bool sourceProperty(JSContext* cx, unsigned argc, Value* vp);
+ static bool sourceIdProperty(JSContext* cx, unsigned argc, Value* vp);
+ static bool lineProperty(JSContext* cx, unsigned argc, Value* vp);
+ static bool columnProperty(JSContext* cx, unsigned argc, Value* vp);
+ static bool functionDisplayNameProperty(JSContext* cx, unsigned argc,
+ Value* vp);
+ static bool asyncCauseProperty(JSContext* cx, unsigned argc, Value* vp);
+ static bool asyncParentProperty(JSContext* cx, unsigned argc, Value* vp);
+ static bool parentProperty(JSContext* cx, unsigned argc, Value* vp);
+ static bool toStringMethod(JSContext* cx, unsigned argc, Value* vp);
+
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+
+ // Convenient getters for SavedFrame's reserved slots for use from C++.
+ JSAtom* getSource();
+ uint32_t getSourceId();
+ uint32_t getLine();
+ uint32_t getColumn();
+ JSAtom* getFunctionDisplayName();
+ JSAtom* getAsyncCause();
+ SavedFrame* getParent() const;
+ JSPrincipals* getPrincipals();
+ bool getMutedErrors();
+ bool isSelfHosted(JSContext* cx);
+ bool isWasm();
+
+ // When isWasm():
+ uint32_t wasmFuncIndex();
+ uint32_t wasmBytecodeOffset();
+
+  // Iterator for use with C++11 range-based for loops, e.g.:
+ //
+ // Rooted<SavedFrame*> stack(cx, getSomeSavedFrameStack());
+ // for (Handle<SavedFrame*> frame : SavedFrame::RootedRange(cx, stack)) {
+ // ...
+ // }
+ //
+ // Each frame yielded by `SavedFrame::RootedRange` is only a valid handle to
+ // a rooted `SavedFrame` within the loop's block for a single loop
+ // iteration. When the next iteration begins, the value is invalidated.
+
+ class RootedRange;
+
+ class MOZ_STACK_CLASS RootedIterator {
+ friend class RootedRange;
+ RootedRange* range_;
+ // For use by RootedRange::end() only.
+ explicit RootedIterator() : range_(nullptr) {}
+
+ public:
+ explicit RootedIterator(RootedRange& range) : range_(&range) {}
+ Handle<SavedFrame*> operator*() {
+ MOZ_ASSERT(range_);
+ return range_->frame_;
+ }
+ bool operator!=(const RootedIterator& rhs) const {
+ // We should only ever compare to the null range, aka we are just
+ // testing if this range is done.
+ MOZ_ASSERT(rhs.range_ == nullptr);
+ return range_->frame_ != nullptr;
+ }
+ inline void operator++();
+ };
+
+ class MOZ_STACK_CLASS RootedRange {
+ friend class RootedIterator;
+ Rooted<SavedFrame*> frame_;
+
+ public:
+ RootedRange(JSContext* cx, Handle<SavedFrame*> frame) : frame_(cx, frame) {}
+ RootedIterator begin() { return RootedIterator(*this); }
+ RootedIterator end() { return RootedIterator(); }
+ };
+
+ struct Lookup;
+ struct HashPolicy;
+
+ typedef JS::GCHashSet<WeakHeapPtr<SavedFrame*>, HashPolicy, SystemAllocPolicy>
+ Set;
+
+ private:
+ static SavedFrame* create(JSContext* cx);
+ [[nodiscard]] static bool finishSavedFrameInit(JSContext* cx,
+ HandleObject ctor,
+ HandleObject proto);
+ void initFromLookup(JSContext* cx, Handle<Lookup> lookup);
+ void initSource(JSAtom* source);
+ void initSourceId(uint32_t id);
+ void initLine(uint32_t line);
+ void initColumn(uint32_t column);
+ void initFunctionDisplayName(JSAtom* maybeName);
+ void initAsyncCause(JSAtom* maybeCause);
+ void initParent(SavedFrame* maybeParent);
+ void initPrincipalsAlreadyHeldAndMutedErrors(JSPrincipals* principals,
+ bool mutedErrors);
+ void initPrincipalsAndMutedErrors(JSPrincipals* principals, bool mutedErrors);
+
+ enum {
+ // The reserved slots in the SavedFrame class.
+ JSSLOT_SOURCE,
+ JSSLOT_SOURCEID,
+ JSSLOT_LINE,
+ JSSLOT_COLUMN,
+ JSSLOT_FUNCTIONDISPLAYNAME,
+ JSSLOT_ASYNCCAUSE,
+ JSSLOT_PARENT,
+ JSSLOT_PRINCIPALS,
+
+ // The total number of reserved slots in the SavedFrame class.
+ JSSLOT_COUNT
+ };
+};
+
+struct SavedFrame::HashPolicy {
+ using Lookup = SavedFrame::Lookup;
+ using SavedFramePtrHasher = StableCellHasher<SavedFrame*>;
+ using JSPrincipalsPtrHasher = PointerHasher<JSPrincipals*>;
+
+ static bool maybeGetHash(const Lookup& l, HashNumber* hashOut);
+ static bool ensureHash(const Lookup& l, HashNumber* hashOut);
+ static HashNumber hash(const Lookup& lookup);
+ static bool match(SavedFrame* existing, const Lookup& lookup);
+
+ using Key = WeakHeapPtr<SavedFrame*>;
+ static void rekey(Key& key, const Key& newKey);
+
+ private:
+ static HashNumber calculateHash(const Lookup& lookup, HashNumber parentHash);
+};
+
+} // namespace js
+
+namespace mozilla {
+
+template <>
+struct FallibleHashMethods<js::SavedFrame::HashPolicy> {
+ template <typename Lookup>
+ static bool maybeGetHash(Lookup&& l, HashNumber* hashOut) {
+ return js::SavedFrame::HashPolicy::maybeGetHash(std::forward<Lookup>(l),
+ hashOut);
+ }
+ template <typename Lookup>
+ static bool ensureHash(Lookup&& l, HashNumber* hashOut) {
+ return js::SavedFrame::HashPolicy::ensureHash(std::forward<Lookup>(l),
+ hashOut);
+ }
+};
+
+} // namespace mozilla
+
+namespace js {
+
+// Assert that the given object, if not null, is either a SavedFrame object
+// or a wrapper (Xray or CCW) around a SavedFrame object.
+inline void AssertObjectIsSavedFrameOrWrapper(JSContext* cx,
+ HandleObject stack);
+
+// When we reconstruct a SavedFrame stack from a JS::ubi::StackFrame, we may
+// not have access to the principals that the original stack was captured
+// with. Instead, we use one of these two singleton principals, chosen by
+// whether JS::ubi::StackFrame::isSystem holds. These singletons should never
+// be passed to the subsumes callback, and should be special-cased with a
+// shortcut before that.
+struct ReconstructedSavedFramePrincipals : public JSPrincipals {
+ explicit ReconstructedSavedFramePrincipals() : JSPrincipals() {
+ MOZ_ASSERT(is(this));
+ this->refcount = 1;
+ }
+
+ [[nodiscard]] bool write(JSContext* cx,
+ JSStructuredCloneWriter* writer) override {
+ MOZ_ASSERT(false,
+ "ReconstructedSavedFramePrincipals should never be exposed to "
+ "embedders");
+ return false;
+ }
+
+ bool isSystemOrAddonPrincipal() override {
+ MOZ_ASSERT(false,
+ "ReconstructedSavedFramePrincipals should never be exposed to "
+ "embedders");
+ return false;
+ }
+
+ static ReconstructedSavedFramePrincipals IsSystem;
+ static ReconstructedSavedFramePrincipals IsNotSystem;
+
+ // Return true if the given JSPrincipals* points to one of the
+ // ReconstructedSavedFramePrincipals singletons, false otherwise.
+ static bool is(JSPrincipals* p) {
+ return p == &IsSystem || p == &IsNotSystem;
+ }
+
+ // Get the appropriate ReconstructedSavedFramePrincipals singleton for the
+ // given JS::ubi::StackFrame that is being reconstructed as a SavedFrame
+ // stack.
+ static JSPrincipals* getSingleton(JS::ubi::StackFrame& f) {
+ return f.isSystem() ? &IsSystem : &IsNotSystem;
+ }
+};
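+
+// Illustrative use (names as in SavedStacks.cpp's
+// SavedFrameSubsumedByPrincipals): callers short-circuit on these singletons
+// before ever consulting the embedding's subsumes callback:
+//
+//   if (framePrincipals == &ReconstructedSavedFramePrincipals::IsSystem) {
+//     return cx->runningWithTrustedPrincipals();
+//   }
+//   if (framePrincipals == &ReconstructedSavedFramePrincipals::IsNotSystem) {
+//     return true;
+//   }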
+
+inline void SavedFrame::RootedIterator::operator++() {
+ MOZ_ASSERT(range_);
+ range_->frame_ = range_->frame_->getParent();
+}
+
+} // namespace js
+
+namespace JS {
+namespace ubi {
+
+using js::SavedFrame;
+
+// A concrete JS::ubi::StackFrame that is backed by a live SavedFrame object.
+template <>
+class ConcreteStackFrame<SavedFrame> : public BaseStackFrame {
+ explicit ConcreteStackFrame(SavedFrame* ptr) : BaseStackFrame(ptr) {}
+ SavedFrame& get() const { return *static_cast<SavedFrame*>(ptr); }
+
+ public:
+ static void construct(void* storage, SavedFrame* ptr) {
+ new (storage) ConcreteStackFrame(ptr);
+ }
+
+ StackFrame parent() const override { return get().getParent(); }
+ uint32_t line() const override { return get().getLine(); }
+ uint32_t column() const override { return get().getColumn(); }
+
+ AtomOrTwoByteChars source() const override {
+ auto source = get().getSource();
+ return AtomOrTwoByteChars(source);
+ }
+
+ uint32_t sourceId() const override { return get().getSourceId(); }
+
+ AtomOrTwoByteChars functionDisplayName() const override {
+ auto name = get().getFunctionDisplayName();
+ return AtomOrTwoByteChars(name);
+ }
+
+ void trace(JSTracer* trc) override {
+ JSObject* prev = &get();
+ JSObject* next = prev;
+ js::TraceRoot(trc, &next, "ConcreteStackFrame<SavedFrame>::ptr");
+ if (next != prev) {
+ ptr = next;
+ }
+ }
+
+ bool isSelfHosted(JSContext* cx) const override {
+ return get().isSelfHosted(cx);
+ }
+
+ bool isSystem() const override;
+
+ [[nodiscard]] bool constructSavedFrameStack(
+ JSContext* cx, MutableHandleObject outSavedFrameStack) const override;
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif // vm_SavedFrame_h
diff --git a/js/src/vm/SavedStacks-inl.h b/js/src/vm/SavedStacks-inl.h
new file mode 100644
index 0000000000..176d7e7272
--- /dev/null
+++ b/js/src/vm/SavedStacks-inl.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SavedStacksInl_h
+#define vm_SavedStacksInl_h
+
+#include "vm/SavedStacks.h"
+
+// Assert that if the given object is not null, its Class is
+// SavedFrame::class_, or that the given object is a cross-compartment or
+// Xray wrapper around such an object.
+//
+// We allow wrappers here because the JSAPI functions for working with
+// SavedFrame objects and the SavedFrame accessors themselves handle wrappers
+// and use the original caller's compartment's principals to determine what
+// level of data to present. Unwrapping and entering the referent's compartment
+// would mess that up. See the module level documentation in
+// `js/src/vm/SavedStacks.h` as well as the comments in `js/src/jsapi.h`.
+inline void js::AssertObjectIsSavedFrameOrWrapper(JSContext* cx,
+ HandleObject stack) {
+ if (stack) {
+ MOZ_RELEASE_ASSERT(stack->canUnwrapAs<SavedFrame>());
+ }
+}
+
+#endif // vm_SavedStacksInl_h
diff --git a/js/src/vm/SavedStacks.cpp b/js/src/vm/SavedStacks.cpp
new file mode 100644
index 0000000000..bb5a239048
--- /dev/null
+++ b/js/src/vm/SavedStacks.cpp
@@ -0,0 +1,2097 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/SavedStacks.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "jsapi.h"
+#include "jsmath.h"
+#include "jsnum.h"
+
+#include "gc/GCContext.h"
+#include "gc/HashUtil.h"
+#include "js/CharacterEncoding.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/PropertyAndElement.h" // JS_DefineProperty, JS_GetProperty
+#include "js/PropertySpec.h"
+#include "js/SavedFrameAPI.h"
+#include "js/Stack.h"
+#include "js/Vector.h"
+#include "util/DifferentialTesting.h"
+#include "util/StringBuffer.h"
+#include "vm/Compartment.h"
+#include "vm/FrameIter.h"
+#include "vm/GeckoProfiler.h"
+#include "vm/JSScript.h"
+#include "vm/Realm.h"
+#include "vm/SavedFrame.h"
+#include "vm/WrapperObject.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "gc/StableCellHasher-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSContext-inl.h"
+
+using mozilla::AddToHash;
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+namespace js {
+
+/**
+ * Maximum number of saved frames returned for an async stack.
+ */
+const uint32_t ASYNC_STACK_MAX_FRAME_COUNT = 60;
+
+void LiveSavedFrameCache::trace(JSTracer* trc) {
+ if (!initialized()) {
+ return;
+ }
+
+ for (auto* entry = frames->begin(); entry < frames->end(); entry++) {
+ TraceEdge(trc, &entry->savedFrame,
+ "LiveSavedFrameCache::frames SavedFrame");
+ }
+}
+
+bool LiveSavedFrameCache::insert(JSContext* cx, FramePtr&& framePtr,
+ const jsbytecode* pc,
+ Handle<SavedFrame*> savedFrame) {
+ MOZ_ASSERT(savedFrame);
+ MOZ_ASSERT(initialized());
+
+#ifdef DEBUG
+ // There should not already be an entry for this frame. Checking the full
+ // stack really slows down some tests, so just check the first and last five
+ // hundred.
+ size_t limit = std::min(frames->length() / 2, size_t(500));
+ for (size_t i = 0; i < limit; i++) {
+ MOZ_ASSERT(Key(framePtr) != (*frames)[i].key);
+ MOZ_ASSERT(Key(framePtr) != (*frames)[frames->length() - 1 - i].key);
+ }
+#endif
+
+ if (!frames->emplaceBack(framePtr, pc, savedFrame)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ framePtr.setHasCachedSavedFrame();
+
+ return true;
+}
+
+void LiveSavedFrameCache::find(JSContext* cx, FramePtr& framePtr,
+ const jsbytecode* pc,
+ MutableHandle<SavedFrame*> frame) const {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(framePtr.hasCachedSavedFrame());
+
+ // The assertions here check that either 1) frames' hasCachedSavedFrame flags
+ // accurately indicate the presence of a cache entry for that frame (ignoring
+ // pc mismatches), or 2) the cache is completely empty, having been flushed
+ // for a realm mismatch.
+
+ // If we flushed the cache due to a realm mismatch, then we shouldn't
+ // expect to find any frames in the cache.
+ if (frames->empty()) {
+ frame.set(nullptr);
+ return;
+ }
+
+ // All our SavedFrames should be in the same realm. If the last
+ // entry's SavedFrame's realm doesn't match cx's, flush the cache.
+ if (frames->back().savedFrame->realm() != cx->realm()) {
+#ifdef DEBUG
+ // Check that they are, indeed, all in the same realm.
+ auto realm = frames->back().savedFrame->realm();
+ for (const auto& f : (*frames)) {
+ MOZ_ASSERT(realm == f.savedFrame->realm());
+ }
+#endif
+ frames->clear();
+ frame.set(nullptr);
+ return;
+ }
+
+ Key key(framePtr);
+ while (key != frames->back().key) {
+ MOZ_ASSERT(frames->back().savedFrame->realm() == cx->realm());
+
+ // framePtr must have an entry, but apparently it's below this one on the
+ // stack; frames->back() must correspond to a frame younger than framePtr's.
+ // SavedStacks::insertFrames is going to push new cache entries for
+ // everything younger than framePtr, so this entry should be popped.
+ frames->popBack();
+
+ // If the frame's bit was set, the frame should always have an entry in
+ // the cache. (If we purged the entire cache because its SavedFrames had
+ // been captured for a different realm, then we would have
+ // returned early above.)
+ MOZ_RELEASE_ASSERT(!frames->empty());
+ }
+
+ // The youngest valid frame may have run some code, so its current pc may
+ // not match its cache entry's pc. In this case, just treat it as a miss. No
+ // older frame has executed any code; it would have been necessary to pop
+ // this frame for that to happen, but this frame's bit is set.
+ if (pc != frames->back().pc) {
+ frames->popBack();
+ frame.set(nullptr);
+ return;
+ }
+
+ frame.set(frames->back().savedFrame);
+}
+
+void LiveSavedFrameCache::findWithoutInvalidation(
+ const FramePtr& framePtr, MutableHandle<SavedFrame*> frame) const {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(framePtr.hasCachedSavedFrame());
+
+ Key key(framePtr);
+ for (auto& entry : (*frames)) {
+ if (entry.key == key) {
+ frame.set(entry.savedFrame);
+ return;
+ }
+ }
+
+ frame.set(nullptr);
+}
+
+struct MOZ_STACK_CLASS SavedFrame::Lookup {
+ Lookup(JSAtom* source, uint32_t sourceId, uint32_t line, uint32_t column,
+ JSAtom* functionDisplayName, JSAtom* asyncCause, SavedFrame* parent,
+ JSPrincipals* principals, bool mutedErrors,
+ const Maybe<LiveSavedFrameCache::FramePtr>& framePtr = Nothing(),
+ jsbytecode* pc = nullptr, Activation* activation = nullptr)
+ : source(source),
+ sourceId(sourceId),
+ line(line),
+ column(column),
+ functionDisplayName(functionDisplayName),
+ asyncCause(asyncCause),
+ parent(parent),
+ principals(principals),
+ mutedErrors(mutedErrors),
+ framePtr(framePtr),
+ pc(pc),
+ activation(activation) {
+ MOZ_ASSERT(source);
+ MOZ_ASSERT_IF(framePtr.isSome(), activation);
+ if (js::SupportDifferentialTesting()) {
+ this->column = 0;
+ }
+ }
+
+ explicit Lookup(SavedFrame& savedFrame)
+ : source(savedFrame.getSource()),
+ sourceId(savedFrame.getSourceId()),
+ line(savedFrame.getLine()),
+ column(savedFrame.getColumn()),
+ functionDisplayName(savedFrame.getFunctionDisplayName()),
+ asyncCause(savedFrame.getAsyncCause()),
+ parent(savedFrame.getParent()),
+ principals(savedFrame.getPrincipals()),
+ mutedErrors(savedFrame.getMutedErrors()),
+ framePtr(Nothing()),
+ pc(nullptr),
+ activation(nullptr) {
+ MOZ_ASSERT(source);
+ }
+
+ JSAtom* source;
+ uint32_t sourceId;
+ uint32_t line;
+ uint32_t column;
+ JSAtom* functionDisplayName;
+ JSAtom* asyncCause;
+ SavedFrame* parent;
+ JSPrincipals* principals;
+ bool mutedErrors;
+
+  // These are used only by the LiveSavedFrameCache and are not used for
+  // identity or hashing.
+ Maybe<LiveSavedFrameCache::FramePtr> framePtr;
+ jsbytecode* pc;
+ Activation* activation;
+
+ void trace(JSTracer* trc) {
+ TraceRoot(trc, &source, "SavedFrame::Lookup::source");
+ TraceNullableRoot(trc, &functionDisplayName,
+ "SavedFrame::Lookup::functionDisplayName");
+ TraceNullableRoot(trc, &asyncCause, "SavedFrame::Lookup::asyncCause");
+ TraceNullableRoot(trc, &parent, "SavedFrame::Lookup::parent");
+ }
+};
+
+using GCLookupVector =
+ GCVector<SavedFrame::Lookup, ASYNC_STACK_MAX_FRAME_COUNT>;
+
+template <class Wrapper>
+class WrappedPtrOperations<SavedFrame::Lookup, Wrapper> {
+ const SavedFrame::Lookup& value() const {
+ return static_cast<const Wrapper*>(this)->get();
+ }
+
+ public:
+ JSAtom* source() { return value().source; }
+ uint32_t sourceId() { return value().sourceId; }
+ uint32_t line() { return value().line; }
+ uint32_t column() { return value().column; }
+ JSAtom* functionDisplayName() { return value().functionDisplayName; }
+ JSAtom* asyncCause() { return value().asyncCause; }
+ SavedFrame* parent() { return value().parent; }
+ JSPrincipals* principals() { return value().principals; }
+ bool mutedErrors() { return value().mutedErrors; }
+ Maybe<LiveSavedFrameCache::FramePtr> framePtr() { return value().framePtr; }
+ jsbytecode* pc() { return value().pc; }
+ Activation* activation() { return value().activation; }
+};
+
+template <typename Wrapper>
+class MutableWrappedPtrOperations<SavedFrame::Lookup, Wrapper>
+ : public WrappedPtrOperations<SavedFrame::Lookup, Wrapper> {
+ SavedFrame::Lookup& value() { return static_cast<Wrapper*>(this)->get(); }
+
+ public:
+ void setParent(SavedFrame* parent) { value().parent = parent; }
+
+ void setAsyncCause(Handle<JSAtom*> asyncCause) {
+ value().asyncCause = asyncCause;
+ }
+};
+
+/* static */
+bool SavedFrame::HashPolicy::maybeGetHash(const Lookup& l,
+ HashNumber* hashOut) {
+ HashNumber parentHash;
+ if (!SavedFramePtrHasher::maybeGetHash(l.parent, &parentHash)) {
+ return false;
+ }
+ *hashOut = calculateHash(l, parentHash);
+ return true;
+}
+
+/* static */
+bool SavedFrame::HashPolicy::ensureHash(const Lookup& l, HashNumber* hashOut) {
+ HashNumber parentHash;
+ if (!SavedFramePtrHasher::ensureHash(l.parent, &parentHash)) {
+ return false;
+ }
+ *hashOut = calculateHash(l, parentHash);
+ return true;
+}
+
+/* static */
+HashNumber SavedFrame::HashPolicy::hash(const Lookup& lookup) {
+ return calculateHash(lookup, SavedFramePtrHasher::hash(lookup.parent));
+}
+
+/* static */
+HashNumber SavedFrame::HashPolicy::calculateHash(const Lookup& lookup,
+ HashNumber parentHash) {
+ JS::AutoCheckCannotGC nogc;
+ // Assume that we can take line mod 2^32 without losing anything of
+ // interest. If that assumption changes, we'll just need to start with 0
+ // and add another overload of AddToHash with more arguments.
+ return AddToHash(lookup.line, lookup.column, lookup.source,
+ lookup.functionDisplayName, lookup.asyncCause,
+ lookup.mutedErrors, parentHash,
+ JSPrincipalsPtrHasher::hash(lookup.principals));
+}
+
+/* static */
+bool SavedFrame::HashPolicy::match(SavedFrame* existing, const Lookup& lookup) {
+ MOZ_ASSERT(existing);
+
+ if (existing->getLine() != lookup.line) {
+ return false;
+ }
+
+ if (existing->getColumn() != lookup.column) {
+ return false;
+ }
+
+ if (existing->getParent() != lookup.parent) {
+ return false;
+ }
+
+ if (existing->getPrincipals() != lookup.principals) {
+ return false;
+ }
+
+ JSAtom* source = existing->getSource();
+ if (source != lookup.source) {
+ return false;
+ }
+
+ JSAtom* functionDisplayName = existing->getFunctionDisplayName();
+ if (functionDisplayName != lookup.functionDisplayName) {
+ return false;
+ }
+
+ JSAtom* asyncCause = existing->getAsyncCause();
+ if (asyncCause != lookup.asyncCause) {
+ return false;
+ }
+
+ return true;
+}
+
+/* static */
+void SavedFrame::HashPolicy::rekey(Key& key, const Key& newKey) {
+ key = newKey;
+}
+
+/* static */
+bool SavedFrame::finishSavedFrameInit(JSContext* cx, HandleObject ctor,
+ HandleObject proto) {
+ return FreezeObject(cx, proto);
+}
+
+static const JSClassOps SavedFrameClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ SavedFrame::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const ClassSpec SavedFrame::classSpec_ = {
+ GenericCreateConstructor<SavedFrame::construct, 0, gc::AllocKind::FUNCTION>,
+ GenericCreatePrototype<SavedFrame>,
+ SavedFrame::staticFunctions,
+ nullptr,
+ SavedFrame::protoFunctions,
+ SavedFrame::protoAccessors,
+ SavedFrame::finishSavedFrameInit,
+ ClassSpec::DontDefineConstructor};
+
+/* static */ const JSClass SavedFrame::class_ = {
+ "SavedFrame",
+ JSCLASS_HAS_RESERVED_SLOTS(SavedFrame::JSSLOT_COUNT) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_SavedFrame) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &SavedFrameClassOps, &SavedFrame::classSpec_};
+
+const JSClass SavedFrame::protoClass_ = {
+ "SavedFrame.prototype", JSCLASS_HAS_CACHED_PROTO(JSProto_SavedFrame),
+ JS_NULL_CLASS_OPS, &SavedFrame::classSpec_};
+
+/* static */ const JSFunctionSpec SavedFrame::staticFunctions[] = {JS_FS_END};
+
+/* static */ const JSFunctionSpec SavedFrame::protoFunctions[] = {
+ JS_FN("constructor", SavedFrame::construct, 0, 0),
+ JS_FN("toString", SavedFrame::toStringMethod, 0, 0), JS_FS_END};
+
+/* static */ const JSPropertySpec SavedFrame::protoAccessors[] = {
+ JS_PSG("source", SavedFrame::sourceProperty, 0),
+ JS_PSG("sourceId", SavedFrame::sourceIdProperty, 0),
+ JS_PSG("line", SavedFrame::lineProperty, 0),
+ JS_PSG("column", SavedFrame::columnProperty, 0),
+ JS_PSG("functionDisplayName", SavedFrame::functionDisplayNameProperty, 0),
+ JS_PSG("asyncCause", SavedFrame::asyncCauseProperty, 0),
+ JS_PSG("asyncParent", SavedFrame::asyncParentProperty, 0),
+ JS_PSG("parent", SavedFrame::parentProperty, 0),
+ JS_STRING_SYM_PS(toStringTag, "SavedFrame", JSPROP_READONLY),
+ JS_PS_END};
+
+/* static */
+void SavedFrame::finalize(JS::GCContext* gcx, JSObject* obj) {
+ MOZ_ASSERT(gcx->onMainThread());
+ JSPrincipals* p = obj->as<SavedFrame>().getPrincipals();
+ if (p) {
+ JSRuntime* rt = obj->runtimeFromMainThread();
+ JS_DropPrincipals(rt->mainContextFromOwnThread(), p);
+ }
+}
+
+JSAtom* SavedFrame::getSource() {
+ const Value& v = getReservedSlot(JSSLOT_SOURCE);
+ JSString* s = v.toString();
+ return &s->asAtom();
+}
+
+uint32_t SavedFrame::getSourceId() {
+ const Value& v = getReservedSlot(JSSLOT_SOURCEID);
+ return v.toPrivateUint32();
+}
+
+uint32_t SavedFrame::getLine() {
+ const Value& v = getReservedSlot(JSSLOT_LINE);
+ return v.toPrivateUint32();
+}
+
+uint32_t SavedFrame::getColumn() {
+ const Value& v = getReservedSlot(JSSLOT_COLUMN);
+ return v.toPrivateUint32();
+}
+
+JSAtom* SavedFrame::getFunctionDisplayName() {
+ const Value& v = getReservedSlot(JSSLOT_FUNCTIONDISPLAYNAME);
+ if (v.isNull()) {
+ return nullptr;
+ }
+ JSString* s = v.toString();
+ return &s->asAtom();
+}
+
+JSAtom* SavedFrame::getAsyncCause() {
+ const Value& v = getReservedSlot(JSSLOT_ASYNCCAUSE);
+ if (v.isNull()) {
+ return nullptr;
+ }
+ JSString* s = v.toString();
+ return &s->asAtom();
+}
+
+SavedFrame* SavedFrame::getParent() const {
+ const Value& v = getReservedSlot(JSSLOT_PARENT);
+ return v.isObject() ? &v.toObject().as<SavedFrame>() : nullptr;
+}
+
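+// The JSSLOT_PRINCIPALS slot packs two things into one PrivateValue: the
+// JSPrincipals pointer (whose low bit the tagging relies on being clear) and
+// the mutedErrors flag in bit 0. The two getters below undo that packing;
+// initPrincipalsAlreadyHeldAndMutedErrors() further down performs it.
+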
+JSPrincipals* SavedFrame::getPrincipals() {
+ const Value& v = getReservedSlot(JSSLOT_PRINCIPALS);
+ if (v.isUndefined()) {
+ return nullptr;
+ }
+ return reinterpret_cast<JSPrincipals*>(uintptr_t(v.toPrivate()) & ~0b1);
+}
+
+bool SavedFrame::getMutedErrors() {
+ const Value& v = getReservedSlot(JSSLOT_PRINCIPALS);
+ if (v.isUndefined()) {
+ return true;
+ }
+ return bool(uintptr_t(v.toPrivate()) & 0b1);
+}
+
+void SavedFrame::initSource(JSAtom* source) {
+ MOZ_ASSERT(source);
+ initReservedSlot(JSSLOT_SOURCE, StringValue(source));
+}
+
+void SavedFrame::initSourceId(uint32_t sourceId) {
+ initReservedSlot(JSSLOT_SOURCEID, PrivateUint32Value(sourceId));
+}
+
+void SavedFrame::initLine(uint32_t line) {
+ initReservedSlot(JSSLOT_LINE, PrivateUint32Value(line));
+}
+
+void SavedFrame::initColumn(uint32_t column) {
+ if (js::SupportDifferentialTesting()) {
+ column = 0;
+ }
+ initReservedSlot(JSSLOT_COLUMN, PrivateUint32Value(column));
+}
+
+void SavedFrame::initPrincipalsAndMutedErrors(JSPrincipals* principals,
+ bool mutedErrors) {
+ if (principals) {
+ JS_HoldPrincipals(principals);
+ }
+ initPrincipalsAlreadyHeldAndMutedErrors(principals, mutedErrors);
+}
+
+void SavedFrame::initPrincipalsAlreadyHeldAndMutedErrors(
+ JSPrincipals* principals, bool mutedErrors) {
+ MOZ_ASSERT_IF(principals, principals->refcount > 0);
+ uintptr_t ptr = uintptr_t(principals) | mutedErrors;
+ initReservedSlot(JSSLOT_PRINCIPALS,
+ PrivateValue(reinterpret_cast<void*>(ptr)));
+}
+
+void SavedFrame::initFunctionDisplayName(JSAtom* maybeName) {
+ initReservedSlot(JSSLOT_FUNCTIONDISPLAYNAME,
+ maybeName ? StringValue(maybeName) : NullValue());
+}
+
+void SavedFrame::initAsyncCause(JSAtom* maybeCause) {
+ initReservedSlot(JSSLOT_ASYNCCAUSE,
+ maybeCause ? StringValue(maybeCause) : NullValue());
+}
+
+void SavedFrame::initParent(SavedFrame* maybeParent) {
+ initReservedSlot(JSSLOT_PARENT, ObjectOrNullValue(maybeParent));
+}
+
+void SavedFrame::initFromLookup(JSContext* cx, Handle<Lookup> lookup) {
+ // Make sure any atoms used in the lookup are marked in the current zone.
+ // Normally we would try to keep these mark bits up to date around the
+ // points where the context moves between compartments, but Lookups live on
+ // the stack (where the atoms are kept alive regardless) and this is a
+  // more convenient pinch point.
+ if (lookup.source()) {
+ cx->markAtom(lookup.source());
+ }
+ if (lookup.functionDisplayName()) {
+ cx->markAtom(lookup.functionDisplayName());
+ }
+ if (lookup.asyncCause()) {
+ cx->markAtom(lookup.asyncCause());
+ }
+
+ initSource(lookup.source());
+ initSourceId(lookup.sourceId());
+ initLine(lookup.line());
+ initColumn(lookup.column());
+ initFunctionDisplayName(lookup.functionDisplayName());
+ initAsyncCause(lookup.asyncCause());
+ initParent(lookup.parent());
+ initPrincipalsAndMutedErrors(lookup.principals(), lookup.mutedErrors());
+}
+
+/* static */
+SavedFrame* SavedFrame::create(JSContext* cx) {
+ Rooted<GlobalObject*> global(cx, cx->global());
+ cx->check(global);
+
+ // Ensure that we don't try to capture the stack again in the
+ // `SavedStacksMetadataBuilder` for this new SavedFrame object, and
+ // accidentally cause O(n^2) behavior.
+ SavedStacks::AutoReentrancyGuard guard(cx->realm()->savedStacks());
+
+ RootedObject proto(cx,
+ GlobalObject::getOrCreateSavedFramePrototype(cx, global));
+ if (!proto) {
+ return nullptr;
+ }
+ cx->check(proto);
+
+ return NewTenuredObjectWithGivenProto<SavedFrame>(cx, proto);
+}
+
+bool SavedFrame::isSelfHosted(JSContext* cx) {
+ JSAtom* source = getSource();
+ return source == cx->names().selfHosted;
+}
+
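+// Wasm frames reuse the line/column slots: WasmFrameIter::computeLine() (see
+// its comment) stores the bytecode offset in the "line" slot and packs the
+// function index together with ColumnBit into the "column" slot. The three
+// accessors below decode that representation.
+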
+bool SavedFrame::isWasm() {
+ // See WasmFrameIter::computeLine() comment.
+ return bool(getColumn() & wasm::WasmFrameIter::ColumnBit);
+}
+
+uint32_t SavedFrame::wasmFuncIndex() {
+ // See WasmFrameIter::computeLine() comment.
+ MOZ_ASSERT(isWasm());
+ return getColumn() & ~wasm::WasmFrameIter::ColumnBit;
+}
+
+uint32_t SavedFrame::wasmBytecodeOffset() {
+ // See WasmFrameIter::computeLine() comment.
+ MOZ_ASSERT(isWasm());
+ return getLine();
+}
+
+/* static */
+bool SavedFrame::construct(JSContext* cx, unsigned argc, Value* vp) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_NO_CONSTRUCTOR,
+ "SavedFrame");
+ return false;
+}
+
+static bool SavedFrameSubsumedByPrincipals(JSContext* cx,
+ JSPrincipals* principals,
+ Handle<SavedFrame*> frame) {
+ auto subsumes = cx->runtime()->securityCallbacks->subsumes;
+ if (!subsumes) {
+ return true;
+ }
+
+ MOZ_ASSERT(!ReconstructedSavedFramePrincipals::is(principals));
+
+ auto framePrincipals = frame->getPrincipals();
+
+ // Handle SavedFrames that have been reconstructed from stacks in a heap
+ // snapshot.
+ if (framePrincipals == &ReconstructedSavedFramePrincipals::IsSystem) {
+ return cx->runningWithTrustedPrincipals();
+ }
+ if (framePrincipals == &ReconstructedSavedFramePrincipals::IsNotSystem) {
+ return true;
+ }
+
+ return subsumes(principals, framePrincipals);
+}
+
+// Return the first SavedFrame in the chain that starts with |frame| for
+// which the given match function returns true. If there is no such frame,
+// return nullptr. |skippedAsync| is set to true if any of the skipped frames
+// had the |asyncCause| property set; otherwise it is explicitly set to false.
+template <typename Matcher>
+static SavedFrame* GetFirstMatchedFrame(JSContext* cx, JSPrincipals* principals,
+ Matcher& matches,
+ Handle<SavedFrame*> frame,
+ JS::SavedFrameSelfHosted selfHosted,
+ bool& skippedAsync) {
+ skippedAsync = false;
+
+ Rooted<SavedFrame*> rootedFrame(cx, frame);
+ while (rootedFrame) {
+ if ((selfHosted == JS::SavedFrameSelfHosted::Include ||
+ !rootedFrame->isSelfHosted(cx)) &&
+ matches(cx, principals, rootedFrame)) {
+ return rootedFrame;
+ }
+
+ if (rootedFrame->getAsyncCause()) {
+ skippedAsync = true;
+ }
+
+ rootedFrame = rootedFrame->getParent();
+ }
+
+ return nullptr;
+}
+
+// Return the first SavedFrame in the chain that starts with |frame| whose
+// principals are subsumed by |principals|, according to |subsumes|. If there is
+// no such frame, return nullptr. |skippedAsync| is set to true if any of the
+// skipped frames had the |asyncCause| property set, otherwise it is explicitly
+// set to false.
+static SavedFrame* GetFirstSubsumedFrame(JSContext* cx,
+ JSPrincipals* principals,
+ Handle<SavedFrame*> frame,
+ JS::SavedFrameSelfHosted selfHosted,
+ bool& skippedAsync) {
+ return GetFirstMatchedFrame(cx, principals, SavedFrameSubsumedByPrincipals,
+ frame, selfHosted, skippedAsync);
+}
+
+JS_PUBLIC_API JSObject* GetFirstSubsumedSavedFrame(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ JS::SavedFrameSelfHosted selfHosted) {
+ if (!savedFrame) {
+ return nullptr;
+ }
+
+ auto subsumes = cx->runtime()->securityCallbacks->subsumes;
+ if (!subsumes) {
+ return nullptr;
+ }
+
+ auto matcher = [subsumes](JSContext* cx, JSPrincipals* principals,
+ Handle<SavedFrame*> frame) -> bool {
+ return subsumes(principals, frame->getPrincipals());
+ };
+
+ bool skippedAsync;
+ Rooted<SavedFrame*> frame(cx, &savedFrame->as<SavedFrame>());
+ return GetFirstMatchedFrame(cx, principals, matcher, frame, selfHosted,
+ skippedAsync);
+}
+
+[[nodiscard]] static bool SavedFrame_checkThis(JSContext* cx, CallArgs& args,
+ const char* fnName,
+ MutableHandleObject frame) {
+ const Value& thisValue = args.thisv();
+
+ if (!thisValue.isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_OBJECT_REQUIRED,
+ InformalValueTypeName(thisValue));
+ return false;
+ }
+
+ if (!thisValue.toObject().canUnwrapAs<SavedFrame>()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_INCOMPATIBLE_PROTO, SavedFrame::class_.name,
+ fnName, "object");
+ return false;
+ }
+
+ // Now set "frame" to the actual object we were invoked in (which may be a
+ // wrapper), not the unwrapped version. Consumers will need to know what
+ // that original object was, and will do principal checks as needed.
+ frame.set(&thisValue.toObject());
+ return true;
+}
+
+// Get the SavedFrame object (possibly behind a wrapper) from the current
+// |this| value and handle any errors that might occur therein.
+//
+// These parameters must already exist when calling this macro:
+// - JSContext* cx
+// - unsigned argc
+// - Value* vp
+// - const char* fnName
+// These parameters will be defined after calling this macro:
+// - CallArgs args
+// - RootedObject frame (will be non-null; possibly a wrapper around a
+//   SavedFrame)
+#define THIS_SAVEDFRAME(cx, argc, vp, fnName, args, frame) \
+ CallArgs args = CallArgsFromVp(argc, vp); \
+ RootedObject frame(cx); \
+ if (!SavedFrame_checkThis(cx, args, fnName, &frame)) return false;
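+
+// For example, the accessors further below each begin with (a sketch):
+//
+//   bool SavedFrame::lineProperty(JSContext* cx, unsigned argc, Value* vp) {
+//     THIS_SAVEDFRAME(cx, argc, vp, "(get line)", args, frame);
+//     // ... |args| and |frame| are now in scope ...
+//   }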
+
+} /* namespace js */
+
+js::SavedFrame* js::UnwrapSavedFrame(JSContext* cx, JSPrincipals* principals,
+ HandleObject obj,
+ JS::SavedFrameSelfHosted selfHosted,
+ bool& skippedAsync) {
+ if (!obj) {
+ return nullptr;
+ }
+
+ Rooted<SavedFrame*> frame(cx, obj->maybeUnwrapAs<SavedFrame>());
+ if (!frame) {
+ return nullptr;
+ }
+
+ return GetFirstSubsumedFrame(cx, principals, frame, selfHosted, skippedAsync);
+}
+
+namespace JS {
+
+JS_PUBLIC_API SavedFrameResult GetSavedFrameSource(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ MutableHandleString sourcep,
+ SavedFrameSelfHosted selfHosted /* = SavedFrameSelfHosted::Include */) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+
+ {
+ bool skippedAsync;
+ Rooted<js::SavedFrame*> frame(
+ cx,
+ UnwrapSavedFrame(cx, principals, savedFrame, selfHosted, skippedAsync));
+ if (!frame) {
+ sourcep.set(cx->runtime()->emptyString);
+ return SavedFrameResult::AccessDenied;
+ }
+ sourcep.set(frame->getSource());
+ }
+ if (sourcep->isAtom()) {
+ cx->markAtom(&sourcep->asAtom());
+ }
+ return SavedFrameResult::Ok;
+}
+
+JS_PUBLIC_API SavedFrameResult GetSavedFrameSourceId(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ uint32_t* sourceIdp,
+ SavedFrameSelfHosted selfHosted /* = SavedFrameSelfHosted::Include */) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+
+ bool skippedAsync;
+ Rooted<js::SavedFrame*> frame(cx, UnwrapSavedFrame(cx, principals, savedFrame,
+ selfHosted, skippedAsync));
+ if (!frame) {
+ *sourceIdp = 0;
+ return SavedFrameResult::AccessDenied;
+ }
+ *sourceIdp = frame->getSourceId();
+ return SavedFrameResult::Ok;
+}
+
+JS_PUBLIC_API SavedFrameResult GetSavedFrameLine(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ uint32_t* linep,
+ SavedFrameSelfHosted selfHosted /* = SavedFrameSelfHosted::Include */) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+ MOZ_ASSERT(linep);
+
+ bool skippedAsync;
+ Rooted<js::SavedFrame*> frame(cx, UnwrapSavedFrame(cx, principals, savedFrame,
+ selfHosted, skippedAsync));
+ if (!frame) {
+ *linep = 0;
+ return SavedFrameResult::AccessDenied;
+ }
+ *linep = frame->getLine();
+ return SavedFrameResult::Ok;
+}
+
+JS_PUBLIC_API SavedFrameResult GetSavedFrameColumn(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ uint32_t* columnp,
+ SavedFrameSelfHosted selfHosted /* = SavedFrameSelfHosted::Include */) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+ MOZ_ASSERT(columnp);
+
+ bool skippedAsync;
+ Rooted<js::SavedFrame*> frame(cx, UnwrapSavedFrame(cx, principals, savedFrame,
+ selfHosted, skippedAsync));
+ if (!frame) {
+ *columnp = 0;
+ return SavedFrameResult::AccessDenied;
+ }
+ *columnp = frame->getColumn();
+ return SavedFrameResult::Ok;
+}
+
+JS_PUBLIC_API SavedFrameResult GetSavedFrameFunctionDisplayName(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ MutableHandleString namep,
+ SavedFrameSelfHosted selfHosted /* = SavedFrameSelfHosted::Include */) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+
+ {
+ bool skippedAsync;
+ Rooted<js::SavedFrame*> frame(
+ cx,
+ UnwrapSavedFrame(cx, principals, savedFrame, selfHosted, skippedAsync));
+ if (!frame) {
+ namep.set(nullptr);
+ return SavedFrameResult::AccessDenied;
+ }
+ namep.set(frame->getFunctionDisplayName());
+ }
+ if (namep && namep->isAtom()) {
+ cx->markAtom(&namep->asAtom());
+ }
+ return SavedFrameResult::Ok;
+}
+
+JS_PUBLIC_API SavedFrameResult GetSavedFrameAsyncCause(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ MutableHandleString asyncCausep,
+ SavedFrameSelfHosted unused_ /* = SavedFrameSelfHosted::Include */) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+
+ {
+ bool skippedAsync;
+ // This function is always called with self-hosted frames excluded by
+ // GetValueIfNotCached in dom/bindings/Exceptions.cpp. However, we want
+ // to include them because our Promise implementation causes us to have
+ // the async cause on a self-hosted frame. So we just ignore the
+ // parameter and always include self-hosted frames.
+ Rooted<js::SavedFrame*> frame(
+ cx, UnwrapSavedFrame(cx, principals, savedFrame,
+ SavedFrameSelfHosted::Include, skippedAsync));
+ if (!frame) {
+ asyncCausep.set(nullptr);
+ return SavedFrameResult::AccessDenied;
+ }
+ asyncCausep.set(frame->getAsyncCause());
+ if (!asyncCausep && skippedAsync) {
+ asyncCausep.set(cx->names().Async);
+ }
+ }
+ if (asyncCausep && asyncCausep->isAtom()) {
+ cx->markAtom(&asyncCausep->asAtom());
+ }
+ return SavedFrameResult::Ok;
+}
+
+JS_PUBLIC_API SavedFrameResult GetSavedFrameAsyncParent(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ MutableHandleObject asyncParentp,
+ SavedFrameSelfHosted selfHosted /* = SavedFrameSelfHosted::Include */) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+
+ bool skippedAsync;
+ Rooted<js::SavedFrame*> frame(cx, UnwrapSavedFrame(cx, principals, savedFrame,
+ selfHosted, skippedAsync));
+ if (!frame) {
+ asyncParentp.set(nullptr);
+ return SavedFrameResult::AccessDenied;
+ }
+ Rooted<js::SavedFrame*> parent(cx, frame->getParent());
+
+ // The current value of |skippedAsync| is not interesting, because we are
+ // interested in whether we would cross any async parents to get from here
+ // to the first subsumed parent frame instead.
+ Rooted<js::SavedFrame*> subsumedParent(
+ cx,
+ GetFirstSubsumedFrame(cx, principals, parent, selfHosted, skippedAsync));
+
+ // Even if |parent| is not subsumed, we still want to return a pointer to it
+ // rather than |subsumedParent| so it can pick up any |asyncCause| from the
+ // inaccessible part of the chain.
+ if (subsumedParent && (subsumedParent->getAsyncCause() || skippedAsync)) {
+ asyncParentp.set(parent);
+ } else {
+ asyncParentp.set(nullptr);
+ }
+ return SavedFrameResult::Ok;
+}
+
+JS_PUBLIC_API SavedFrameResult GetSavedFrameParent(
+ JSContext* cx, JSPrincipals* principals, HandleObject savedFrame,
+ MutableHandleObject parentp,
+ SavedFrameSelfHosted selfHosted /* = SavedFrameSelfHosted::Include */) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+
+ bool skippedAsync;
+ Rooted<js::SavedFrame*> frame(cx, UnwrapSavedFrame(cx, principals, savedFrame,
+ selfHosted, skippedAsync));
+ if (!frame) {
+ parentp.set(nullptr);
+ return SavedFrameResult::AccessDenied;
+ }
+ Rooted<js::SavedFrame*> parent(cx, frame->getParent());
+
+ // The current value of |skippedAsync| is not interesting, because we are
+ // interested in whether we would cross any async parents to get from here
+ // to the first subsumed parent frame instead.
+ Rooted<js::SavedFrame*> subsumedParent(
+ cx,
+ GetFirstSubsumedFrame(cx, principals, parent, selfHosted, skippedAsync));
+
+ // Even if |parent| is not subsumed, we still want to return a pointer to it
+ // rather than |subsumedParent| so it can pick up any |asyncCause| from the
+ // inaccessible part of the chain.
+ if (subsumedParent && !(subsumedParent->getAsyncCause() || skippedAsync)) {
+ parentp.set(parent);
+ } else {
+ parentp.set(nullptr);
+ }
+ return SavedFrameResult::Ok;
+}
+
+static bool FormatStackFrameLine(js::StringBuffer& sb,
+ JS::Handle<js::SavedFrame*> frame) {
+ if (frame->isWasm()) {
+ // See comment in WasmFrameIter::computeLine().
+ return sb.append("wasm-function[") &&
+ NumberValueToStringBuffer(NumberValue(frame->wasmFuncIndex()), sb) &&
+ sb.append(']');
+ }
+
+ return NumberValueToStringBuffer(NumberValue(frame->getLine()), sb);
+}
+
+static bool FormatStackFrameColumn(js::StringBuffer& sb,
+ JS::Handle<js::SavedFrame*> frame) {
+ if (frame->isWasm()) {
+ // See comment in WasmFrameIter::computeLine().
+ js::Int32ToCStringBuf cbuf;
+ size_t cstrlen;
+ const char* cstr =
+ Uint32ToHexCString(&cbuf, frame->wasmBytecodeOffset(), &cstrlen);
+ MOZ_ASSERT(cstr);
+
+ return sb.append("0x") && sb.append(cstr, cstrlen);
+ }
+
+ return NumberValueToStringBuffer(NumberValue(frame->getColumn()), sb);
+}
+
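+// The two formatters below emit one stack line per frame. A rough sketch of
+// the shapes they produce (illustrative, not normative):
+//
+//   SpiderMonkey:  [asyncCause*][functionName]@source:line:column
+//   V8:            "    at functionName (source:line:column)"
+//
+// where the bracketed parts are omitted when absent, and the V8 form drops
+// the parentheses when there is no function name.
+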
+static bool FormatSpiderMonkeyStackFrame(JSContext* cx, js::StringBuffer& sb,
+ JS::Handle<js::SavedFrame*> frame,
+ size_t indent, bool skippedAsync) {
+ RootedString asyncCause(cx, frame->getAsyncCause());
+ if (!asyncCause && skippedAsync) {
+ asyncCause.set(cx->names().Async);
+ }
+
+ Rooted<JSAtom*> name(cx, frame->getFunctionDisplayName());
+ return (!indent || sb.appendN(' ', indent)) &&
+ (!asyncCause || (sb.append(asyncCause) && sb.append('*'))) &&
+ (!name || sb.append(name)) && sb.append('@') &&
+ sb.append(frame->getSource()) && sb.append(':') &&
+ FormatStackFrameLine(sb, frame) && sb.append(':') &&
+ FormatStackFrameColumn(sb, frame) && sb.append('\n');
+}
+
+static bool FormatV8StackFrame(JSContext* cx, js::StringBuffer& sb,
+ JS::Handle<js::SavedFrame*> frame, size_t indent,
+ bool lastFrame) {
+ Rooted<JSAtom*> name(cx, frame->getFunctionDisplayName());
+ return sb.appendN(' ', indent + 4) && sb.append('a') && sb.append('t') &&
+ sb.append(' ') &&
+ (!name || (sb.append(name) && sb.append(' ') && sb.append('('))) &&
+ sb.append(frame->getSource()) && sb.append(':') &&
+ FormatStackFrameLine(sb, frame) && sb.append(':') &&
+ FormatStackFrameColumn(sb, frame) && (!name || sb.append(')')) &&
+ (lastFrame || sb.append('\n'));
+}
+
+JS_PUBLIC_API bool BuildStackString(JSContext* cx, JSPrincipals* principals,
+ HandleObject stack,
+ MutableHandleString stringp, size_t indent,
+ js::StackFormat format) {
+ js::AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ MOZ_RELEASE_ASSERT(cx->realm());
+
+ js::JSStringBuilder sb(cx);
+
+ if (format == js::StackFormat::Default) {
+ format = cx->runtime()->stackFormat();
+ }
+ MOZ_ASSERT(format != js::StackFormat::Default);
+
+ // Enter a new block to constrain the scope of possibly entering the stack's
+ // realm. This ensures that when we finish the StringBuffer, we are back in
+ // the cx's original compartment, and fulfill our contract with callers to
+ // place the output string in the cx's current realm.
+ {
+ bool skippedAsync;
+ Rooted<js::SavedFrame*> frame(
+ cx, UnwrapSavedFrame(cx, principals, stack,
+ SavedFrameSelfHosted::Exclude, skippedAsync));
+ if (!frame) {
+ stringp.set(cx->runtime()->emptyString);
+ return true;
+ }
+
+ Rooted<js::SavedFrame*> parent(cx);
+ do {
+ MOZ_ASSERT(SavedFrameSubsumedByPrincipals(cx, principals, frame));
+ MOZ_ASSERT(!frame->isSelfHosted(cx));
+
+ parent = frame->getParent();
+ bool skippedNextAsync;
+ Rooted<js::SavedFrame*> nextFrame(
+ cx, js::GetFirstSubsumedFrame(cx, principals, parent,
+ SavedFrameSelfHosted::Exclude,
+ skippedNextAsync));
+
+ switch (format) {
+ case js::StackFormat::SpiderMonkey:
+ if (!FormatSpiderMonkeyStackFrame(cx, sb, frame, indent,
+ skippedAsync)) {
+ return false;
+ }
+ break;
+ case js::StackFormat::V8:
+ if (!FormatV8StackFrame(cx, sb, frame, indent, !nextFrame)) {
+ return false;
+ }
+ break;
+ case js::StackFormat::Default:
+ MOZ_CRASH("Unexpected value");
+ break;
+ }
+
+ frame = nextFrame;
+ skippedAsync = skippedNextAsync;
+ } while (frame);
+ }
+
+ JSString* str = sb.finishString();
+ if (!str) {
+ return false;
+ }
+ cx->check(str);
+ stringp.set(str);
+ return true;
+}
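+
+// Illustrative call (a sketch; |principals| and |stackObj| are assumed to be
+// in scope): format a captured stack with four spaces of indentation, letting
+// the format argument default.
+//
+//   JS::RootedString str(cx);
+//   if (!JS::BuildStackString(cx, principals, stackObj, &str, 4)) {
+//     return false;  // an error has already been reported
+//   }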
+
+JS_PUBLIC_API bool IsMaybeWrappedSavedFrame(JSObject* obj) {
+ MOZ_ASSERT(obj);
+ return obj->canUnwrapAs<js::SavedFrame>();
+}
+
+JS_PUBLIC_API bool IsUnwrappedSavedFrame(JSObject* obj) {
+ MOZ_ASSERT(obj);
+ return obj->is<js::SavedFrame>();
+}
+
+static bool AssignProperty(JSContext* cx, HandleObject dst, HandleObject src,
+ const char* property) {
+ RootedValue v(cx);
+ return JS_GetProperty(cx, src, property, &v) &&
+ JS_DefineProperty(cx, dst, property, v, JSPROP_ENUMERATE);
+}
+
+JS_PUBLIC_API JSObject* ConvertSavedFrameToPlainObject(
+ JSContext* cx, HandleObject savedFrameArg,
+ SavedFrameSelfHosted selfHosted) {
+ MOZ_ASSERT(savedFrameArg);
+
+ RootedObject savedFrame(cx, savedFrameArg);
+ RootedObject baseConverted(cx), lastConverted(cx);
+ RootedValue v(cx);
+
+ baseConverted = lastConverted = JS_NewObject(cx, nullptr);
+ if (!baseConverted) {
+ return nullptr;
+ }
+
+ bool foundParent;
+ do {
+ if (!AssignProperty(cx, lastConverted, savedFrame, "source") ||
+ !AssignProperty(cx, lastConverted, savedFrame, "sourceId") ||
+ !AssignProperty(cx, lastConverted, savedFrame, "line") ||
+ !AssignProperty(cx, lastConverted, savedFrame, "column") ||
+ !AssignProperty(cx, lastConverted, savedFrame, "functionDisplayName") ||
+ !AssignProperty(cx, lastConverted, savedFrame, "asyncCause")) {
+ return nullptr;
+ }
+
+ const char* parentProperties[] = {"parent", "asyncParent"};
+ foundParent = false;
+ for (const char* prop : parentProperties) {
+ if (!JS_GetProperty(cx, savedFrame, prop, &v)) {
+ return nullptr;
+ }
+ if (v.isObject()) {
+ RootedObject nextConverted(cx, JS_NewObject(cx, nullptr));
+ if (!nextConverted ||
+ !JS_DefineProperty(cx, lastConverted, prop, nextConverted,
+ JSPROP_ENUMERATE)) {
+ return nullptr;
+ }
+ lastConverted = nextConverted;
+ savedFrame = &v.toObject();
+ foundParent = true;
+ break;
+ }
+ }
+ } while (foundParent);
+
+ return baseConverted;
+}
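+
+// The conversion above yields a chain of plain objects: each carries the six
+// data properties copied per frame plus at most one of "parent" or
+// "asyncParent" pointing at the next converted frame, so the result needs no
+// SavedFrame machinery to inspect or serialize.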
+
+} /* namespace JS */
+
+namespace js {
+
+/* static */
+bool SavedFrame::sourceProperty(JSContext* cx, unsigned argc, Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "(get source)", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ RootedString source(cx);
+ if (JS::GetSavedFrameSource(cx, principals, frame, &source) ==
+ JS::SavedFrameResult::Ok) {
+ if (!cx->compartment()->wrap(cx, &source)) {
+ return false;
+ }
+ args.rval().setString(source);
+ } else {
+ args.rval().setNull();
+ }
+ return true;
+}
+
+/* static */
+bool SavedFrame::sourceIdProperty(JSContext* cx, unsigned argc, Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "(get sourceId)", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ uint32_t sourceId;
+ if (JS::GetSavedFrameSourceId(cx, principals, frame, &sourceId) ==
+ JS::SavedFrameResult::Ok) {
+ args.rval().setNumber(sourceId);
+ } else {
+ args.rval().setNull();
+ }
+ return true;
+}
+
+/* static */
+bool SavedFrame::lineProperty(JSContext* cx, unsigned argc, Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "(get line)", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ uint32_t line;
+ if (JS::GetSavedFrameLine(cx, principals, frame, &line) ==
+ JS::SavedFrameResult::Ok) {
+ args.rval().setNumber(line);
+ } else {
+ args.rval().setNull();
+ }
+ return true;
+}
+
+/* static */
+bool SavedFrame::columnProperty(JSContext* cx, unsigned argc, Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "(get column)", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ uint32_t column;
+ if (JS::GetSavedFrameColumn(cx, principals, frame, &column) ==
+ JS::SavedFrameResult::Ok) {
+ args.rval().setNumber(column);
+ } else {
+ args.rval().setNull();
+ }
+ return true;
+}
+
+/* static */
+bool SavedFrame::functionDisplayNameProperty(JSContext* cx, unsigned argc,
+ Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "(get functionDisplayName)", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ RootedString name(cx);
+ JS::SavedFrameResult result =
+ JS::GetSavedFrameFunctionDisplayName(cx, principals, frame, &name);
+ if (result == JS::SavedFrameResult::Ok && name) {
+ if (!cx->compartment()->wrap(cx, &name)) {
+ return false;
+ }
+ args.rval().setString(name);
+ } else {
+ args.rval().setNull();
+ }
+ return true;
+}
+
+/* static */
+bool SavedFrame::asyncCauseProperty(JSContext* cx, unsigned argc, Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "(get asyncCause)", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ RootedString asyncCause(cx);
+ JS::SavedFrameResult result =
+ JS::GetSavedFrameAsyncCause(cx, principals, frame, &asyncCause);
+ if (result == JS::SavedFrameResult::Ok && asyncCause) {
+ if (!cx->compartment()->wrap(cx, &asyncCause)) {
+ return false;
+ }
+ args.rval().setString(asyncCause);
+ } else {
+ args.rval().setNull();
+ }
+ return true;
+}
+
+/* static */
+bool SavedFrame::asyncParentProperty(JSContext* cx, unsigned argc, Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "(get asyncParent)", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ RootedObject asyncParent(cx);
+ (void)JS::GetSavedFrameAsyncParent(cx, principals, frame, &asyncParent);
+ if (!cx->compartment()->wrap(cx, &asyncParent)) {
+ return false;
+ }
+ args.rval().setObjectOrNull(asyncParent);
+ return true;
+}
+
+/* static */
+bool SavedFrame::parentProperty(JSContext* cx, unsigned argc, Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "(get parent)", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ RootedObject parent(cx);
+ (void)JS::GetSavedFrameParent(cx, principals, frame, &parent);
+ if (!cx->compartment()->wrap(cx, &parent)) {
+ return false;
+ }
+ args.rval().setObjectOrNull(parent);
+ return true;
+}
+
+/* static */
+bool SavedFrame::toStringMethod(JSContext* cx, unsigned argc, Value* vp) {
+ THIS_SAVEDFRAME(cx, argc, vp, "toString", args, frame);
+ JSPrincipals* principals = cx->realm()->principals();
+ RootedString string(cx);
+ if (!JS::BuildStackString(cx, principals, frame, &string)) {
+ return false;
+ }
+ args.rval().setString(string);
+ return true;
+}
+
+bool SavedStacks::saveCurrentStack(
+ JSContext* cx, MutableHandle<SavedFrame*> frame,
+ JS::StackCapture&& capture /* = JS::StackCapture(JS::AllFrames()) */) {
+ MOZ_RELEASE_ASSERT(cx->realm());
+ MOZ_DIAGNOSTIC_ASSERT(&cx->realm()->savedStacks() == this);
+
+ if (creatingSavedFrame || cx->isExceptionPending() || !cx->global() ||
+ !cx->global()->isStandardClassResolved(JSProto_Object)) {
+ frame.set(nullptr);
+ return true;
+ }
+
+ AutoGeckoProfilerEntry labelFrame(cx, "js::SavedStacks::saveCurrentStack");
+ return insertFrames(cx, frame, std::move(capture));
+}
+
+bool SavedStacks::copyAsyncStack(JSContext* cx, HandleObject asyncStack,
+ HandleString asyncCause,
+ MutableHandle<SavedFrame*> adoptedStack,
+ const Maybe<size_t>& maxFrameCount) {
+ MOZ_RELEASE_ASSERT(cx->realm());
+ MOZ_DIAGNOSTIC_ASSERT(&cx->realm()->savedStacks() == this);
+
+ Rooted<JSAtom*> asyncCauseAtom(cx, AtomizeString(cx, asyncCause));
+ if (!asyncCauseAtom) {
+ return false;
+ }
+
+ Rooted<SavedFrame*> asyncStackObj(
+ cx, asyncStack->maybeUnwrapAs<js::SavedFrame>());
+ MOZ_RELEASE_ASSERT(asyncStackObj);
+ adoptedStack.set(asyncStackObj);
+
+ if (!adoptAsyncStack(cx, adoptedStack, asyncCauseAtom, maxFrameCount)) {
+ return false;
+ }
+
+ return true;
+}
+
+void SavedStacks::traceWeak(JSTracer* trc) {
+ frames.traceWeak(trc);
+ pcLocationMap.traceWeak(trc);
+}
+
+void SavedStacks::trace(JSTracer* trc) { pcLocationMap.trace(trc); }
+
+uint32_t SavedStacks::count() { return frames.count(); }
+
+void SavedStacks::clear() { frames.clear(); }
+
+size_t SavedStacks::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return frames.shallowSizeOfExcludingThis(mallocSizeOf) +
+ pcLocationMap.shallowSizeOfExcludingThis(mallocSizeOf);
+}
+
+// Given that we have captured a stack frame with the given principals and
+// source, return true if the requested `StackCapture` has been satisfied and
+// stack walking can halt. Return false otherwise (and stack walking and frame
+// capturing should continue).
+static inline bool captureIsSatisfied(JSContext* cx, JSPrincipals* principals,
+ const JSAtom* source,
+ JS::StackCapture& capture) {
+ class Matcher {
+ JSContext* cx_;
+ JSPrincipals* framePrincipals_;
+ const JSAtom* frameSource_;
+
+ public:
+ Matcher(JSContext* cx, JSPrincipals* principals, const JSAtom* source)
+ : cx_(cx), framePrincipals_(principals), frameSource_(source) {}
+
+ bool operator()(JS::FirstSubsumedFrame& target) {
+ auto subsumes = cx_->runtime()->securityCallbacks->subsumes;
+ return (!subsumes || subsumes(target.principals, framePrincipals_)) &&
+ (!target.ignoreSelfHosted ||
+ frameSource_ != cx_->names().selfHosted);
+ }
+
+ bool operator()(JS::MaxFrames& target) { return target.maxFrames == 1; }
+
+ bool operator()(JS::AllFrames&) { return false; }
+ };
+
+ Matcher m(cx, principals, source);
+ return capture.match(m);
+}
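+
+// For example (illustrative): a JS::MaxFrames(3) capture records a frame and
+// has maxFrames decremented to 2, then 1, in insertFrames below; once
+// maxFrames reaches 1 this matcher reports the capture satisfied, halting the
+// walk after three frames in total.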
+
+bool SavedStacks::insertFrames(JSContext* cx, MutableHandle<SavedFrame*> frame,
+ JS::StackCapture&& capture) {
+ // In order to look up a cached SavedFrame object, we need to have its parent
+ // SavedFrame, which means we need to walk the stack from oldest frame to
+ // youngest. However, FrameIter walks the stack from youngest frame to
+ // oldest. The solution is to append stack frames to a vector as we walk the
+ // stack with FrameIter, and then do a second pass through that vector in
+ // reverse order after the traversal has completed and get or create the
+ // SavedFrame objects at that time.
+ //
+ // To avoid making many copies of FrameIter (whose copy constructor is
+ // relatively slow), we use a vector of `SavedFrame::Lookup` objects, which
+ // only contain the FrameIter data we need. The `SavedFrame::Lookup`
+ // objects are partially initialized with everything except their parent
+ // pointers on the first pass, and then we fill in the parent pointers as we
+ // return in the second pass.
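+  //
+  // For example (illustrative): a stack with frames c (youngest) -> b -> a
+  // is recorded as Lookups [c, b, a] on the first pass; the second pass then
+  // creates SavedFrames for a, b, c in that order, setting each one's parent
+  // to the frame created just before it.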
+
+ // Accumulate the vector of Lookup objects here, youngest to oldest.
+ Rooted<js::GCLookupVector> stackChain(cx, js::GCLookupVector(cx));
+
+ // If we find a cached saved frame, then that supplies the parent of the
+ // frames we have placed in stackChain. If we walk the stack all the way
+ // to the end, this remains null.
+ Rooted<SavedFrame*> cachedParentFrame(cx, nullptr);
+
+  // Choose the right frame iteration strategy to accommodate both
+ // evalInFramePrev links and the LiveSavedFrameCache. For background, see
+ // the LiveSavedFrameCache comments in Stack.h.
+ //
+ // If we're using the LiveSavedFrameCache, then don't handle evalInFramePrev
+ // links by skipping over the frames altogether; that violates the cache's
+ // assumptions. Instead, traverse the entire stack, but choose each
+ // SavedFrame's parent as directed by the evalInFramePrev link, if any.
+ //
+ // If we're not using the LiveSavedFrameCache, it's hard to recover the
+ // frame to which the evalInFramePrev link refers, so we just let FrameIter
+ // skip those frames. Then each SavedFrame's parent is simply the frame that
+ // follows it in the stackChain vector, even when it has an evalInFramePrev
+ // link.
+ FrameIter iter(cx, capture.is<JS::AllFrames>()
+ ? FrameIter::IGNORE_DEBUGGER_EVAL_PREV_LINK
+ : FrameIter::FOLLOW_DEBUGGER_EVAL_PREV_LINK);
+
+ // Once we've seen one frame with its hasCachedSavedFrame bit set, all its
+ // parents (that can be cached) ought to have it set too.
+ DebugOnly<bool> seenCached = false;
+
+ // If we are using evalInFramePrev links to adjust the parents of debugger
+ // eval frames, we have to ensure the target frame is cached in the current
+ // realm. (This might not happen by default if the target frame is
+ // rematerialized, or if there is an async parent between the debugger eval
+ // frame and the target frame.) To accomplish this, we keep track of eval
+ // targets and ensure that we don't stop before they have all been reached.
+ Vector<AbstractFramePtr, 4, TempAllocPolicy> unreachedEvalTargets(cx);
+
+ while (!iter.done()) {
+ Activation& activation = *iter.activation();
+ Maybe<LiveSavedFrameCache::FramePtr> framePtr =
+ LiveSavedFrameCache::FramePtr::create(iter);
+
+ if (capture.is<JS::AllFrames>() && iter.hasUsableAbstractFramePtr()) {
+ unreachedEvalTargets.eraseIfEqual(iter.abstractFramePtr());
+ }
+
+ if (framePtr) {
+ // In general, when we reach a frame with its hasCachedSavedFrame bit set,
+ // all its parents will have the bit set as well. See the
+ // LiveSavedFrameCache comment in Activation.h for more details. Note that
+ // this invariant does not hold when we are finding the first subsumed
+ // frame. Captures using FirstSubsumedFrame ignore async parents and walk
+ // the real stack. Because we're using different rules for walking the
+ // stack, we can reach frames that weren't cached in a previous AllFrames
+ // traversal.
+ MOZ_ASSERT_IF(
+ seenCached && !capture.is<JS::FirstSubsumedFrame>(),
+ framePtr->hasCachedSavedFrame() || framePtr->isRematerializedFrame());
+ seenCached |= framePtr->hasCachedSavedFrame();
+
+ if (capture.is<JS::AllFrames>() && framePtr->isInterpreterFrame() &&
+ framePtr->asInterpreterFrame().isDebuggerEvalFrame()) {
+ AbstractFramePtr target =
+ framePtr->asInterpreterFrame().evalInFramePrev();
+ if (!unreachedEvalTargets.append(target)) {
+ return false;
+ }
+ }
+ }
+
+ if (capture.is<JS::AllFrames>() && framePtr &&
+ framePtr->hasCachedSavedFrame()) {
+ auto* cache = activation.getLiveSavedFrameCache(cx);
+ if (!cache) {
+ return false;
+ }
+ cache->find(cx, *framePtr, iter.pc(), &cachedParentFrame);
+
+ // Even though iter.hasCachedSavedFrame() was true, we may still get a
+ // cache miss, if the frame's pc doesn't match the cache entry's, or if
+ // the cache was emptied due to a realm mismatch. If we got a cache hit,
+ // and we do not have to keep looking for unreached eval target frames,
+ // we can stop traversing the stack and start building the chain.
+ if (cachedParentFrame && unreachedEvalTargets.empty()) {
+ break;
+ }
+
+ // This frame doesn't have a cache entry, despite its hasCachedSavedFrame
+ // flag being set. If this was due to a pc mismatch, we can clear the flag
+ // here and set things right. If the cache was emptied due to a realm
+ // mismatch, we should clear all the frames' flags as we walk to the
+ // bottom of the stack, so that they are all clear before we start pushing
+ // any new entries.
+ framePtr->clearHasCachedSavedFrame();
+ }
+
+ // We'll be pushing this frame onto stackChain. Gather the information
+ // needed to construct the SavedFrame::Lookup.
+ Rooted<LocationValue> location(cx);
+ {
+ AutoRealmUnchecked ar(cx, iter.realm());
+ if (!cx->realm()->savedStacks().getLocation(cx, iter, &location)) {
+ return false;
+ }
+ }
+
+ Rooted<JSAtom*> displayAtom(cx, iter.maybeFunctionDisplayAtom());
+
+ auto principals = iter.realm()->principals();
+ MOZ_ASSERT_IF(framePtr && !iter.isWasm(), iter.pc());
+
+ if (!stackChain.emplaceBack(location.source(), location.sourceId(),
+ location.line(), location.column(), displayAtom,
+ nullptr, // asyncCause
+ nullptr, // parent (not known yet)
+ principals, iter.mutedErrors(), framePtr,
+ iter.pc(), &activation)) {
+ return false;
+ }
+
+ if (captureIsSatisfied(cx, principals, location.source(), capture)) {
+ break;
+ }
+
+ ++iter;
+ framePtr = LiveSavedFrameCache::FramePtr::create(iter);
+
+ if (iter.activation() != &activation && capture.is<JS::AllFrames>()) {
+ // If there were no cache hits in the entire activation, clear its
+ // cache so we'll be able to push new ones when we build the
+ // SavedFrame chain.
+ activation.clearLiveSavedFrameCache();
+ }
+
+ // If we have crossed into a new activation, check whether the prior
+ // activation had an async parent set.
+ //
+ // If the async call was explicit (async function resumptions, most
+ // testing facilities), then the async parent stack has priority over
+ // any actual frames still on the JavaScript stack. If the async call
+ // was implicit (DOM CallbackObject::CallSetup calls), then the async
+ // parent stack is used only if there were no other frames on the
+ // stack.
+ //
+ // Captures using FirstSubsumedFrame expect us to ignore async parents.
+ if (iter.activation() != &activation && activation.asyncStack() &&
+ (activation.asyncCallIsExplicit() || iter.done()) &&
+ !capture.is<JS::FirstSubsumedFrame>()) {
+ // Atomize the async cause string. There should only be a few
+ // different strings used.
+ const char* cause = activation.asyncCause();
+ Rooted<JSAtom*> causeAtom(cx, AtomizeUTF8Chars(cx, cause, strlen(cause)));
+ if (!causeAtom) {
+ return false;
+ }
+
+ // Translate our capture into a frame count limit for
+ // adoptAsyncStack, which will impose further limits.
+ Maybe<size_t> maxFrames =
+ !capture.is<JS::MaxFrames>() ? Nothing()
+ : capture.as<JS::MaxFrames>().maxFrames == 0
+ ? Nothing()
+ : Some(capture.as<JS::MaxFrames>().maxFrames);
+
+ // Clip the stack if needed, attach the async cause string to the
+ // top frame, and copy it into our compartment if necessary.
+ Rooted<SavedFrame*> asyncParent(cx, activation.asyncStack());
+ if (!adoptAsyncStack(cx, &asyncParent, causeAtom, maxFrames)) {
+ return false;
+ }
+ stackChain[stackChain.length() - 1].setParent(asyncParent);
+ if (!capture.is<JS::AllFrames>() || unreachedEvalTargets.empty()) {
+ // In the case of a JS::AllFrames capture, we will be populating the
+ // LiveSavedFrameCache in the second loop. In the case where there is
+ // a debugger eval frame on the stack, the second loop will use
+ // checkForEvalInFramePrev to skip from the eval frame to the "prev"
+ // frame and assert that when this happens, the "prev"
+ // frame is in the cache. In cases where there is an async stack
+ // activation between the debugger eval frame and the "prev" frame,
+ // breaking here would not populate the "prev" cache entry, causing
+ // checkForEvalInFramePrev to fail.
+ break;
+ }
+ }
+
+ if (capture.is<JS::MaxFrames>()) {
+ capture.as<JS::MaxFrames>().maxFrames--;
+ }
+ }
+
+ // Iterate through |stackChain| in reverse order and get or create the
+ // actual SavedFrame instances.
+ frame.set(cachedParentFrame);
+ for (size_t i = stackChain.length(); i != 0; i--) {
+ MutableHandle<SavedFrame::Lookup> lookup = stackChain[i - 1];
+ if (!lookup.parent()) {
+ // The frame may already have an async parent frame set explicitly
+ // on its activation.
+ lookup.setParent(frame);
+ }
+
+ // If necessary, adjust the parent of a debugger eval frame to point to
+ // the frame in whose scope the eval occurs - if we're using
+ // LiveSavedFrameCache. Otherwise, we simply ask the FrameIter to follow
+ // evalInFramePrev links, so that the parent is always the last frame we
+ // created.
+ if (capture.is<JS::AllFrames>() && lookup.framePtr()) {
+ if (!checkForEvalInFramePrev(cx, lookup)) {
+ return false;
+ }
+ }
+
+ frame.set(getOrCreateSavedFrame(cx, lookup));
+ if (!frame) {
+ return false;
+ }
+
+ if (capture.is<JS::AllFrames>() && lookup.framePtr()) {
+ auto* cache = lookup.activation()->getLiveSavedFrameCache(cx);
+ if (!cache ||
+ !cache->insert(cx, *lookup.framePtr(), lookup.pc(), frame)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool SavedStacks::adoptAsyncStack(JSContext* cx,
+ MutableHandle<SavedFrame*> asyncStack,
+ Handle<JSAtom*> asyncCause,
+ const Maybe<size_t>& maxFrameCount) {
+ MOZ_ASSERT(asyncStack);
+ MOZ_ASSERT(asyncCause);
+
+ // If maxFrameCount is Nothing, the caller asked for an unlimited number of
+ // stack frames, but async stacks are not limited by the available stack
+ // memory, so we need to set an arbitrary limit when collecting them. We
+ // still don't enforce an upper limit if the caller requested more frames.
+ size_t maxFrames = maxFrameCount.valueOr(ASYNC_STACK_MAX_FRAME_COUNT);
+
+ // Turn the chain of frames starting with asyncStack into a vector of Lookup
+ // objects in |stackChain|, youngest to oldest.
+ Rooted<js::GCLookupVector> stackChain(cx, js::GCLookupVector(cx));
+ SavedFrame* currentSavedFrame = asyncStack;
+ while (currentSavedFrame && stackChain.length() < maxFrames) {
+ if (!stackChain.emplaceBack(*currentSavedFrame)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ currentSavedFrame = currentSavedFrame->getParent();
+ }
+
+ // Attach the asyncCause to the youngest frame.
+ stackChain[0].setAsyncCause(asyncCause);
+
+ // If we walked the entire stack, and it's in cx's realm, we don't
+ // need to rebuild the full chain again using the lookup objects - we can
+ // just use the existing chain. Only the asyncCause on the youngest frame
+ // needs to be changed.
+ if (currentSavedFrame == nullptr && asyncStack->realm() == cx->realm()) {
+ MutableHandle<SavedFrame::Lookup> lookup = stackChain[0];
+ lookup.setParent(asyncStack->getParent());
+ asyncStack.set(getOrCreateSavedFrame(cx, lookup));
+ return !!asyncStack;
+ }
+
+ // If we captured the maximum number of frames and the caller requested no
+ // specific limit, we only return half of them. This means that if we do
+ // many subsequent captures with the same async stack, it's likely we can
+ // use the optimization above.
+ if (maxFrameCount.isNothing() && currentSavedFrame) {
+ stackChain.shrinkBy(ASYNC_STACK_MAX_FRAME_COUNT / 2);
+ }
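+  // (Illustrative: with the default limit N = ASYNC_STACK_MAX_FRAME_COUNT,
+  // this keeps only the youngest N / 2 frames, so a later capture that adopts
+  // the result can walk the whole chain within the limit and hit the
+  // same-realm fast path above.)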
+
+ // Iterate through |stackChain| in reverse order and get or create the
+ // actual SavedFrame instances.
+ asyncStack.set(nullptr);
+ while (!stackChain.empty()) {
+ Rooted<SavedFrame::Lookup> lookup(cx, stackChain.back());
+ lookup.setParent(asyncStack);
+ asyncStack.set(getOrCreateSavedFrame(cx, lookup));
+ if (!asyncStack) {
+ return false;
+ }
+ stackChain.popBack();
+ }
+
+ return true;
+}
+
+// Given a |lookup| for which we're about to construct a SavedFrame, if it
+// refers to a Debugger eval frame, adjust |lookup|'s parent to be the frame's
+// evalInFramePrev target.
+//
+// Debugger eval frames run code in the scope of some random older frame on the
+// stack (the 'target' frame). It is our custom to report the target as the
+// immediate parent of the eval frame. The LiveSavedFrameCache requires us not
+// to skip frames, so instead we walk the entire stack, and just give Debugger
+// eval frames the right parents as we encounter them.
+//
+// Call this function only if we are using the LiveSavedFrameCache; otherwise,
+// FrameIter has already taken care of getting us the right parent.
+bool SavedStacks::checkForEvalInFramePrev(
+ JSContext* cx, MutableHandle<SavedFrame::Lookup> lookup) {
+ MOZ_ASSERT(lookup.framePtr());
+ if (!lookup.framePtr()->isInterpreterFrame()) {
+ return true;
+ }
+
+ InterpreterFrame& interpreterFrame = lookup.framePtr()->asInterpreterFrame();
+ if (!interpreterFrame.isDebuggerEvalFrame()) {
+ return true;
+ }
+
+ FrameIter iter(cx, FrameIter::IGNORE_DEBUGGER_EVAL_PREV_LINK);
+ while (!iter.done() &&
+ (!iter.hasUsableAbstractFramePtr() ||
+ iter.abstractFramePtr() != interpreterFrame.evalInFramePrev())) {
+ ++iter;
+ }
+
+ Maybe<LiveSavedFrameCache::FramePtr> maybeTarget =
+ LiveSavedFrameCache::FramePtr::create(iter);
+ MOZ_ASSERT(maybeTarget);
+
+ LiveSavedFrameCache::FramePtr target = *maybeTarget;
+
+ // If we're caching the frame to which |lookup| refers, then we should
+ // definitely have the target frame in the cache as well.
+ MOZ_ASSERT(target.hasCachedSavedFrame());
+
+ // Search the chain of activations for a LiveSavedFrameCache that has an
+ // entry for target.
+ Rooted<SavedFrame*> saved(cx, nullptr);
+ for (Activation* act = lookup.activation(); act; act = act->prev()) {
+ // It's okay to force allocation of a cache here; we're about to put
+ // something in the top cache, and all the lower ones should exist
+ // already.
+ auto* cache = act->getLiveSavedFrameCache(cx);
+ if (!cache) {
+ return false;
+ }
+
+ cache->findWithoutInvalidation(target, &saved);
+ if (saved) {
+ break;
+ }
+ }
+
+ // Since |target| has its cached bit set, we should have found it.
+ MOZ_ALWAYS_TRUE(saved);
+
+  // Because we use findWithoutInvalidation here, we can technically get a
+  // SavedFrame for any realm. That shouldn't happen, because
+ // checkForEvalInFramePrev is only called _after_ the parent frames have
+ // been constructed, but if something prevents the chain from being properly
+ // reconstructed, that invariant could be accidentally broken.
+ MOZ_ASSERT(saved->realm() == cx->realm());
+
+ lookup.setParent(saved);
+ return true;
+}
+
+SavedFrame* SavedStacks::getOrCreateSavedFrame(
+ JSContext* cx, Handle<SavedFrame::Lookup> lookup) {
+ const SavedFrame::Lookup& lookupInstance = lookup.get();
+ DependentAddPtr<SavedFrame::Set> p(cx, frames, lookupInstance);
+ if (p) {
+ MOZ_ASSERT(*p);
+ return *p;
+ }
+
+ Rooted<SavedFrame*> frame(cx, createFrameFromLookup(cx, lookup));
+ if (!frame) {
+ return nullptr;
+ }
+
+ if (!p.add(cx, frames, lookupInstance, frame)) {
+ return nullptr;
+ }
+
+ return frame;
+}
+
+SavedFrame* SavedStacks::createFrameFromLookup(
+ JSContext* cx, Handle<SavedFrame::Lookup> lookup) {
+ Rooted<SavedFrame*> frame(cx, SavedFrame::create(cx));
+ if (!frame) {
+ return nullptr;
+ }
+ frame->initFromLookup(cx, lookup);
+
+ if (!FreezeObject(cx, frame)) {
+ return nullptr;
+ }
+
+ return frame;
+}
+
+bool SavedStacks::getLocation(JSContext* cx, const FrameIter& iter,
+ MutableHandle<LocationValue> locationp) {
+ // We should only ever be caching location values for scripts in this
+ // compartment. Otherwise, we would get dead cross-compartment scripts in
+ // the cache because our compartment's sweep method isn't called when their
+ // compartment gets collected.
+ MOZ_DIAGNOSTIC_ASSERT(&cx->realm()->savedStacks() == this);
+ cx->check(iter.compartment());
+
+ // When we have a |JSScript| for this frame, use a potentially memoized
+ // location from our PCLocationMap and copy it into |locationp|. When we do
+ // not have a |JSScript| for this frame (wasm frames), we take a slow path
+ // that doesn't employ memoization, and update |locationp|'s slots directly.
+
+ if (iter.isWasm()) {
+ // Only asm.js has a displayURL.
+ if (const char16_t* displayURL = iter.displayURL()) {
+ locationp.setSource(AtomizeChars(cx, displayURL, js_strlen(displayURL)));
+ } else {
+ const char* filename = iter.filename() ? iter.filename() : "";
+ locationp.setSource(AtomizeUTF8Chars(cx, filename, strlen(filename)));
+ }
+ if (!locationp.source()) {
+ return false;
+ }
+
+ // See WasmFrameIter::computeLine() comment.
+ uint32_t column = 0;
+ locationp.setLine(iter.computeLine(&column));
+ locationp.setColumn(column);
+ return true;
+ }
+
+ RootedScript script(cx, iter.script());
+ jsbytecode* pc = iter.pc();
+
+ PCLocationMap::AddPtr p = pcLocationMap.lookupForAdd(PCKey(script, pc));
+
+ if (!p) {
+ Rooted<JSAtom*> source(cx);
+ if (const char16_t* displayURL = iter.displayURL()) {
+ source = AtomizeChars(cx, displayURL, js_strlen(displayURL));
+ } else {
+ const char* filename = script->filename() ? script->filename() : "";
+ source = AtomizeUTF8Chars(cx, filename, strlen(filename));
+ }
+ if (!source) {
+ return false;
+ }
+
+ uint32_t sourceId = script->scriptSource()->id();
+ uint32_t column;
+ uint32_t line = PCToLineNumber(script, pc, &column);
+
+ // Make the column 1-based. See comment above.
+ PCKey key(script, pc);
+ LocationValue value(source, sourceId, line, column + 1);
+ if (!pcLocationMap.add(p, key, value)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ locationp.set(p->value());
+ return true;
+}
+
+void SavedStacks::chooseSamplingProbability(Realm* realm) {
+ {
+ JSRuntime* runtime = realm->runtimeFromMainThread();
+ if (runtime->recordAllocationCallback) {
+      // The runtime is tracking allocations across all realms; in this case,
+      // ignore all of the debugger values and use the runtime's probability.
+ this->setSamplingProbability(runtime->allocationSamplingProbability);
+ return;
+ }
+ }
+
+  // Use the unbarriered version to avoid triggering a read barrier while
+  // collecting; this is safe as long as the global does not escape.
+ GlobalObject* global = realm->unsafeUnbarrieredMaybeGlobal();
+ if (!global) {
+ return;
+ }
+
+ Maybe<double> probability = DebugAPI::allocationSamplingProbability(global);
+ if (probability.isNothing()) {
+ return;
+ }
+
+ this->setSamplingProbability(*probability);
+}
+
+void SavedStacks::setSamplingProbability(double probability) {
+ if (!bernoulliSeeded) {
+ mozilla::Array<uint64_t, 2> seed;
+ GenerateXorShift128PlusSeed(seed);
+ bernoulli.setRandomState(seed[0], seed[1]);
+ bernoulliSeeded = true;
+ }
+
+ bernoulli.setProbability(probability);
+}
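+
+// For example (illustrative): after setSamplingProbability(0.01), the
+// bernoulli.trial() call in MetadataBuilder::build below succeeds for roughly
+// 1% of calls, so only about that fraction of allocations get stacks
+// attached.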
+
+JSObject* SavedStacks::MetadataBuilder::build(
+ JSContext* cx, HandleObject target,
+ AutoEnterOOMUnsafeRegion& oomUnsafe) const {
+ RootedObject obj(cx, target);
+
+ SavedStacks& stacks = cx->realm()->savedStacks();
+ if (!stacks.bernoulli.trial()) {
+ return nullptr;
+ }
+
+ Rooted<SavedFrame*> frame(cx);
+ if (!stacks.saveCurrentStack(cx, &frame)) {
+ oomUnsafe.crash("SavedStacksMetadataBuilder");
+ }
+
+ if (!DebugAPI::onLogAllocationSite(cx, obj, frame,
+ mozilla::TimeStamp::Now())) {
+ oomUnsafe.crash("SavedStacksMetadataBuilder");
+ }
+
+ auto recordAllocationCallback =
+ cx->realm()->runtimeFromMainThread()->recordAllocationCallback;
+ if (recordAllocationCallback) {
+    // The following code translates the JS-specific information into a
+    // RecordAllocationInfo object that can be consumed outside of
+    // SpiderMonkey.
+
+ auto node = JS::ubi::Node(obj.get());
+
+ // Pass the non-SpiderMonkey specific information back to the
+ // callback to get it out of the JS engine.
+ recordAllocationCallback(JS::RecordAllocationInfo{
+ node.typeName(), node.jsObjectClassName(), node.descriptiveTypeName(),
+ JS::ubi::CoarseTypeToString(node.coarseType()),
+ node.size(cx->runtime()->debuggerMallocSizeOf),
+ gc::IsInsideNursery(obj)});
+ }
+
+ MOZ_ASSERT_IF(frame, !frame->is<WrapperObject>());
+ return frame;
+}
+
+const SavedStacks::MetadataBuilder SavedStacks::metadataBuilder;
+
+/* static */
+ReconstructedSavedFramePrincipals ReconstructedSavedFramePrincipals::IsSystem;
+/* static */
+ReconstructedSavedFramePrincipals
+ ReconstructedSavedFramePrincipals::IsNotSystem;
+
+UniqueChars BuildUTF8StackString(JSContext* cx, JSPrincipals* principals,
+ HandleObject stack) {
+ RootedString stackStr(cx);
+ if (!JS::BuildStackString(cx, principals, stack, &stackStr)) {
+ return nullptr;
+ }
+
+ return JS_EncodeStringToUTF8(cx, stackStr);
+}
+
+uint32_t FixupColumnForDisplay(uint32_t column) {
+ // As described in WasmFrameIter::computeLine(), for wasm frames, the
+ // function index is returned as the column with the high bit set. In paths
+ // that format error stacks into strings, this information can be used to
+ // synthesize a proper wasm frame. But when raw column numbers are handed
+ // out, we just fix them to 1 to avoid confusion.
+ if (column & wasm::WasmFrameIter::ColumnBit) {
+ return 1;
+ }
+
+ // XXX: Make the column 1-based as in other browsers, instead of 0-based
+ // which is how SpiderMonkey stores it internally. This will be
+ // unnecessary once bug 1144340 is fixed.
+ return column + 1;
+}
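+
+// For illustration: FixupColumnForDisplay(wasm::WasmFrameIter::ColumnBit | 7)
+// yields 1 (the wasm function-index encoding is discarded), while an
+// ordinary 0-based column of 12 becomes the 1-based column 13.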
+
+} /* namespace js */
+
+namespace JS {
+namespace ubi {
+
+bool ConcreteStackFrame<SavedFrame>::isSystem() const {
+ auto trustedPrincipals = get().runtimeFromAnyThread()->trustedPrincipals();
+ return get().getPrincipals() == trustedPrincipals ||
+ get().getPrincipals() ==
+ &js::ReconstructedSavedFramePrincipals::IsSystem;
+}
+
+bool ConcreteStackFrame<SavedFrame>::constructSavedFrameStack(
+ JSContext* cx, MutableHandleObject outSavedFrameStack) const {
+ outSavedFrameStack.set(&get());
+ if (!cx->compartment()->wrap(cx, outSavedFrameStack)) {
+ outSavedFrameStack.set(nullptr);
+ return false;
+ }
+ return true;
+}
+
+// A `mozilla::Variant` matcher that converts the inner value of a
+// `JS::ubi::AtomOrTwoByteChars` string to a `JSAtom*`.
+struct MOZ_STACK_CLASS AtomizingMatcher {
+ JSContext* cx;
+ size_t length;
+
+ explicit AtomizingMatcher(JSContext* cx, size_t length)
+ : cx(cx), length(length) {}
+
+ JSAtom* operator()(JSAtom* atom) {
+ MOZ_ASSERT(atom);
+ return atom;
+ }
+
+ JSAtom* operator()(const char16_t* chars) {
+ MOZ_ASSERT(chars);
+ return AtomizeChars(cx, chars, length);
+ }
+};
+
+JS_PUBLIC_API bool ConstructSavedFrameStackSlow(
+ JSContext* cx, JS::ubi::StackFrame& frame,
+ MutableHandleObject outSavedFrameStack) {
+ Rooted<js::GCLookupVector> stackChain(cx, js::GCLookupVector(cx));
+ Rooted<JS::ubi::StackFrame> ubiFrame(cx, frame);
+
+ while (ubiFrame.get()) {
+ // Convert the source and functionDisplayName strings to atoms.
+
+ Rooted<JSAtom*> source(cx);
+ AtomizingMatcher atomizer(cx, ubiFrame.get().sourceLength());
+ source = ubiFrame.get().source().match(atomizer);
+ if (!source) {
+ return false;
+ }
+
+ Rooted<JSAtom*> functionDisplayName(cx);
+ auto nameLength = ubiFrame.get().functionDisplayNameLength();
+ if (nameLength > 0) {
+ AtomizingMatcher atomizer(cx, nameLength);
+ functionDisplayName =
+ ubiFrame.get().functionDisplayName().match(atomizer);
+ if (!functionDisplayName) {
+ return false;
+ }
+ }
+
+ auto principals =
+ js::ReconstructedSavedFramePrincipals::getSingleton(ubiFrame.get());
+
+ if (!stackChain.emplaceBack(source, ubiFrame.get().sourceId(),
+ ubiFrame.get().line(), ubiFrame.get().column(),
+ functionDisplayName,
+ /* asyncCause */ nullptr,
+ /* parent */ nullptr, principals,
+ /* mutedErrors */ true)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ ubiFrame = ubiFrame.get().parent();
+ }
+
+ Rooted<js::SavedFrame*> parentFrame(cx);
+ for (size_t i = stackChain.length(); i != 0; i--) {
+ MutableHandle<SavedFrame::Lookup> lookup = stackChain[i - 1];
+ lookup.setParent(parentFrame);
+ parentFrame = cx->realm()->savedStacks().getOrCreateSavedFrame(cx, lookup);
+ if (!parentFrame) {
+ return false;
+ }
+ }
+
+ outSavedFrameStack.set(parentFrame);
+ return true;
+}
+
+} // namespace ubi
+} // namespace JS
diff --git a/js/src/vm/SavedStacks.h b/js/src/vm/SavedStacks.h
new file mode 100644
index 0000000000..b038419ada
--- /dev/null
+++ b/js/src/vm/SavedStacks.h
@@ -0,0 +1,342 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SavedStacks_h
+#define vm_SavedStacks_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/FastBernoulliTrial.h"
+#include "mozilla/Maybe.h"
+
+#include "js/HashTable.h"
+#include "js/Stack.h"
+#include "vm/SavedFrame.h"
+
+namespace JS {
+enum class SavedFrameSelfHosted;
+}
+
+namespace js {
+
+class FrameIter;
+
+// # Saved Stacks
+//
+// The `SavedStacks` class provides a compact way to capture and save JS stacks
+// as `SavedFrame` `JSObject` subclasses. A single `SavedFrame` object
+// represents one frame that was on the stack, and has a strong reference to its
+// parent `SavedFrame` (the next youngest frame). This reference is null when
+// the `SavedFrame` object is the oldest frame that was on the stack.
+//
+// This comment documents implementation. For usage documentation, see the
+// `js/src/doc/SavedFrame/SavedFrame.md` file and relevant `SavedFrame`
+// functions in `js/src/jsapi.h`.
+//
+// ## Compact
+//
+// Older saved stack frame tails are shared via hash consing, to deduplicate
+// structurally identical data. `SavedStacks` contains a hash table of weakly
+// held `SavedFrame` objects, and when the owning compartment is swept, it
+// removes entries from this table that aren't held alive in any other way. When
+// saving new stacks, we use this table to find pre-existing `SavedFrame`
+// objects. If such an object is already extant, it is reused; otherwise a new
+// `SavedFrame` is allocated and inserted into the table.
+//
+// Naive | Hash Consing
+// --------------+------------------
+// c -> b -> a | c -> b -> a
+// | ^
+// d -> b -> a | d ---|
+// | |
+// e -> b -> a | e ---'
+//
+// This technique is effective because of the nature of the events that trigger
+// capturing the stack. Currently, these events consist primarily of `JSObject`
+// allocation (when an observing `Debugger` has such tracking), `Promise`
+// settlement, and `Error` object creation. While these events may occur many
+// times, they tend to occur only at a few locations in the JS source. For
+// example, if we enable Object allocation tracking and run the esprima
+// JavaScript parser on its own JavaScript source, there are approximately 54700
+// total `Object` allocations, but just ~1400 unique JS stacks at allocation
+// time. There's only ~200 allocation sites if we capture only the youngest
+// stack frame.
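+//
+// As a rough sketch (illustrative pseudo-code, not this engine's exact API),
+// the deduplication keys a weak table on the frame's full identity, parent
+// included, so structurally identical tails collapse into a single object:
+//
+//   SavedFrame::Lookup key(source, line, column, name, parent /* , ... */);
+//   if (SavedFrame** hit = frames.lookup(key)) {
+//     return *hit;  // reuse the shared tail
+//   }
+//   // otherwise allocate a new frame and add it to |frames|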
+//
+// ## Security and Wrappers
+//
+// We save every frame on the stack, regardless of whether the `SavedStack`'s
+// compartment's principals subsume the frame's compartment's principals or
+// not. This gives us maximum flexibility down the line when accessing and
+// presenting captured stacks, but at the price of some complication involved in
+// preventing the leakage of privileged stack frames to unprivileged callers.
+//
+// When a `SavedFrame` method or accessor is called, we compare the caller's
+// compartment's principals to each `SavedFrame`'s captured principals. We avoid
+// using the usual `CallNonGenericMethod` and `nativeCall` machinery which
+// enters the `SavedFrame` object's compartment before we can check these
+// principals, because we need access to the original caller's compartment's
+// principals (unlike other `CallNonGenericMethod` users) to determine what view
+// of the stack to present. Instead, we take a similar approach to that used by
+// DOM methods, and manually unwrap wrappers until we get the underlying
+// `SavedFrame` object, find the first `SavedFrame` in its stack whose captured
+// principals are subsumed by the caller's principals, access the reserved slots
+// we care about, and then rewrap return values as necessary.
+//
+// Consider the following diagram:
+//
+// Content Compartment
+// +---------------------------------------+
+// | |
+// | +------------------------+ |
+// Chrome Compartment | | | |
+// +--------------------+ | | SavedFrame C (content) | |
+// | | | | | |
+// | +--------------+ +------------------------+ |
+// | | | ^ |
+// | var x -----> | Xray Wrapper |-----. | |
+// | | | | | |
+// | +--------------+ | +------------------------+ |
+// | | | | | | |
+// | +--------------+ | | SavedFrame B (content) | |
+// | | | | | | |
+// | var y -----> | CCW (waived) |--. | +------------------------+ |
+// | | | | | ^ |
+// | +--------------+ | | | |
+// | | | | | | |
+// +--------------------+ | | | +------------------------+ |
+// | | '-> | | |
+// | | | SavedFrame A (chrome) | |
+// | '----> | | |
+// | +------------------------+ |
+// | ^ |
+// | | |
+// | var z -----' |
+// | |
+// +---------------------------------------+
+//
+// CCW is a plain cross-compartment wrapper, yielded by waiving Xray vision. A
+// is the youngest `SavedFrame` and represents a frame that was from the chrome
+// compartment, while B and C represent frames from the content compartment. C
+// is
+// the oldest frame.
+//
+// Note that it is always safe to waive an Xray around a SavedFrame object,
+// because SavedFrame objects and the SavedFrame prototype are always frozen;
+// you will never run untrusted code.
+//
+// Depending on who the caller is, the view of the stack will be different, and
+// is summarized in the table below.
+//
+// Var | View
+// -----+------------
+// x | A -> B -> C
+// y, z | B -> C
+//
+// In the case of x, the `SavedFrame` accessors are called with an Xray wrapper
+// around the `SavedFrame` object as the `this` value, and the chrome
+// compartment as the cx's current principals. Because the chrome compartment's
+// principals subsume both itself and the content compartment's principals, x
+// has the complete view of the stack.
+//
+// In the case of y, the cross-compartment machinery automatically enters the
+// content compartment, and calls the `SavedFrame` accessors with the wrapped
+// `SavedFrame` object as the `this` value. Because the cx's current compartment
+// is the content compartment, and the content compartment's principals do not
+// subsume the chrome compartment's principals, it can only see the B and C
+// frames.
+//
+// In the case of z, the `SavedFrame` accessors are called with the `SavedFrame`
+// object in the `this` value, and the content compartment as the cx's current
+// compartment. Similar to the case of y, only the B and C frames are exposed
+// because the cx's current compartment's principals do not subsume A's captured
+// principals.
+
+class SavedStacks {
+ friend class SavedFrame;
+ friend bool JS::ubi::ConstructSavedFrameStackSlow(
+ JSContext* cx, JS::ubi::StackFrame& ubiFrame,
+ MutableHandleObject outSavedFrameStack);
+
+ public:
+ SavedStacks()
+ : frames(),
+ bernoulliSeeded(false),
+ bernoulli(1.0, 0x59fdad7f6b4cc573, 0x91adf38db96a9354),
+ creatingSavedFrame(false) {}
+
+ [[nodiscard]] bool saveCurrentStack(
+ JSContext* cx, MutableHandle<SavedFrame*> frame,
+ JS::StackCapture&& capture = JS::StackCapture(JS::AllFrames()));
+ [[nodiscard]] bool copyAsyncStack(
+ JSContext* cx, HandleObject asyncStack, HandleString asyncCause,
+ MutableHandle<SavedFrame*> adoptedStack,
+ const mozilla::Maybe<size_t>& maxFrameCount);
+ void traceWeak(JSTracer* trc);
+ void trace(JSTracer* trc);
+ uint32_t count();
+ void clear();
+ void chooseSamplingProbability(JS::Realm* realm);
+
+ // Set the sampling random number generator's state to |state0| and
+ // |state1|. One or the other must be non-zero. See the comments for
+ // mozilla::non_crypto::XorShift128PlusRNG::setState for details.
+ void setRNGState(uint64_t state0, uint64_t state1) {
+ bernoulli.setRandomState(state0, state1);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+  // An allocation metadata builder that marks cells with the JavaScript stack
+ // at which they were allocated.
+ struct MetadataBuilder : public AllocationMetadataBuilder {
+ MetadataBuilder() : AllocationMetadataBuilder() {}
+ virtual JSObject* build(JSContext* cx, HandleObject obj,
+ AutoEnterOOMUnsafeRegion& oomUnsafe) const override;
+ };
+
+ static const MetadataBuilder metadataBuilder;
+
+ private:
+ SavedFrame::Set frames;
+ bool bernoulliSeeded;
+ mozilla::FastBernoulliTrial bernoulli;
+ bool creatingSavedFrame;
+
+ // Similar to mozilla::ReentrancyGuard, but instead of asserting against
+ // reentrancy, just change the behavior of SavedStacks::saveCurrentStack to
+ // return a nullptr SavedFrame.
+ struct MOZ_RAII AutoReentrancyGuard {
+ SavedStacks& stacks;
+
+ explicit AutoReentrancyGuard(SavedStacks& stacks) : stacks(stacks) {
+ stacks.creatingSavedFrame = true;
+ }
+
+ ~AutoReentrancyGuard() { stacks.creatingSavedFrame = false; }
+ };
+
+ [[nodiscard]] bool insertFrames(JSContext* cx,
+ MutableHandle<SavedFrame*> frame,
+ JS::StackCapture&& capture);
+ [[nodiscard]] bool adoptAsyncStack(
+ JSContext* cx, MutableHandle<SavedFrame*> asyncStack,
+ Handle<JSAtom*> asyncCause, const mozilla::Maybe<size_t>& maxFrameCount);
+ [[nodiscard]] bool checkForEvalInFramePrev(
+ JSContext* cx, MutableHandle<SavedFrame::Lookup> lookup);
+ SavedFrame* getOrCreateSavedFrame(JSContext* cx,
+ Handle<SavedFrame::Lookup> lookup);
+ SavedFrame* createFrameFromLookup(JSContext* cx,
+ Handle<SavedFrame::Lookup> lookup);
+ void setSamplingProbability(double probability);
+
+ // Cache for memoizing PCToLineNumber lookups.
+
+ struct PCKey {
+ PCKey(JSScript* script, jsbytecode* pc) : script(script), pc(pc) {}
+
+ WeakHeapPtr<JSScript*> script;
+ jsbytecode* pc;
+
+    void trace(JSTracer* trc) { /* PCKey is weak. */ }
+ bool traceWeak(JSTracer* trc) {
+ return TraceWeakEdge(trc, &script, "traceWeak");
+ }
+ };
+
+ public:
+ struct LocationValue {
+ LocationValue() : source(nullptr), sourceId(0), line(0), column(0) {}
+ LocationValue(JSAtom* source, uint32_t sourceId, size_t line,
+ uint32_t column)
+ : source(source), sourceId(sourceId), line(line), column(column) {}
+
+ void trace(JSTracer* trc) {
+ TraceNullableEdge(trc, &source, "SavedStacks::LocationValue::source");
+ }
+
+ bool traceWeak(JSTracer* trc) {
+ MOZ_ASSERT(source);
+ // TODO: Bug 1501334: IsAboutToBeFinalized doesn't work for atoms.
+      // Otherwise we should assert that TraceWeakEdge always returns true.
+ return TraceWeakEdge(trc, &source, "traceWeak");
+ }
+
+ WeakHeapPtr<JSAtom*> source;
+ uint32_t sourceId;
+ size_t line;
+ uint32_t column;
+ };
+
+ private:
+ struct PCLocationHasher : public DefaultHasher<PCKey> {
+ using ScriptPtrHasher = DefaultHasher<JSScript*>;
+ using BytecodePtrHasher = DefaultHasher<jsbytecode*>;
+
+ static HashNumber hash(const PCKey& key) {
+ return mozilla::AddToHash(ScriptPtrHasher::hash(key.script),
+ BytecodePtrHasher::hash(key.pc));
+ }
+
+ static bool match(const PCKey& l, const PCKey& k) {
+ return ScriptPtrHasher::match(l.script, k.script) &&
+ BytecodePtrHasher::match(l.pc, k.pc);
+ }
+ };
+
+ // We eagerly Atomize the script source stored in LocationValue because
+ // wasm does not always have a JSScript and the source might not be
+ // available when we need it later. However, since the JSScript does not
+ // actually hold this atom, we have to trace it strongly to keep it alive.
+ // Thus, it takes two GC passes to fully clean up this table: the first GC
+ // removes the dead script; the second will clear out the source atom since
+ // it is no longer held by the table.
+ using PCLocationMap =
+ GCHashMap<PCKey, LocationValue, PCLocationHasher, SystemAllocPolicy>;
+ PCLocationMap pcLocationMap;
+
+ [[nodiscard]] bool getLocation(JSContext* cx, const FrameIter& iter,
+ MutableHandle<LocationValue> locationp);
+};
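+
+// A minimal usage sketch (illustrative, not part of this header): capture at
+// most the two youngest frames of the current stack.
+//
+//   Rooted<SavedFrame*> frame(cx);
+//   if (!cx->realm()->savedStacks().saveCurrentStack(
+//           cx, &frame, JS::StackCapture(JS::MaxFrames(2)))) {
+//     return false;  // OOM
+//   }
+//   // |frame| may still be null, e.g. when capture is suppressed during
+//   // reentrant SavedFrame creation.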
+
+template <typename Wrapper>
+struct WrappedPtrOperations<SavedStacks::LocationValue, Wrapper> {
+ JSAtom* source() const { return loc().source; }
+ uint32_t sourceId() const { return loc().sourceId; }
+ size_t line() const { return loc().line; }
+ uint32_t column() const { return loc().column; }
+
+ private:
+ const SavedStacks::LocationValue& loc() const {
+ return static_cast<const Wrapper*>(this)->get();
+ }
+};
+
+template <typename Wrapper>
+struct MutableWrappedPtrOperations<SavedStacks::LocationValue, Wrapper>
+ : public WrappedPtrOperations<SavedStacks::LocationValue, Wrapper> {
+ void setSource(JSAtom* v) { loc().source = v; }
+ void setSourceId(uint32_t v) { loc().sourceId = v; }
+ void setLine(size_t v) { loc().line = v; }
+ void setColumn(uint32_t v) { loc().column = v; }
+
+ private:
+ SavedStacks::LocationValue& loc() {
+ return static_cast<Wrapper*>(this)->get();
+ }
+};
+
+JS::UniqueChars BuildUTF8StackString(JSContext* cx, JSPrincipals* principals,
+ HandleObject stack);
+
+uint32_t FixupColumnForDisplay(uint32_t column);
+
+js::SavedFrame* UnwrapSavedFrame(JSContext* cx, JSPrincipals* principals,
+ HandleObject obj,
+ JS::SavedFrameSelfHosted selfHosted,
+ bool& skippedAsync);
+
+} /* namespace js */
+
+#endif /* vm_SavedStacks_h */
diff --git a/js/src/vm/Scope.cpp b/js/src/vm/Scope.cpp
new file mode 100644
index 0000000000..91bc8040c4
--- /dev/null
+++ b/js/src/vm/Scope.cpp
@@ -0,0 +1,1728 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Scope.h"
+
+#include <new>
+
+#include "jsnum.h"
+
+#include "frontend/CompilationStencil.h" // ScopeStencilRef, CompilationStencil, CompilationState, CompilationAtomCache
+#include "frontend/ParserAtom.h" // frontend::ParserAtomsTable, frontend::ParserAtom
+#include "frontend/ScriptIndex.h" // ScriptIndex
+#include "frontend/Stencil.h"
+#include "gc/Allocator.h"
+#include "util/StringBuffer.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/ErrorReporting.h" // MaybePrintAndClearPendingException
+#include "vm/JSScript.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmInstance.h"
+
+#include "gc/GCContext-inl.h"
+#include "gc/ObjectKind-inl.h"
+#include "gc/TraceMethods-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::frontend;
+
+using mozilla::Maybe;
+
+const char* js::BindingKindString(BindingKind kind) {
+ switch (kind) {
+ case BindingKind::Import:
+ return "import";
+ case BindingKind::FormalParameter:
+ return "formal parameter";
+ case BindingKind::Var:
+ return "var";
+ case BindingKind::Let:
+ return "let";
+ case BindingKind::Const:
+ return "const";
+ case BindingKind::NamedLambdaCallee:
+ return "named lambda callee";
+ case BindingKind::Synthetic:
+ return "synthetic";
+ case BindingKind::PrivateMethod:
+ return "private method";
+ }
+ MOZ_CRASH("Bad BindingKind");
+}
+
+const char* js::ScopeKindString(ScopeKind kind) {
+ switch (kind) {
+ case ScopeKind::Function:
+ return "function";
+ case ScopeKind::FunctionBodyVar:
+ return "function body var";
+ case ScopeKind::Lexical:
+ return "lexical";
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ return "catch";
+ case ScopeKind::NamedLambda:
+ return "named lambda";
+ case ScopeKind::StrictNamedLambda:
+ return "strict named lambda";
+ case ScopeKind::FunctionLexical:
+ return "function lexical";
+ case ScopeKind::ClassBody:
+ return "class body";
+ case ScopeKind::With:
+ return "with";
+ case ScopeKind::Eval:
+ return "eval";
+ case ScopeKind::StrictEval:
+ return "strict eval";
+ case ScopeKind::Global:
+ return "global";
+ case ScopeKind::NonSyntactic:
+ return "non-syntactic";
+ case ScopeKind::Module:
+ return "module";
+ case ScopeKind::WasmInstance:
+ return "wasm instance";
+ case ScopeKind::WasmFunction:
+ return "wasm function";
+ }
+ MOZ_CRASH("Bad ScopeKind");
+}
+
+SharedShape* js::EmptyEnvironmentShape(JSContext* cx, const JSClass* cls,
+ uint32_t numSlots,
+ ObjectFlags objectFlags) {
+ // Put as many slots into the object header as possible.
+ uint32_t numFixed = gc::GetGCKindSlots(gc::GetGCObjectKind(numSlots));
+ return SharedShape::getInitialShape(
+ cx, cls, cx->realm(), TaggedProto(nullptr), numFixed, objectFlags);
+}
+
+static bool AddToEnvironmentMap(JSContext* cx, const JSClass* clasp,
+ HandleId id, BindingKind bindKind,
+ uint32_t slot,
+ MutableHandle<SharedPropMap*> map,
+ uint32_t* mapLength, ObjectFlags* objectFlags) {
+ PropertyFlags propFlags = {PropertyFlag::Enumerable};
+ switch (bindKind) {
+ case BindingKind::Const:
+ case BindingKind::NamedLambdaCallee:
+ // Non-writable.
+ break;
+ default:
+ propFlags.setFlag(PropertyFlag::Writable);
+ break;
+ }
+
+ return SharedPropMap::addPropertyWithKnownSlot(cx, clasp, map, mapLength, id,
+ propFlags, slot, objectFlags);
+}
+
+SharedShape* js::CreateEnvironmentShape(JSContext* cx, BindingIter& bi,
+ const JSClass* cls, uint32_t numSlots,
+ ObjectFlags objectFlags) {
+ Rooted<SharedPropMap*> map(cx);
+ uint32_t mapLength = 0;
+
+ RootedId id(cx);
+ for (; bi; bi++) {
+ BindingLocation loc = bi.location();
+ if (loc.kind() == BindingLocation::Kind::Environment) {
+ JSAtom* name = bi.name();
+ MOZ_ASSERT(AtomIsMarked(cx->zone(), name));
+ id = NameToId(name->asPropertyName());
+ if (!AddToEnvironmentMap(cx, cls, id, bi.kind(), loc.slot(), &map,
+ &mapLength, &objectFlags)) {
+ return nullptr;
+ }
+ }
+ }
+
+ uint32_t numFixed = gc::GetGCKindSlots(gc::GetGCObjectKind(numSlots));
+ return SharedShape::getInitialOrPropMapShape(cx, cls, cx->realm(),
+ TaggedProto(nullptr), numFixed,
+ map, mapLength, objectFlags);
+}
+
+template <class DataT>
+inline size_t SizeOfAllocatedData(DataT* data) {
+ return SizeOfScopeData<DataT>(data->length);
+}
+
+template <typename ConcreteScope>
+static void MarkParserScopeData(typename ConcreteScope::ParserData* data,
+ frontend::CompilationState& compilationState) {
+ auto names = GetScopeDataTrailingNames(data);
+ for (auto& binding : names) {
+ auto index = binding.name();
+ if (!index) {
+ continue;
+ }
+ compilationState.parserAtoms.markUsedByStencil(
+ index, frontend::ParserAtom::Atomize::Yes);
+ }
+}
+
+template <typename ConcreteScope, typename EnvironmentT>
+static void PrepareScopeData(ParserBindingIter& bi,
+ typename ConcreteScope::ParserData* data,
+ uint32_t firstFrameSlot,
+ mozilla::Maybe<uint32_t>* envShape) {
+ const JSClass* cls = &EnvironmentT::class_;
+
+ // Iterate through all bindings. This counts the number of environment
+ // slots needed and computes the maximum frame slot.
+ while (bi) {
+ bi++;
+ }
+ data->slotInfo.nextFrameSlot =
+ bi.canHaveFrameSlots() ? bi.nextFrameSlot() : LOCALNO_LIMIT;
+
+ // Make a new environment shape if any environment slots were used.
+ if (bi.nextEnvironmentSlot() != JSSLOT_FREE(cls)) {
+ envShape->emplace(bi.nextEnvironmentSlot());
+ }
+}
+
+template <typename ConcreteScope>
+static typename ConcreteScope::ParserData* NewEmptyParserScopeData(
+ FrontendContext* fc, LifoAlloc& alloc, uint32_t length = 0) {
+ using Data = typename ConcreteScope::ParserData;
+
+ size_t dataSize = SizeOfScopeData<Data>(length);
+ void* raw = alloc.alloc(dataSize);
+ if (!raw) {
+ js::ReportOutOfMemory(fc);
+ return nullptr;
+ }
+
+ return new (raw) Data(length);
+}
+
+template <typename ConcreteScope, typename AtomT>
+static UniquePtr<AbstractScopeData<ConcreteScope, AtomT>> NewEmptyScopeData(
+ JSContext* cx, uint32_t length = 0) {
+ using Data = AbstractScopeData<ConcreteScope, AtomT>;
+
+ size_t dataSize = SizeOfScopeData<Data>(length);
+ uint8_t* bytes = cx->pod_malloc<uint8_t>(dataSize);
+ auto data = reinterpret_cast<Data*>(bytes);
+ if (data) {
+ new (data) Data(length);
+ }
+ return UniquePtr<Data>(data);
+}
+
+template <typename ConcreteScope>
+static UniquePtr<typename ConcreteScope::RuntimeData> LiftParserScopeData(
+ JSContext* cx, frontend::CompilationAtomCache& atomCache,
+ BaseParserScopeData* baseData) {
+ using ConcreteData = typename ConcreteScope::RuntimeData;
+
+ auto* data = static_cast<typename ConcreteScope::ParserData*>(baseData);
+
+ // Convert all scope ParserAtoms to rooted JSAtoms.
+ // Rooting is necessary as conversion can gc.
+ JS::RootedVector<JSAtom*> jsatoms(cx);
+ if (!jsatoms.reserve(data->length)) {
+ return nullptr;
+ }
+ auto names = GetScopeDataTrailingNames(data);
+ for (size_t i = 0; i < names.size(); i++) {
+ JSAtom* jsatom = nullptr;
+ if (names[i].name()) {
+ jsatom = atomCache.getExistingAtomAt(cx, names[i].name());
+ MOZ_ASSERT(jsatom);
+ }
+ jsatoms.infallibleAppend(jsatom);
+ }
+
+ // Allocate a new scope-data of the right kind.
+ UniquePtr<ConcreteData> scopeData(
+ NewEmptyScopeData<ConcreteScope, JSAtom>(cx, data->length));
+ if (!scopeData) {
+ return nullptr;
+ }
+
+ // NOTE: There shouldn't be any fallible operation or GC between setting
+ // `length` and filling `trailingNames`.
+ scopeData.get()->length = data->length;
+
+ memcpy(&scopeData.get()->slotInfo, &data->slotInfo,
+ sizeof(typename ConcreteScope::SlotInfo));
+
+ // Initialize new scoped names.
+ auto namesOut = GetScopeDataTrailingNames(scopeData.get());
+ MOZ_ASSERT(data->length == namesOut.size());
+ for (size_t i = 0; i < namesOut.size(); i++) {
+ namesOut[i] = names[i].copyWithNewAtom(jsatoms[i].get());
+ }
+
+ return scopeData;
+}
+
+/* static */
+Scope* Scope::create(JSContext* cx, ScopeKind kind, Handle<Scope*> enclosing,
+ Handle<SharedShape*> envShape) {
+ return cx->newCell<Scope>(kind, enclosing, envShape);
+}
+
+template <typename ConcreteScope>
+/* static */
+ConcreteScope* Scope::create(
+ JSContext* cx, ScopeKind kind, Handle<Scope*> enclosing,
+ Handle<SharedShape*> envShape,
+ MutableHandle<UniquePtr<typename ConcreteScope::RuntimeData>> data) {
+ Scope* scope = create(cx, kind, enclosing, envShape);
+ if (!scope) {
+ return nullptr;
+ }
+
+ // It is an invariant that all Scopes that have data (currently, all
+ // ScopeKinds except With) must have non-null data.
+ MOZ_ASSERT(data);
+ scope->initData<ConcreteScope>(data);
+
+ return &scope->as<ConcreteScope>();
+}
+
+template <typename ConcreteScope>
+inline void Scope::initData(
+ MutableHandle<UniquePtr<typename ConcreteScope::RuntimeData>> data) {
+ MOZ_ASSERT(!rawData());
+
+ AddCellMemory(this, SizeOfAllocatedData(data.get().get()),
+ MemoryUse::ScopeData);
+
+ setHeaderPtr(data.get().release());
+}
+
+void Scope::updateEnvShapeIfRequired(mozilla::Maybe<uint32_t>* envShape,
+ bool needsEnvironment) {
+ if (envShape->isNothing() && needsEnvironment) {
+ uint32_t numSlots = 0;
+ envShape->emplace(numSlots);
+ }
+}
+
+uint32_t Scope::firstFrameSlot() const {
+ switch (kind()) {
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::FunctionLexical:
+ // For intra-frame scopes, find the enclosing scope's next frame slot.
+ MOZ_ASSERT(is<LexicalScope>());
+ return LexicalScope::nextFrameSlot(enclosing());
+
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ // Named lambda scopes cannot have frame slots.
+ return LOCALNO_LIMIT;
+
+ case ScopeKind::ClassBody:
+ MOZ_ASSERT(is<ClassBodyScope>());
+ return ClassBodyScope::nextFrameSlot(enclosing());
+
+ case ScopeKind::FunctionBodyVar:
+ if (enclosing()->is<FunctionScope>()) {
+ return enclosing()->as<FunctionScope>().nextFrameSlot();
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
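+
+// For example (illustrative): a ScopeKind::Lexical scope whose enclosing
+// function scope already uses frame slots 0..2 starts its own bindings at
+// frame slot 3, while named lambda scopes never receive frame slots at all.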
+
+uint32_t Scope::chainLength() const {
+ uint32_t length = 0;
+ for (ScopeIter si(const_cast<Scope*>(this)); si; si++) {
+ length++;
+ }
+ return length;
+}
+
+uint32_t Scope::environmentChainLength() const {
+ uint32_t length = 0;
+ for (ScopeIter si(const_cast<Scope*>(this)); si; si++) {
+ if (si.hasSyntacticEnvironment()) {
+ length++;
+ }
+ }
+ return length;
+}
+
+void Scope::finalize(JS::GCContext* gcx) {
+ MOZ_ASSERT(CurrentThreadIsGCFinalizing());
+ applyScopeDataTyped([this, gcx](auto data) {
+ gcx->delete_(this, data, SizeOfAllocatedData(data), MemoryUse::ScopeData);
+ });
+ setHeaderPtr(nullptr);
+}
+
+size_t Scope::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ if (rawData()) {
+ return mallocSizeOf(rawData());
+ }
+ return 0;
+}
+
+void Scope::dump() {
+ JSContext* cx = TlsContext.get();
+ if (!cx) {
+ fprintf(stderr, "*** can't get JSContext for current thread\n");
+ return;
+ }
+ for (Rooted<ScopeIter> si(cx, ScopeIter(this)); si; si++) {
+ fprintf(stderr, "- %s [%p]\n", ScopeKindString(si.kind()), si.scope());
+ DumpBindings(cx, si.scope());
+ fprintf(stderr, "\n");
+ }
+ fprintf(stderr, "\n");
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+
+/* static */
+bool Scope::dumpForDisassemble(JSContext* cx, JS::Handle<Scope*> scope,
+ GenericPrinter& out, const char* indent) {
+ if (!out.put(ScopeKindString(scope->kind()))) {
+ return false;
+ }
+ if (!out.put(" {")) {
+ return false;
+ }
+
+ size_t i = 0;
+ for (Rooted<BindingIter> bi(cx, BindingIter(scope)); bi; bi++, i++) {
+ if (i == 0) {
+ if (!out.put("\n")) {
+ return false;
+ }
+ }
+ UniqueChars bytes = AtomToPrintableString(cx, bi.name());
+ if (!bytes) {
+ return false;
+ }
+ if (!out.put(indent)) {
+ return false;
+ }
+ if (!out.printf(" %2zu: %s %s ", i, BindingKindString(bi.kind()),
+ bytes.get())) {
+ return false;
+ }
+ switch (bi.location().kind()) {
+ case BindingLocation::Kind::Global:
+ if (bi.isTopLevelFunction()) {
+ if (!out.put("(global function)\n")) {
+ return false;
+ }
+ } else {
+ if (!out.put("(global)\n")) {
+ return false;
+ }
+ }
+ break;
+ case BindingLocation::Kind::Argument:
+ if (!out.printf("(arg slot %u)\n", bi.location().argumentSlot())) {
+ return false;
+ }
+ break;
+ case BindingLocation::Kind::Frame:
+ if (!out.printf("(frame slot %u)\n", bi.location().slot())) {
+ return false;
+ }
+ break;
+ case BindingLocation::Kind::Environment:
+ if (!out.printf("(env slot %u)\n", bi.location().slot())) {
+ return false;
+ }
+ break;
+ case BindingLocation::Kind::NamedLambdaCallee:
+ if (!out.put("(named lambda callee)\n")) {
+ return false;
+ }
+ break;
+ case BindingLocation::Kind::Import:
+ if (!out.put("(import)\n")) {
+ return false;
+ }
+ break;
+ }
+ }
+ if (i > 0) {
+ if (!out.put(indent)) {
+ return false;
+ }
+ }
+ if (!out.put("}")) {
+ return false;
+ }
+
+ ScopeIter si(scope);
+ si++;
+ for (; si; si++) {
+ if (!out.put(" -> ")) {
+ return false;
+ }
+ if (!out.put(ScopeKindString(si.kind()))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+
+static uint32_t NextFrameSlot(Scope* scope) {
+ for (ScopeIter si(scope); si; si++) {
+ switch (si.kind()) {
+ case ScopeKind::With:
+ continue;
+
+ case ScopeKind::Function:
+ return si.scope()->as<FunctionScope>().nextFrameSlot();
+
+ case ScopeKind::FunctionBodyVar:
+ return si.scope()->as<VarScope>().nextFrameSlot();
+
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::FunctionLexical:
+ return si.scope()->as<LexicalScope>().nextFrameSlot();
+
+ case ScopeKind::ClassBody:
+ return si.scope()->as<ClassBodyScope>().nextFrameSlot();
+
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ // Named lambda scopes cannot have frame slots.
+ return 0;
+
+ case ScopeKind::Eval:
+ case ScopeKind::StrictEval:
+ return si.scope()->as<EvalScope>().nextFrameSlot();
+
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic:
+ return 0;
+
+ case ScopeKind::Module:
+ return si.scope()->as<ModuleScope>().nextFrameSlot();
+
+ case ScopeKind::WasmInstance:
+ case ScopeKind::WasmFunction:
+ // Invalid; MOZ_CRASH below.
+ break;
+ }
+ }
+ MOZ_CRASH("Not an enclosing intra-frame Scope");
+}
+
+/* static */
+uint32_t LexicalScope::nextFrameSlot(Scope* scope) {
+ return NextFrameSlot(scope);
+}
+
+/* static */
+uint32_t ClassBodyScope::nextFrameSlot(Scope* scope) {
+ return NextFrameSlot(scope);
+}
+
+/* static */
+void LexicalScope::prepareForScopeCreation(ScopeKind kind,
+ uint32_t firstFrameSlot,
+ LexicalScope::ParserData* data,
+ mozilla::Maybe<uint32_t>* envShape) {
+ bool isNamedLambda =
+ kind == ScopeKind::NamedLambda || kind == ScopeKind::StrictNamedLambda;
+
+ MOZ_ASSERT_IF(isNamedLambda, firstFrameSlot == LOCALNO_LIMIT);
+
+ ParserBindingIter bi(*data, firstFrameSlot, isNamedLambda);
+ PrepareScopeData<LexicalScope, BlockLexicalEnvironmentObject>(
+ bi, data, firstFrameSlot, envShape);
+}
+
+/* static */
+SharedShape* LexicalScope::getEmptyExtensibleEnvironmentShape(JSContext* cx) {
+ const JSClass* cls = &LexicalEnvironmentObject::class_;
+ return EmptyEnvironmentShape(cx, cls, JSSLOT_FREE(cls), ObjectFlags());
+}
+
+/* static */
+void ClassBodyScope::prepareForScopeCreation(
+ ScopeKind kind, uint32_t firstFrameSlot, ClassBodyScope::ParserData* data,
+ mozilla::Maybe<uint32_t>* envShape) {
+ MOZ_ASSERT(kind == ScopeKind::ClassBody);
+
+ ParserBindingIter bi(*data, firstFrameSlot);
+ PrepareScopeData<ClassBodyScope, BlockLexicalEnvironmentObject>(
+ bi, data, firstFrameSlot, envShape);
+}
+
+/* static */
+void FunctionScope::prepareForScopeCreation(
+ FunctionScope::ParserData* data, bool hasParameterExprs,
+ bool needsEnvironment, mozilla::Maybe<uint32_t>* envShape) {
+ uint32_t firstFrameSlot = 0;
+ ParserBindingIter bi(*data, hasParameterExprs);
+ PrepareScopeData<FunctionScope, CallObject>(bi, data, firstFrameSlot,
+ envShape);
+
+ if (hasParameterExprs) {
+ data->slotInfo.setHasParameterExprs();
+ }
+
+  // An environment may be needed regardless of the existence of any
+  // closed-over bindings:
+ // - Extensible scopes (i.e., due to direct eval)
+ // - Needing a home object
+ // - Being a derived class constructor
+ // - Being a generator or async function
+ // Also see |FunctionBox::needsExtraBodyVarEnvironmentRegardlessOfBindings()|.
+ updateEnvShapeIfRequired(envShape, needsEnvironment);
+}
+
+JSScript* FunctionScope::script() const {
+ return canonicalFunction()->nonLazyScript();
+}
+
+/* static */
+bool FunctionScope::isSpecialName(frontend::TaggedParserAtomIndex name) {
+ return name == frontend::TaggedParserAtomIndex::WellKnown::arguments() ||
+ name == frontend::TaggedParserAtomIndex::WellKnown::dotThis() ||
+ name == frontend::TaggedParserAtomIndex::WellKnown::dotNewTarget() ||
+ name == frontend::TaggedParserAtomIndex::WellKnown::dotGenerator();
+}
+
+/* static */
+void VarScope::prepareForScopeCreation(ScopeKind kind,
+ VarScope::ParserData* data,
+ uint32_t firstFrameSlot,
+ bool needsEnvironment,
+ mozilla::Maybe<uint32_t>* envShape) {
+ ParserBindingIter bi(*data, firstFrameSlot);
+ PrepareScopeData<VarScope, VarEnvironmentObject>(bi, data, firstFrameSlot,
+ envShape);
+
+  // An environment may be needed regardless of whether any bindings are
+  // closed over:
+ // - Extensible scopes (i.e., due to direct eval)
+ // - Being a generator
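+  //
+  // Illustrative example: in `function* g(a = 1) { var x; }`, the body var
+  // scope holding `x` gets an environment because `g` is a generator.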
+ updateEnvShapeIfRequired(envShape, needsEnvironment);
+}
+
+GlobalScope* GlobalScope::createEmpty(JSContext* cx, ScopeKind kind) {
+ Rooted<UniquePtr<RuntimeData>> data(
+ cx, NewEmptyScopeData<GlobalScope, JSAtom>(cx));
+ if (!data) {
+ return nullptr;
+ }
+
+ return createWithData(cx, kind, &data);
+}
+
+/* static */
+GlobalScope* GlobalScope::createWithData(
+ JSContext* cx, ScopeKind kind, MutableHandle<UniquePtr<RuntimeData>> data) {
+ MOZ_ASSERT(data);
+
+ // The global scope has no environment shape. Its environment is the
+ // global lexical scope and the global object or non-syntactic objects
+ // created by embedding, all of which are not only extensible but may
+ // have names on them deleted.
+ return Scope::create<GlobalScope>(cx, kind, nullptr, nullptr, data);
+}
+
+/* static */
+WithScope* WithScope::create(JSContext* cx, Handle<Scope*> enclosing) {
+ Scope* scope = Scope::create(cx, ScopeKind::With, enclosing, nullptr);
+ return static_cast<WithScope*>(scope);
+}
+
+/* static */
+void EvalScope::prepareForScopeCreation(ScopeKind scopeKind,
+ EvalScope::ParserData* data,
+ mozilla::Maybe<uint32_t>* envShape) {
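+  // Only strict eval has its own var bindings; sloppy eval introduces its
+  // vars on the enclosing var environment instead, so only the StrictEval
+  // case prepares binding data here.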
+ if (scopeKind == ScopeKind::StrictEval) {
+ uint32_t firstFrameSlot = 0;
+ ParserBindingIter bi(*data, true);
+ PrepareScopeData<EvalScope, VarEnvironmentObject>(bi, data, firstFrameSlot,
+ envShape);
+ }
+}
+
+/* static */
+Scope* EvalScope::nearestVarScopeForDirectEval(Scope* scope) {
+ for (ScopeIter si(scope); si; si++) {
+ switch (si.kind()) {
+ case ScopeKind::Function:
+ case ScopeKind::FunctionBodyVar:
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic:
+ return scope;
+ default:
+ break;
+ }
+ }
+ return nullptr;
+}
+
+ModuleScope::RuntimeData::RuntimeData(size_t length) {
+ PoisonNames(this, length);
+}
+
+/* static */
+void ModuleScope::prepareForScopeCreation(ModuleScope::ParserData* data,
+ mozilla::Maybe<uint32_t>* envShape) {
+ uint32_t firstFrameSlot = 0;
+ ParserBindingIter bi(*data);
+ PrepareScopeData<ModuleScope, ModuleEnvironmentObject>(
+ bi, data, firstFrameSlot, envShape);
+
+ // Modules always need an environment object for now.
+ bool needsEnvironment = true;
+ updateEnvShapeIfRequired(envShape, needsEnvironment);
+}
+
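+// Builds an atom of the form "<prefix><index>", e.g. "memory0" or "global3",
+// used to synthesize names for the wasm debugging scopes below.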
+template <size_t ArrayLength>
+static JSAtom* GenerateWasmName(JSContext* cx,
+ const char (&prefix)[ArrayLength],
+ uint32_t index) {
+ StringBuffer sb(cx);
+ if (!sb.append(prefix)) {
+ return nullptr;
+ }
+ if (!NumberValueToStringBuffer(NumberValue(index), sb)) {
+ return nullptr;
+ }
+
+ return sb.finishAtom();
+}
+
+static void InitializeTrailingName(AbstractBindingName<JSAtom>* trailingNames,
+ size_t i, JSAtom* name) {
+ void* trailingName = &trailingNames[i];
+ new (trailingName) BindingName(name, false);
+}
+
+template <class DataT>
+static void InitializeNextTrailingName(const Rooted<UniquePtr<DataT>>& data,
+ JSAtom* name) {
+ InitializeTrailingName(GetScopeDataTrailingNamesPointer(data.get().get()),
+ data->length, name);
+ data->length++;
+}
+
+WasmInstanceScope::RuntimeData::RuntimeData(size_t length) {
+ PoisonNames(this, length);
+}
+
+/* static */
+WasmInstanceScope* WasmInstanceScope::create(JSContext* cx,
+ WasmInstanceObject* instance) {
+ size_t namesCount = 0;
+ if (instance->instance().memory()) {
+ namesCount++;
+ }
+ size_t globalsStart = namesCount;
+ size_t globalsCount = instance->instance().metadata().globals.length();
+ namesCount += globalsCount;
+
+ Rooted<UniquePtr<RuntimeData>> data(
+ cx, NewEmptyScopeData<WasmInstanceScope, JSAtom>(cx, namesCount));
+ if (!data) {
+ return nullptr;
+ }
+
+ Rooted<WasmInstanceObject*> rootedInstance(cx, instance);
+ if (instance->instance().memory()) {
+ JSAtom* wasmName = GenerateWasmName(cx, "memory", /* index = */ 0);
+ if (!wasmName) {
+ return nullptr;
+ }
+
+ InitializeNextTrailingName(data, wasmName);
+ }
+
+ for (size_t i = 0; i < globalsCount; i++) {
+ JSAtom* wasmName = GenerateWasmName(cx, "global", i);
+ if (!wasmName) {
+ return nullptr;
+ }
+
+ InitializeNextTrailingName(data, wasmName);
+ }
+
+ MOZ_ASSERT(data->length == namesCount);
+
+ data->instance.init(rootedInstance);
+ data->slotInfo.globalsStart = globalsStart;
+
+ Rooted<Scope*> enclosing(cx, &cx->global()->emptyGlobalScope());
+ return Scope::create<WasmInstanceScope>(cx, ScopeKind::WasmInstance,
+ enclosing,
+ /* envShape = */ nullptr, &data);
+}
+
+/* static */
+WasmFunctionScope* WasmFunctionScope::create(JSContext* cx,
+ Handle<Scope*> enclosing,
+ uint32_t funcIndex) {
+ MOZ_ASSERT(enclosing->is<WasmInstanceScope>());
+
+ Rooted<WasmFunctionScope*> wasmFunctionScope(cx);
+
+ Rooted<WasmInstanceObject*> instance(
+ cx, enclosing->as<WasmInstanceScope>().instance());
+
+  // TODO: Pull the local variable names from the wasm function definition.
+ wasm::ValTypeVector locals;
+ size_t argsLength;
+ wasm::StackResults unusedStackResults;
+ if (!instance->instance().debug().debugGetLocalTypes(
+ funcIndex, &locals, &argsLength, &unusedStackResults)) {
+ return nullptr;
+ }
+ uint32_t namesCount = locals.length();
+
+ Rooted<UniquePtr<RuntimeData>> data(
+ cx, NewEmptyScopeData<WasmFunctionScope, JSAtom>(cx, namesCount));
+ if (!data) {
+ return nullptr;
+ }
+
+ for (size_t i = 0; i < namesCount; i++) {
+ JSAtom* wasmName = GenerateWasmName(cx, "var", i);
+ if (!wasmName) {
+ return nullptr;
+ }
+
+ InitializeNextTrailingName(data, wasmName);
+ }
+ MOZ_ASSERT(data->length == namesCount);
+
+ return Scope::create<WasmFunctionScope>(cx, ScopeKind::WasmFunction,
+ enclosing,
+ /* envShape = */ nullptr, &data);
+}
+
+ScopeIter::ScopeIter(JSScript* script) : scope_(script->bodyScope()) {}
+
+bool ScopeIter::hasSyntacticEnvironment() const {
+ return scope()->hasEnvironment() &&
+ scope()->kind() != ScopeKind::NonSyntactic;
+}
+
+AbstractBindingIter<JSAtom>::AbstractBindingIter(ScopeKind kind,
+ BaseScopeData* data,
+ uint32_t firstFrameSlot)
+ : BaseAbstractBindingIter<JSAtom>() {
+ switch (kind) {
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::FunctionLexical:
+ init(*static_cast<LexicalScope::RuntimeData*>(data), firstFrameSlot, 0);
+ break;
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ init(*static_cast<LexicalScope::RuntimeData*>(data), LOCALNO_LIMIT,
+ IsNamedLambda);
+ break;
+ case ScopeKind::ClassBody:
+ init(*static_cast<ClassBodyScope::RuntimeData*>(data), firstFrameSlot);
+ break;
+ case ScopeKind::With:
+ // With scopes do not have bindings.
+ index_ = length_ = 0;
+ MOZ_ASSERT(done());
+ break;
+ case ScopeKind::Function: {
+ uint8_t flags = IgnoreDestructuredFormalParameters;
+ if (static_cast<FunctionScope::RuntimeData*>(data)
+ ->slotInfo.hasParameterExprs()) {
+ flags |= HasFormalParameterExprs;
+ }
+ init(*static_cast<FunctionScope::RuntimeData*>(data), flags);
+ break;
+ }
+ case ScopeKind::FunctionBodyVar:
+ init(*static_cast<VarScope::RuntimeData*>(data), firstFrameSlot);
+ break;
+ case ScopeKind::Eval:
+ case ScopeKind::StrictEval:
+ init(*static_cast<EvalScope::RuntimeData*>(data),
+ kind == ScopeKind::StrictEval);
+ break;
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic:
+ init(*static_cast<GlobalScope::RuntimeData*>(data));
+ break;
+ case ScopeKind::Module:
+ init(*static_cast<ModuleScope::RuntimeData*>(data));
+ break;
+ case ScopeKind::WasmInstance:
+ init(*static_cast<WasmInstanceScope::RuntimeData*>(data));
+ break;
+ case ScopeKind::WasmFunction:
+ init(*static_cast<WasmFunctionScope::RuntimeData*>(data));
+ break;
+ }
+}
+
+AbstractBindingIter<JSAtom>::AbstractBindingIter(Scope* scope)
+ : AbstractBindingIter<JSAtom>(scope->kind(), scope->rawData(),
+ scope->firstFrameSlot()) {}
+
+AbstractBindingIter<JSAtom>::AbstractBindingIter(JSScript* script)
+ : AbstractBindingIter<JSAtom>(script->bodyScope()) {}
+
+AbstractBindingIter<frontend::TaggedParserAtomIndex>::AbstractBindingIter(
+ const frontend::ScopeStencilRef& ref)
+ : Base() {
+ const ScopeStencil& scope = ref.scope();
+ BaseParserScopeData* data = ref.context_.scopeNames[ref.scopeIndex_];
+ switch (scope.kind()) {
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::FunctionLexical:
+ init(*static_cast<LexicalScope::ParserData*>(data),
+ scope.firstFrameSlot(), 0);
+ break;
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ init(*static_cast<LexicalScope::ParserData*>(data), LOCALNO_LIMIT,
+ IsNamedLambda);
+ break;
+ case ScopeKind::ClassBody:
+ init(*static_cast<ClassBodyScope::ParserData*>(data),
+ scope.firstFrameSlot());
+ break;
+ case ScopeKind::With:
+ // With scopes do not have bindings.
+ index_ = length_ = 0;
+ MOZ_ASSERT(done());
+ break;
+ case ScopeKind::Function: {
+ uint8_t flags = IgnoreDestructuredFormalParameters;
+ if (static_cast<FunctionScope::ParserData*>(data)
+ ->slotInfo.hasParameterExprs()) {
+ flags |= HasFormalParameterExprs;
+ }
+ init(*static_cast<FunctionScope::ParserData*>(data), flags);
+ break;
+ }
+ case ScopeKind::FunctionBodyVar:
+ init(*static_cast<VarScope::ParserData*>(data), scope.firstFrameSlot());
+ break;
+ case ScopeKind::Eval:
+ case ScopeKind::StrictEval:
+ init(*static_cast<EvalScope::ParserData*>(data),
+ scope.kind() == ScopeKind::StrictEval);
+ break;
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic:
+ init(*static_cast<GlobalScope::ParserData*>(data));
+ break;
+ case ScopeKind::Module:
+ init(*static_cast<ModuleScope::ParserData*>(data));
+ break;
+ case ScopeKind::WasmInstance:
+ init(*static_cast<WasmInstanceScope::ParserData*>(data));
+ break;
+ case ScopeKind::WasmFunction:
+ init(*static_cast<WasmFunctionScope::ParserData*>(data));
+ break;
+ }
+}
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(
+ LexicalScope::AbstractData<NameT>& data, uint32_t firstFrameSlot,
+ uint8_t flags) {
+ auto& slotInfo = data.slotInfo;
+
+ // Named lambda scopes can only have environment slots. If the callee
+ // isn't closed over, it is accessed via JSOp::Callee.
+ if (flags & IsNamedLambda) {
+ // Named lambda binding is weird. Normal BindingKind ordering rules
+ // don't apply.
+ init(/* positionalFormalStart= */ 0,
+ /* nonPositionalFormalStart= */ 0,
+ /* varStart= */ 0,
+ /* letStart= */ 0,
+ /* constStart= */ 0,
+ /* syntheticStart= */ data.length,
+         /* privateMethodStart= */ data.length,
+ /* flags= */ CanHaveEnvironmentSlots | flags,
+ /* firstFrameSlot= */ firstFrameSlot,
+ /* firstEnvironmentSlot= */
+ JSSLOT_FREE(&LexicalEnvironmentObject::class_),
+ /* names= */ GetScopeDataTrailingNames(&data));
+ } else {
+ // imports - [0, 0)
+ // positional formals - [0, 0)
+ // other formals - [0, 0)
+ // vars - [0, 0)
+ // lets - [0, slotInfo.constStart)
+ // consts - [slotInfo.constStart, data.length)
+ // synthetic - [data.length, data.length)
+ // private methods - [data.length, data.length)
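+    //
+    // Worked example (illustrative): for `{ let a; let b; const c = 1; }`,
+    // slotInfo.constStart == 2 and data.length == 3.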
+ init(/* positionalFormalStart= */ 0,
+ /* nonPositionalFormalStart= */ 0,
+ /* varStart= */ 0,
+ /* letStart= */ 0,
+ /* constStart= */ slotInfo.constStart,
+ /* syntheticStart= */ data.length,
+ /* privateMethodStart= */ data.length,
+ /* flags= */ CanHaveFrameSlots | CanHaveEnvironmentSlots | flags,
+ /* firstFrameSlot= */ firstFrameSlot,
+ /* firstEnvironmentSlot= */
+ JSSLOT_FREE(&LexicalEnvironmentObject::class_),
+ /* names= */ GetScopeDataTrailingNames(&data));
+ }
+}
+
+template void BaseAbstractBindingIter<JSAtom>::init(
+ LexicalScope::AbstractData<JSAtom>&, uint32_t, uint8_t);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ LexicalScope::AbstractData<frontend::TaggedParserAtomIndex>&, uint32_t,
+ uint8_t);
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(
+ ClassBodyScope::AbstractData<NameT>& data, uint32_t firstFrameSlot) {
+ auto& slotInfo = data.slotInfo;
+
+ // imports - [0, 0)
+ // positional formals - [0, 0)
+ // other formals - [0, 0)
+ // vars - [0, 0)
+ // lets - [0, 0)
+ // consts - [0, 0)
+ // synthetic - [0, slotInfo.privateMethodStart)
+ // private methods - [slotInfo.privateMethodStart, data.length)
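+  //
+  // Worked example (illustrative): one synthetic binding followed by two
+  // private methods gives slotInfo.privateMethodStart == 1 and
+  // data.length == 3.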
+ init(/* positionalFormalStart= */ 0,
+ /* nonPositionalFormalStart= */ 0,
+ /* varStart= */ 0,
+ /* letStart= */ 0,
+ /* constStart= */ 0,
+ /* syntheticStart= */ 0,
+ /* privateMethodStart= */ slotInfo.privateMethodStart,
+ /* flags= */ CanHaveFrameSlots | CanHaveEnvironmentSlots,
+ /* firstFrameSlot= */ firstFrameSlot,
+ /* firstEnvironmentSlot= */
+ JSSLOT_FREE(&ClassBodyLexicalEnvironmentObject::class_),
+ /* names= */ GetScopeDataTrailingNames(&data));
+}
+
+template void BaseAbstractBindingIter<JSAtom>::init(
+ ClassBodyScope::AbstractData<JSAtom>&, uint32_t);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ ClassBodyScope::AbstractData<frontend::TaggedParserAtomIndex>&, uint32_t);
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(
+ FunctionScope::AbstractData<NameT>& data, uint8_t flags) {
+ flags = CanHaveFrameSlots | CanHaveEnvironmentSlots | flags;
+ if (!(flags & HasFormalParameterExprs)) {
+ flags |= CanHaveArgumentSlots;
+ }
+
+ auto length = data.length;
+ auto& slotInfo = data.slotInfo;
+
+ // imports - [0, 0)
+ // positional formals - [0, slotInfo.nonPositionalFormalStart)
+  // other formals - [slotInfo.nonPositionalFormalStart, slotInfo.varStart)
+ // vars - [slotInfo.varStart, length)
+ // lets - [length, length)
+ // consts - [length, length)
+ // synthetic - [length, length)
+ // private methods - [length, length)
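+  //
+  // Worked example (illustrative): for `function f(a, b) { var v; }`,
+  // slotInfo.nonPositionalFormalStart == 2 and slotInfo.varStart == 2;
+  // `v` and any special internal names occupy [varStart, length).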
+ init(/* positionalFormalStart= */ 0,
+ /* nonPositionalFormalStart= */ slotInfo.nonPositionalFormalStart,
+ /* varStart= */ slotInfo.varStart,
+ /* letStart= */ length,
+ /* constStart= */ length,
+ /* syntheticStart= */ length,
+ /* privateMethodStart= */ length,
+ /* flags= */ flags,
+ /* firstFrameSlot= */ 0,
+ /* firstEnvironmentSlot= */ JSSLOT_FREE(&CallObject::class_),
+ /* names= */ GetScopeDataTrailingNames(&data));
+}
+template void BaseAbstractBindingIter<JSAtom>::init(
+ FunctionScope::AbstractData<JSAtom>&, uint8_t);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ FunctionScope::AbstractData<frontend::TaggedParserAtomIndex>&, uint8_t);
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(VarScope::AbstractData<NameT>& data,
+ uint32_t firstFrameSlot) {
+ auto length = data.length;
+
+ // imports - [0, 0)
+ // positional formals - [0, 0)
+ // other formals - [0, 0)
+ // vars - [0, length)
+ // lets - [length, length)
+ // consts - [length, length)
+ // synthetic - [length, length)
+ // private methods - [length, length)
+ init(/* positionalFormalStart= */ 0,
+ /* nonPositionalFormalStart= */ 0,
+ /* varStart= */ 0,
+ /* letStart= */ length,
+ /* constStart= */ length,
+ /* syntheticStart= */ length,
+ /* privateMethodStart= */ length,
+ /* flags= */ CanHaveFrameSlots | CanHaveEnvironmentSlots,
+ /* firstFrameSlot= */ firstFrameSlot,
+ /* firstEnvironmentSlot= */ JSSLOT_FREE(&VarEnvironmentObject::class_),
+ /* names= */ GetScopeDataTrailingNames(&data));
+}
+template void BaseAbstractBindingIter<JSAtom>::init(
+ VarScope::AbstractData<JSAtom>&, uint32_t);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ VarScope::AbstractData<frontend::TaggedParserAtomIndex>&, uint32_t);
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(
+ GlobalScope::AbstractData<NameT>& data) {
+ auto& slotInfo = data.slotInfo;
+
+ // imports - [0, 0)
+ // positional formals - [0, 0)
+ // other formals - [0, 0)
+ // vars - [0, slotInfo.letStart)
+ // lets - [slotInfo.letStart, slotInfo.constStart)
+ // consts - [slotInfo.constStart, data.length)
+ // synthetic - [data.length, data.length)
+ // private methods - [data.length, data.length)
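+  //
+  // Worked example (illustrative): for top-level `var v; let l; const c = 1;`,
+  // slotInfo.letStart == 1, slotInfo.constStart == 2, and data.length == 3.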
+ init(/* positionalFormalStart= */ 0,
+ /* nonPositionalFormalStart= */ 0,
+ /* varStart= */ 0,
+ /* letStart= */ slotInfo.letStart,
+ /* constStart= */ slotInfo.constStart,
+ /* syntheticStart= */ data.length,
+       /* privateMethodStart= */ data.length,
+ /* flags= */ CannotHaveSlots,
+ /* firstFrameSlot= */ UINT32_MAX,
+ /* firstEnvironmentSlot= */ UINT32_MAX,
+ /* names= */ GetScopeDataTrailingNames(&data));
+}
+template void BaseAbstractBindingIter<JSAtom>::init(
+ GlobalScope::AbstractData<JSAtom>&);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ GlobalScope::AbstractData<frontend::TaggedParserAtomIndex>&);
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(EvalScope::AbstractData<NameT>& data,
+ bool strict) {
+ uint32_t flags;
+ uint32_t firstFrameSlot;
+ uint32_t firstEnvironmentSlot;
+ if (strict) {
+ flags = CanHaveFrameSlots | CanHaveEnvironmentSlots;
+ firstFrameSlot = 0;
+ firstEnvironmentSlot = JSSLOT_FREE(&VarEnvironmentObject::class_);
+ } else {
+ flags = CannotHaveSlots;
+ firstFrameSlot = UINT32_MAX;
+ firstEnvironmentSlot = UINT32_MAX;
+ }
+
+ auto length = data.length;
+
+ // imports - [0, 0)
+ // positional formals - [0, 0)
+ // other formals - [0, 0)
+ // vars - [0, length)
+ // lets - [length, length)
+ // consts - [length, length)
+ // synthetic - [length, length)
+ // private methods - [length, length)
+ init(/* positionalFormalStart= */ 0,
+ /* nonPositionalFormalStart= */ 0,
+ /* varStart= */ 0,
+ /* letStart= */ length,
+ /* constStart= */ length,
+ /* syntheticStart= */ length,
+ /* privateMethodStart= */ length,
+ /* flags= */ flags,
+ /* firstFrameSlot= */ firstFrameSlot,
+ /* firstEnvironmentSlot= */ firstEnvironmentSlot,
+ /* names= */ GetScopeDataTrailingNames(&data));
+}
+template void BaseAbstractBindingIter<JSAtom>::init(
+ EvalScope::AbstractData<JSAtom>&, bool);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ EvalScope::AbstractData<frontend::TaggedParserAtomIndex>&, bool);
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(
+ ModuleScope::AbstractData<NameT>& data) {
+ auto& slotInfo = data.slotInfo;
+
+ // imports - [0, slotInfo.varStart)
+ // positional formals - [slotInfo.varStart, slotInfo.varStart)
+ // other formals - [slotInfo.varStart, slotInfo.varStart)
+ // vars - [slotInfo.varStart, slotInfo.letStart)
+ // lets - [slotInfo.letStart, slotInfo.constStart)
+ // consts - [slotInfo.constStart, data.length)
+ // synthetic - [data.length, data.length)
+ // private methods - [data.length, data.length)
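+  //
+  // Worked example (illustrative, ignoring implicit bindings): for a module
+  // with `import { x } from "m"; var v; let l;`, slotInfo.varStart == 1,
+  // slotInfo.letStart == 2, and slotInfo.constStart == data.length == 3.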
+ init(
+ /* positionalFormalStart= */ slotInfo.varStart,
+ /* nonPositionalFormalStart= */ slotInfo.varStart,
+ /* varStart= */ slotInfo.varStart,
+ /* letStart= */ slotInfo.letStart,
+ /* constStart= */ slotInfo.constStart,
+ /* syntheticStart= */ data.length,
+ /* privateMethodStart= */ data.length,
+ /* flags= */ CanHaveFrameSlots | CanHaveEnvironmentSlots,
+ /* firstFrameSlot= */ 0,
+ /* firstEnvironmentSlot= */ JSSLOT_FREE(&ModuleEnvironmentObject::class_),
+ /* names= */ GetScopeDataTrailingNames(&data));
+}
+template void BaseAbstractBindingIter<JSAtom>::init(
+ ModuleScope::AbstractData<JSAtom>&);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ ModuleScope::AbstractData<frontend::TaggedParserAtomIndex>&);
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(
+ WasmInstanceScope::AbstractData<NameT>& data) {
+ auto length = data.length;
+
+ // imports - [0, 0)
+ // positional formals - [0, 0)
+ // other formals - [0, 0)
+ // vars - [0, length)
+ // lets - [length, length)
+ // consts - [length, length)
+ // synthetic - [length, length)
+ // private methods - [length, length)
+ init(/* positionalFormalStart= */ 0,
+ /* nonPositionalFormalStart= */ 0,
+ /* varStart= */ 0,
+ /* letStart= */ length,
+ /* constStart= */ length,
+ /* syntheticStart= */ length,
+ /* privateMethodStart= */ length,
+ /* flags= */ CanHaveFrameSlots | CanHaveEnvironmentSlots,
+ /* firstFrameSlot= */ UINT32_MAX,
+ /* firstEnvironmentSlot= */ UINT32_MAX,
+ /* names= */ GetScopeDataTrailingNames(&data));
+}
+template void BaseAbstractBindingIter<JSAtom>::init(
+ WasmInstanceScope::AbstractData<JSAtom>&);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ WasmInstanceScope::AbstractData<frontend::TaggedParserAtomIndex>&);
+
+template <typename NameT>
+void BaseAbstractBindingIter<NameT>::init(
+ WasmFunctionScope::AbstractData<NameT>& data) {
+ auto length = data.length;
+
+ // imports - [0, 0)
+ // positional formals - [0, 0)
+ // other formals - [0, 0)
+ // vars - [0, length)
+ // lets - [length, length)
+ // consts - [length, length)
+ // synthetic - [length, length)
+ // private methods - [length, length)
+  init(/* positionalFormalStart= */ 0,
+       /* nonPositionalFormalStart= */ 0,
+ /* varStart= */ 0,
+ /* letStart= */ length,
+ /* constStart= */ length,
+ /* syntheticStart= */ length,
+ /* privateMethodStart= */ length,
+ /* flags= */ CanHaveFrameSlots | CanHaveEnvironmentSlots,
+ /* firstFrameSlot= */ UINT32_MAX,
+ /* firstEnvironmentSlot= */ UINT32_MAX,
+ /* names= */ GetScopeDataTrailingNames(&data));
+}
+template void BaseAbstractBindingIter<JSAtom>::init(
+ WasmFunctionScope::AbstractData<JSAtom>&);
+template void BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>::init(
+ WasmFunctionScope::AbstractData<frontend::TaggedParserAtomIndex>&);
+
+AbstractPositionalFormalParameterIter<
+ JSAtom>::AbstractPositionalFormalParameterIter(Scope* scope)
+ : Base(scope) {
+ // Reinit with flags = 0, i.e., iterate over all positional parameters.
+ if (scope->is<FunctionScope>()) {
+ init(scope->as<FunctionScope>().data(), /* flags = */ 0);
+ }
+ settle();
+}
+
+AbstractPositionalFormalParameterIter<
+ JSAtom>::AbstractPositionalFormalParameterIter(JSScript* script)
+ : AbstractPositionalFormalParameterIter(script->bodyScope()) {}
+
+void js::DumpBindings(JSContext* cx, Scope* scopeArg) {
+ Rooted<Scope*> scope(cx, scopeArg);
+ for (Rooted<BindingIter> bi(cx, BindingIter(scope)); bi; bi++) {
+ UniqueChars bytes = AtomToPrintableString(cx, bi.name());
+ if (!bytes) {
+ MaybePrintAndClearPendingException(cx);
+ return;
+ }
+ fprintf(stderr, " %s %s ", BindingKindString(bi.kind()), bytes.get());
+ switch (bi.location().kind()) {
+ case BindingLocation::Kind::Global:
+ if (bi.isTopLevelFunction()) {
+ fprintf(stderr, "global function\n");
+ } else {
+ fprintf(stderr, "global\n");
+ }
+ break;
+ case BindingLocation::Kind::Argument:
+ fprintf(stderr, "arg slot %u\n", bi.location().argumentSlot());
+ break;
+ case BindingLocation::Kind::Frame:
+ fprintf(stderr, "frame slot %u\n", bi.location().slot());
+ break;
+ case BindingLocation::Kind::Environment:
+ fprintf(stderr, "env slot %u\n", bi.location().slot());
+ break;
+ case BindingLocation::Kind::NamedLambdaCallee:
+ fprintf(stderr, "named lambda callee\n");
+ break;
+ case BindingLocation::Kind::Import:
+ fprintf(stderr, "import\n");
+ break;
+ }
+ }
+}
+
+static JSAtom* GetFrameSlotNameInScope(Scope* scope, uint32_t slot) {
+ for (BindingIter bi(scope); bi; bi++) {
+ BindingLocation loc = bi.location();
+ if (loc.kind() == BindingLocation::Kind::Frame && loc.slot() == slot) {
+ return bi.name();
+ }
+ }
+ return nullptr;
+}
+
+JSAtom* js::FrameSlotName(JSScript* script, jsbytecode* pc) {
+ MOZ_ASSERT(IsLocalOp(JSOp(*pc)));
+ uint32_t slot = GET_LOCALNO(pc);
+ MOZ_ASSERT(slot < script->nfixed());
+
+ // Look for it in the body scope first.
+ if (JSAtom* name = GetFrameSlotNameInScope(script->bodyScope(), slot)) {
+ return name;
+ }
+
+ // If this is a function script and there is an extra var scope, look for
+ // it there.
+ if (script->functionHasExtraBodyVarScope()) {
+ if (JSAtom* name = GetFrameSlotNameInScope(
+ script->functionExtraBodyVarScope(), slot)) {
+ return name;
+ }
+  }
+
+  // If not found, look for it in a lexical scope.
+ for (ScopeIter si(script->innermostScope(pc)); si; si++) {
+ if (!si.scope()->is<LexicalScope>() && !si.scope()->is<ClassBodyScope>()) {
+ continue;
+ }
+
+ // Is the slot within bounds of the current lexical scope?
+ if (slot < si.scope()->firstFrameSlot()) {
+ continue;
+ }
+ if (slot >= LexicalScope::nextFrameSlot(si.scope())) {
+ break;
+ }
+
+ // If so, get the name.
+ if (JSAtom* name = GetFrameSlotNameInScope(si.scope(), slot)) {
+ return name;
+ }
+ }
+
+ MOZ_CRASH("Frame slot not found");
+}
+
+JS::ubi::Node::Size JS::ubi::Concrete<Scope>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return js::gc::Arena::thingSize(get().asTenured().getAllocKind()) +
+ get().sizeOfExcludingThis(mallocSizeOf);
+}
+
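+// Appends a new ScopeStencil (constructed from |args|) together with its
+// binding-names block, keeping compilationState.scopeData and
+// compilationState.scopeNames the same length even on failure.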
+template <typename... Args>
+/* static */ bool ScopeStencil::appendScopeStencilAndData(
+ FrontendContext* fc, CompilationState& compilationState,
+ BaseParserScopeData* data, ScopeIndex* indexOut, Args&&... args) {
+ *indexOut = ScopeIndex(compilationState.scopeData.length());
+ if (uint32_t(*indexOut) >= TaggedScriptThingIndex::IndexLimit) {
+ ReportAllocationOverflow(fc);
+ return false;
+ }
+
+ if (!compilationState.scopeData.emplaceBack(std::forward<Args>(args)...)) {
+ js::ReportOutOfMemory(fc);
+ return false;
+ }
+ if (!compilationState.scopeNames.append(data)) {
+ compilationState.scopeData.popBack();
+ MOZ_ASSERT(compilationState.scopeData.length() ==
+ compilationState.scopeNames.length());
+
+ js::ReportOutOfMemory(fc);
+ return false;
+ }
+
+ return true;
+}
+
+/* static */
+bool ScopeStencil::createForFunctionScope(
+ FrontendContext* fc, frontend::CompilationState& compilationState,
+ FunctionScope::ParserData* data, bool hasParameterExprs,
+ bool needsEnvironment, ScriptIndex functionIndex, bool isArrow,
+ mozilla::Maybe<ScopeIndex> enclosing, ScopeIndex* index) {
+ auto kind = ScopeKind::Function;
+ using ScopeType = FunctionScope;
+ MOZ_ASSERT(matchScopeKind<ScopeType>(kind));
+
+ if (data) {
+ MarkParserScopeData<ScopeType>(data, compilationState);
+ } else {
+ data = NewEmptyParserScopeData<ScopeType>(fc, compilationState.alloc);
+ if (!data) {
+ return false;
+ }
+ }
+
+ uint32_t firstFrameSlot = 0;
+ mozilla::Maybe<uint32_t> envShape;
+ FunctionScope::prepareForScopeCreation(data, hasParameterExprs,
+ needsEnvironment, &envShape);
+
+ return appendScopeStencilAndData(fc, compilationState, data, index, kind,
+ enclosing, firstFrameSlot, envShape,
+ mozilla::Some(functionIndex), isArrow);
+}
+
+/* static */
+bool ScopeStencil::createForLexicalScope(
+ FrontendContext* fc, frontend::CompilationState& compilationState,
+ ScopeKind kind, LexicalScope::ParserData* data, uint32_t firstFrameSlot,
+ mozilla::Maybe<ScopeIndex> enclosing, ScopeIndex* index) {
+ using ScopeType = LexicalScope;
+ MOZ_ASSERT(matchScopeKind<ScopeType>(kind));
+
+ if (data) {
+ MarkParserScopeData<ScopeType>(data, compilationState);
+ } else {
+ data = NewEmptyParserScopeData<ScopeType>(fc, compilationState.alloc);
+ if (!data) {
+ return false;
+ }
+ }
+
+ mozilla::Maybe<uint32_t> envShape;
+ ScopeType::prepareForScopeCreation(kind, firstFrameSlot, data, &envShape);
+
+ return appendScopeStencilAndData(fc, compilationState, data, index, kind,
+ enclosing, firstFrameSlot, envShape);
+}
+
+/* static */
+bool ScopeStencil::createForClassBodyScope(
+ FrontendContext* fc, frontend::CompilationState& compilationState,
+ ScopeKind kind, ClassBodyScope::ParserData* data, uint32_t firstFrameSlot,
+ mozilla::Maybe<ScopeIndex> enclosing, ScopeIndex* index) {
+ using ScopeType = ClassBodyScope;
+ MOZ_ASSERT(matchScopeKind<ScopeType>(kind));
+
+ if (data) {
+ MarkParserScopeData<ScopeType>(data, compilationState);
+ } else {
+ data = NewEmptyParserScopeData<ScopeType>(fc, compilationState.alloc);
+ if (!data) {
+ return false;
+ }
+ }
+
+ mozilla::Maybe<uint32_t> envShape;
+ ScopeType::prepareForScopeCreation(kind, firstFrameSlot, data, &envShape);
+
+ return appendScopeStencilAndData(fc, compilationState, data, index, kind,
+ enclosing, firstFrameSlot, envShape);
+}
+
+bool ScopeStencil::createForVarScope(
+ FrontendContext* fc, frontend::CompilationState& compilationState,
+ ScopeKind kind, VarScope::ParserData* data, uint32_t firstFrameSlot,
+ bool needsEnvironment, mozilla::Maybe<ScopeIndex> enclosing,
+ ScopeIndex* index) {
+ using ScopeType = VarScope;
+ MOZ_ASSERT(matchScopeKind<ScopeType>(kind));
+
+ if (data) {
+ MarkParserScopeData<ScopeType>(data, compilationState);
+ } else {
+ data = NewEmptyParserScopeData<ScopeType>(fc, compilationState.alloc);
+ if (!data) {
+ return false;
+ }
+ }
+
+ mozilla::Maybe<uint32_t> envShape;
+ VarScope::prepareForScopeCreation(kind, data, firstFrameSlot,
+ needsEnvironment, &envShape);
+
+ return appendScopeStencilAndData(fc, compilationState, data, index, kind,
+ enclosing, firstFrameSlot, envShape);
+}
+
+/* static */
+bool ScopeStencil::createForGlobalScope(
+ FrontendContext* fc, frontend::CompilationState& compilationState,
+ ScopeKind kind, GlobalScope::ParserData* data, ScopeIndex* index) {
+ using ScopeType = GlobalScope;
+ MOZ_ASSERT(matchScopeKind<ScopeType>(kind));
+
+ if (data) {
+ MarkParserScopeData<ScopeType>(data, compilationState);
+ } else {
+ data = NewEmptyParserScopeData<ScopeType>(fc, compilationState.alloc);
+ if (!data) {
+ return false;
+ }
+ }
+
+ // The global scope has no environment shape. Its environment is the
+ // global lexical scope and the global object or non-syntactic objects
+ // created by embedding, all of which are not only extensible but may
+ // have names on them deleted.
+ uint32_t firstFrameSlot = 0;
+ mozilla::Maybe<uint32_t> envShape;
+
+ mozilla::Maybe<ScopeIndex> enclosing;
+
+ return appendScopeStencilAndData(fc, compilationState, data, index, kind,
+ enclosing, firstFrameSlot, envShape);
+}
+
+/* static */
+bool ScopeStencil::createForEvalScope(
+ FrontendContext* fc, frontend::CompilationState& compilationState,
+ ScopeKind kind, EvalScope::ParserData* data,
+ mozilla::Maybe<ScopeIndex> enclosing, ScopeIndex* index) {
+ using ScopeType = EvalScope;
+ MOZ_ASSERT(matchScopeKind<ScopeType>(kind));
+
+ if (data) {
+ MarkParserScopeData<ScopeType>(data, compilationState);
+ } else {
+ data = NewEmptyParserScopeData<ScopeType>(fc, compilationState.alloc);
+ if (!data) {
+ return false;
+ }
+ }
+
+ uint32_t firstFrameSlot = 0;
+ mozilla::Maybe<uint32_t> envShape;
+ EvalScope::prepareForScopeCreation(kind, data, &envShape);
+
+ return appendScopeStencilAndData(fc, compilationState, data, index, kind,
+ enclosing, firstFrameSlot, envShape);
+}
+
+/* static */
+bool ScopeStencil::createForModuleScope(
+ FrontendContext* fc, frontend::CompilationState& compilationState,
+ ModuleScope::ParserData* data, mozilla::Maybe<ScopeIndex> enclosing,
+ ScopeIndex* index) {
+ auto kind = ScopeKind::Module;
+ using ScopeType = ModuleScope;
+ MOZ_ASSERT(matchScopeKind<ScopeType>(kind));
+
+ if (data) {
+ MarkParserScopeData<ScopeType>(data, compilationState);
+ } else {
+ data = NewEmptyParserScopeData<ScopeType>(fc, compilationState.alloc);
+ if (!data) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(enclosing.isNothing());
+
+  // The data that's passed in is from the frontend and is LifoAlloc'd. It
+  // is copied into a permanent VM scope later, when the stencil is
+  // instantiated.
+ uint32_t firstFrameSlot = 0;
+ mozilla::Maybe<uint32_t> envShape;
+ ModuleScope::prepareForScopeCreation(data, &envShape);
+
+ return appendScopeStencilAndData(fc, compilationState, data, index, kind,
+ enclosing, firstFrameSlot, envShape);
+}
+
+template <typename SpecificEnvironmentT>
+bool ScopeStencil::createSpecificShape(
+ JSContext* cx, ScopeKind kind, BaseScopeData* scopeData,
+ MutableHandle<SharedShape*> shape) const {
+ const JSClass* cls = &SpecificEnvironmentT::class_;
+ constexpr ObjectFlags objectFlags = SpecificEnvironmentT::OBJECT_FLAGS;
+
+ if (hasEnvironmentShape()) {
+ if (numEnvironmentSlots() > 0) {
+ BindingIter bi(kind, scopeData, firstFrameSlot_);
+ shape.set(CreateEnvironmentShape(cx, bi, cls, numEnvironmentSlots(),
+ objectFlags));
+ return shape;
+ }
+
+ shape.set(EmptyEnvironmentShape(cx, cls, JSSLOT_FREE(cls), objectFlags));
+ return shape;
+ }
+
+ return true;
+}
+
+/* static */
+bool ScopeStencil::createForWithScope(FrontendContext* fc,
+ CompilationState& compilationState,
+ mozilla::Maybe<ScopeIndex> enclosing,
+ ScopeIndex* index) {
+ auto kind = ScopeKind::With;
+ MOZ_ASSERT(matchScopeKind<WithScope>(kind));
+
+ uint32_t firstFrameSlot = 0;
+ mozilla::Maybe<uint32_t> envShape;
+
+ return appendScopeStencilAndData(fc, compilationState, nullptr, index, kind,
+ enclosing, firstFrameSlot, envShape);
+}
+
+template <typename SpecificScopeT>
+UniquePtr<typename SpecificScopeT::RuntimeData>
+ScopeStencil::createSpecificScopeData(JSContext* cx,
+ CompilationAtomCache& atomCache,
+ BaseParserScopeData* baseData) const {
+ return LiftParserScopeData<SpecificScopeT>(cx, atomCache, baseData);
+}
+
+template <>
+UniquePtr<FunctionScope::RuntimeData>
+ScopeStencil::createSpecificScopeData<FunctionScope>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ BaseParserScopeData* baseData) const {
+ // Allocate a new vm function-scope.
+ UniquePtr<FunctionScope::RuntimeData> data =
+ LiftParserScopeData<FunctionScope>(cx, atomCache, baseData);
+ if (!data) {
+ return nullptr;
+ }
+
+ return data;
+}
+
+template <>
+UniquePtr<ModuleScope::RuntimeData>
+ScopeStencil::createSpecificScopeData<ModuleScope>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ BaseParserScopeData* baseData) const {
+ // Allocate a new vm module-scope.
+ UniquePtr<ModuleScope::RuntimeData> data =
+ LiftParserScopeData<ModuleScope>(cx, atomCache, baseData);
+ if (!data) {
+ return nullptr;
+ }
+
+ return data;
+}
+
+// WithScope does not use binding data.
+template <>
+Scope* ScopeStencil::createSpecificScope<WithScope, std::nullptr_t>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope, BaseParserScopeData* baseData) const {
+ return Scope::create(cx, ScopeKind::With, enclosingScope, nullptr);
+}
+
+// GlobalScope has bindings but no environment shape.
+template <>
+Scope* ScopeStencil::createSpecificScope<GlobalScope, std::nullptr_t>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope, BaseParserScopeData* baseData) const {
+ Rooted<UniquePtr<GlobalScope::RuntimeData>> rootedData(
+ cx, createSpecificScopeData<GlobalScope>(cx, atomCache, baseData));
+ if (!rootedData) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!hasEnclosing());
+ MOZ_ASSERT(!enclosingScope);
+
+ // Because we already baked the data here, we needn't do it again.
+ return Scope::create<GlobalScope>(cx, kind(), nullptr, nullptr, &rootedData);
+}
+
+template <typename SpecificScopeT, typename SpecificEnvironmentT>
+Scope* ScopeStencil::createSpecificScope(JSContext* cx,
+ CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope,
+ BaseParserScopeData* baseData) const {
+ Rooted<UniquePtr<typename SpecificScopeT::RuntimeData>> rootedData(
+ cx, createSpecificScopeData<SpecificScopeT>(cx, atomCache, baseData));
+ if (!rootedData) {
+ return nullptr;
+ }
+
+ Rooted<SharedShape*> shape(cx);
+ if (!createSpecificShape<SpecificEnvironmentT>(
+ cx, kind(), rootedData.get().get(), &shape)) {
+ return nullptr;
+ }
+
+ // Because we already baked the data here, we needn't do it again.
+ return Scope::create<SpecificScopeT>(cx, kind(), enclosingScope, shape,
+ &rootedData);
+}
+
+template Scope* ScopeStencil::createSpecificScope<FunctionScope, CallObject>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope, BaseParserScopeData* baseData) const;
+template Scope*
+ScopeStencil::createSpecificScope<LexicalScope, BlockLexicalEnvironmentObject>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope, BaseParserScopeData* baseData) const;
+template Scope* ScopeStencil::createSpecificScope<
+ ClassBodyScope, BlockLexicalEnvironmentObject>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope, BaseParserScopeData* baseData) const;
+template Scope*
+ScopeStencil::createSpecificScope<EvalScope, VarEnvironmentObject>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope, BaseParserScopeData* baseData) const;
+template Scope*
+ScopeStencil::createSpecificScope<VarScope, VarEnvironmentObject>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope, BaseParserScopeData* baseData) const;
+template Scope*
+ScopeStencil::createSpecificScope<ModuleScope, ModuleEnvironmentObject>(
+ JSContext* cx, CompilationAtomCache& atomCache,
+ Handle<Scope*> enclosingScope, BaseParserScopeData* baseData) const;
diff --git a/js/src/vm/Scope.h b/js/src/vm/Scope.h
new file mode 100644
index 0000000000..1841891a22
--- /dev/null
+++ b/js/src/vm/Scope.h
@@ -0,0 +1,1891 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Scope_h
+#define vm_Scope_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF
+#include "mozilla/Attributes.h" // MOZ_IMPLICIT, MOZ_INIT_OUTSIDE_CTOR, MOZ_STACK_CLASS
+#include "mozilla/Casting.h" // mozilla::AssertedCast
+#include "mozilla/Maybe.h" // mozilla::Maybe
+#include "mozilla/MemoryReporting.h" // mozilla::MallocSizeOf
+#include "mozilla/Span.h" // mozilla::Span
+
+#include <algorithm> // std::fill_n
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint16_t, uint32_t, uintptr_t
+#include <type_traits> // std::is_same_v, std::is_base_of_v
+
+#include "builtin/ModuleObject.h" // ModuleObject, Handle<ModuleObject*>
+#include "frontend/ParserAtom.h" // frontend::TaggedParserAtomIndex
+#include "gc/Barrier.h" // HeapPtr
+#include "gc/Cell.h" // TenuredCellWithNonGCPointer
+#include "js/GCPolicyAPI.h" // GCPolicy, IgnoreGCPolicy
+#include "js/HeapAPI.h" // CellFlagBitsReservedForGC
+#include "js/RootingAPI.h" // Handle, MutableHandle
+#include "js/TraceKind.h" // JS::TraceKind
+#include "js/TypeDecls.h" // HandleFunction
+#include "js/UbiNode.h" // ubi::*
+#include "js/UniquePtr.h" // UniquePtr
+#include "util/Poison.h" // AlwaysPoison, JS_SCOPE_DATA_TRAILING_NAMES_PATTERN, MemCheckKind
+#include "vm/JSFunction.h" // JSFunction
+#include "vm/ScopeKind.h" // ScopeKind
+#include "vm/Shape.h" // Shape
+#include "wasm/WasmJS.h" // WasmInstanceObject
+
+class JSAtom;
+class JSScript;
+class JSTracer;
+struct JSContext;
+
+namespace js {
+
+class JS_PUBLIC_API GenericPrinter;
+
+namespace frontend {
+class ScopeStencil;
+struct ScopeStencilRef;
+class RuntimeScopeBindingCache;
+} // namespace frontend
+
+template <typename NameT>
+class AbstractBaseScopeData;
+
+template <typename NameT>
+class BaseAbstractBindingIter;
+
+template <typename NameT>
+class AbstractBindingIter;
+
+template <typename NameT>
+class AbstractPositionalFormalParameterIter;
+
+using BindingIter = AbstractBindingIter<JSAtom>;
+
+class AbstractScopePtr;
+
+static inline bool ScopeKindIsCatch(ScopeKind kind) {
+ return kind == ScopeKind::SimpleCatch || kind == ScopeKind::Catch;
+}
+
+static inline bool ScopeKindIsInBody(ScopeKind kind) {
+ return kind == ScopeKind::Lexical || kind == ScopeKind::SimpleCatch ||
+ kind == ScopeKind::Catch || kind == ScopeKind::With ||
+ kind == ScopeKind::FunctionLexical ||
+ kind == ScopeKind::FunctionBodyVar || kind == ScopeKind::ClassBody;
+}
+
+const char* BindingKindString(BindingKind kind);
+const char* ScopeKindString(ScopeKind kind);
+
+template <typename NameT>
+class AbstractBindingName;
+
+template <>
+class AbstractBindingName<JSAtom> {
+ public:
+ using NameT = JSAtom;
+ using NamePointerT = NameT*;
+
+ private:
+  // A JSAtom* with its low bits used as tags for:
+  // * whether it is closed over (i.e., exists in the environment shape)
+  // * whether it is a top-level function binding in global or eval scope,
+  //   rather than a var binding (both are in the same range in Scope data)
+ uintptr_t bits_;
+
+ static constexpr uintptr_t ClosedOverFlag = 0x1;
+ // TODO: We should reuse this bit for let vs class distinction to
+ // show the better redeclaration error message (bug 1428672).
+ static constexpr uintptr_t TopLevelFunctionFlag = 0x2;
+ static constexpr uintptr_t FlagMask = 0x3;
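+
+  // GC cells are at least 8-byte aligned, so the low two bits of a JSAtom*
+  // are always zero and are free to carry the flags above. (Illustrative: a
+  // closed-over binding stores uintptr_t(atom) | ClosedOverFlag.)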
+
+ public:
+ AbstractBindingName() : bits_(0) {}
+
+ AbstractBindingName(NameT* name, bool closedOver,
+ bool isTopLevelFunction = false)
+ : bits_(uintptr_t(name) | (closedOver ? ClosedOverFlag : 0x0) |
+ (isTopLevelFunction ? TopLevelFunctionFlag : 0x0)) {}
+
+ NamePointerT name() const {
+ return reinterpret_cast<NameT*>(bits_ & ~FlagMask);
+ }
+
+ bool closedOver() const { return bits_ & ClosedOverFlag; }
+
+ private:
+ friend class BaseAbstractBindingIter<NameT>;
+
+ // This method should be called only for binding names in `vars` range in
+ // BindingIter.
+ bool isTopLevelFunction() const { return bits_ & TopLevelFunctionFlag; }
+
+ public:
+ void trace(JSTracer* trc) {
+ if (JSAtom* atom = name()) {
+ TraceManuallyBarrieredEdge(trc, &atom, "binding name");
+ }
+ }
+};
+
+template <>
+class AbstractBindingName<frontend::TaggedParserAtomIndex> {
+ uint32_t bits_;
+
+ using TaggedParserAtomIndex = frontend::TaggedParserAtomIndex;
+
+ public:
+ using NameT = TaggedParserAtomIndex;
+ using NamePointerT = NameT;
+
+ private:
+ static constexpr size_t TaggedIndexBit = TaggedParserAtomIndex::IndexBit + 2;
+
+ static constexpr size_t FlagShift = TaggedIndexBit;
+ static constexpr size_t FlagBit = 2;
+ static constexpr uint32_t FlagMask = BitMask(FlagBit) << FlagShift;
+
+ static constexpr uint32_t ClosedOverFlag = 1 << FlagShift;
+ static constexpr uint32_t TopLevelFunctionFlag = 2 << FlagShift;
+
+ public:
+ AbstractBindingName() : bits_(TaggedParserAtomIndex::NullTag) {
+ // TaggedParserAtomIndex's tags shouldn't overlap with flags.
+ static_assert((TaggedParserAtomIndex::NullTag & FlagMask) == 0);
+ static_assert((TaggedParserAtomIndex::ParserAtomIndexTag & FlagMask) == 0);
+ static_assert((TaggedParserAtomIndex::WellKnownTag & FlagMask) == 0);
+ }
+
+ AbstractBindingName(TaggedParserAtomIndex name, bool closedOver,
+ bool isTopLevelFunction = false)
+ : bits_(name.rawData() | (closedOver ? ClosedOverFlag : 0x0) |
+ (isTopLevelFunction ? TopLevelFunctionFlag : 0x0)) {}
+
+ public:
+ NamePointerT name() const {
+ return TaggedParserAtomIndex::fromRaw(bits_ & ~FlagMask);
+ }
+
+ bool closedOver() const { return bits_ & ClosedOverFlag; }
+
+ AbstractBindingName<JSAtom> copyWithNewAtom(JSAtom* newName) const {
+ return AbstractBindingName<JSAtom>(newName, closedOver(),
+ isTopLevelFunction());
+ }
+
+ void updateNameAfterStencilMerge(TaggedParserAtomIndex name) {
+ bits_ = (bits_ & FlagMask) | name.rawData();
+ }
+
+ private:
+ friend class BaseAbstractBindingIter<TaggedParserAtomIndex>;
+ friend class frontend::ScopeStencil;
+
+ // This method should be called only for binding names in `vars` range in
+ // BindingIter.
+ bool isTopLevelFunction() const { return bits_ & TopLevelFunctionFlag; }
+};
+
+using BindingName = AbstractBindingName<JSAtom>;
+
+static inline void TraceBindingNames(JSTracer* trc, BindingName* names,
+ uint32_t length) {
+ for (uint32_t i = 0; i < length; i++) {
+ JSAtom* name = names[i].name();
+ MOZ_ASSERT(name);
+ TraceManuallyBarrieredEdge(trc, &name, "scope name");
+ }
+}
+static inline void TraceNullableBindingNames(JSTracer* trc, BindingName* names,
+ uint32_t length) {
+ for (uint32_t i = 0; i < length; i++) {
+ if (JSAtom* name = names[i].name()) {
+ TraceManuallyBarrieredEdge(trc, &name, "scope name");
+ }
+ }
+}
+
+const size_t ScopeDataAlignBytes = size_t(1) << gc::CellFlagBitsReservedForGC;
+
+/**
+ * Base class for scope {Runtime,Parser}Data classes to inherit from.
+ *
+ * `js::Scope` stores a pointer to its RuntimeData in its first word, so
+ * RuntimeData classes must be suitably aligned to allow storing GC flags
+ * in the low bits.
+ */
+template <typename NameT>
+class AbstractBaseScopeData {
+ public:
+ using NameType = NameT;
+
+  // The number of binding names stored after the specialized ScopeData
+  // subclass.
+ uint32_t length = 0;
+};
+
+template <typename ScopeDataT>
+static inline void AssertDerivedScopeData() {
+ static_assert(
+ !std::is_same_v<ScopeDataT,
+ AbstractBaseScopeData<typename ScopeDataT::NameType>>,
+ "ScopeDataT shouldn't be AbstractBaseScopeData");
+ static_assert(
+ std::is_base_of_v<AbstractBaseScopeData<typename ScopeDataT::NameType>,
+ ScopeDataT>,
+ "ScopeDataT should be subclass of AbstractBaseScopeData");
+}
+
+template <typename ScopeDataT>
+static inline size_t GetOffsetOfScopeDataTrailingNames() {
+ AssertDerivedScopeData<ScopeDataT>();
+ return sizeof(ScopeDataT);
+}
+
+template <typename ScopeDataT>
+static inline AbstractBindingName<typename ScopeDataT::NameType>*
+GetScopeDataTrailingNamesPointer(ScopeDataT* data) {
+ AssertDerivedScopeData<ScopeDataT>();
+ return reinterpret_cast<AbstractBindingName<typename ScopeDataT::NameType>*>(
+ data + 1);
+}
+
+template <typename ScopeDataT>
+static inline const AbstractBindingName<typename ScopeDataT::NameType>*
+GetScopeDataTrailingNamesPointer(const ScopeDataT* data) {
+ AssertDerivedScopeData<ScopeDataT>();
+ return reinterpret_cast<
+ const AbstractBindingName<typename ScopeDataT::NameType>*>(data + 1);
+}
+
+template <typename ScopeDataT>
+static inline mozilla::Span<AbstractBindingName<typename ScopeDataT::NameType>>
+GetScopeDataTrailingNames(ScopeDataT* data) {
+ return mozilla::Span(GetScopeDataTrailingNamesPointer(data), data->length);
+}
+
+template <typename ScopeDataT>
+static inline mozilla::Span<
+ const AbstractBindingName<typename ScopeDataT::NameType>>
+GetScopeDataTrailingNames(const ScopeDataT* data) {
+ return mozilla::Span(GetScopeDataTrailingNamesPointer(data), data->length);
+}
+
+using BaseScopeData = AbstractBaseScopeData<JSAtom>;
+
+inline void PoisonNames(AbstractBindingName<JSAtom>* data, uint32_t length) {
+ AlwaysPoison(data, JS_SCOPE_DATA_TRAILING_NAMES_PATTERN,
+ sizeof(AbstractBindingName<JSAtom>) * length,
+ MemCheckKind::MakeUndefined);
+}
+
+// frontend::TaggedParserAtomIndex doesn't require a poison value.
+// Fill with null values instead.
+inline void PoisonNames(
+ AbstractBindingName<frontend::TaggedParserAtomIndex>* data,
+ uint32_t length) {
+ std::fill_n(data, length,
+ AbstractBindingName<frontend::TaggedParserAtomIndex>());
+}
+
+template <typename ScopeDataT>
+static inline void PoisonNames(ScopeDataT* data, uint32_t length) {
+ if (length) {
+ PoisonNames(GetScopeDataTrailingNamesPointer(data), length);
+ }
+}
+
+//
+// Allow using is<T> and as<T> on Rooted<Scope*> and Handle<Scope*>.
+//
+template <typename Wrapper>
+class WrappedPtrOperations<Scope*, Wrapper> {
+ public:
+ template <class U>
+ JS::Handle<U*> as() const {
+ const Wrapper& self = *static_cast<const Wrapper*>(this);
+ MOZ_ASSERT_IF(self, self->template is<U>());
+ return Handle<U*>::fromMarkedLocation(
+ reinterpret_cast<U* const*>(self.address()));
+ }
+};
+
+//
+// The base class of all Scopes.
+//
+class Scope : public gc::TenuredCellWithNonGCPointer<BaseScopeData> {
+ friend class GCMarker;
+ friend class frontend::ScopeStencil;
+ friend class js::AbstractBindingIter<JSAtom>;
+ friend class js::frontend::RuntimeScopeBindingCache;
+ friend class gc::CellAllocator;
+
+ protected:
+ // The raw data pointer, stored in the cell header.
+ BaseScopeData* rawData() { return headerPtr(); }
+ const BaseScopeData* rawData() const { return headerPtr(); }
+
+ // The kind determines data_.
+ const ScopeKind kind_;
+
+ // If there are any aliased bindings, the shape for the
+ // EnvironmentObject. Otherwise nullptr.
+ const HeapPtr<SharedShape*> environmentShape_;
+
+ // The enclosing scope or nullptr.
+ HeapPtr<Scope*> enclosingScope_;
+
+ Scope(ScopeKind kind, Scope* enclosing, SharedShape* environmentShape)
+ : TenuredCellWithNonGCPointer(nullptr),
+ kind_(kind),
+ environmentShape_(environmentShape),
+ enclosingScope_(enclosing) {}
+
+ static Scope* create(JSContext* cx, ScopeKind kind, Handle<Scope*> enclosing,
+ Handle<SharedShape*> envShape);
+
+ template <typename ConcreteScope>
+ void initData(
+ MutableHandle<UniquePtr<typename ConcreteScope::RuntimeData>> data);
+
+ template <typename F>
+ void applyScopeDataTyped(F&& f);
+
+ static void updateEnvShapeIfRequired(mozilla::Maybe<uint32_t>* envShape,
+ bool needsEnvironment);
+
+ public:
+ template <typename ConcreteScope>
+ static ConcreteScope* create(
+ JSContext* cx, ScopeKind kind, Handle<Scope*> enclosing,
+ Handle<SharedShape*> envShape,
+ MutableHandle<UniquePtr<typename ConcreteScope::RuntimeData>> data);
+
+ static const JS::TraceKind TraceKind = JS::TraceKind::Scope;
+
+ template <typename T>
+ bool is() const {
+ return kind_ == T::classScopeKind_;
+ }
+
+ template <typename T>
+ T& as() {
+ MOZ_ASSERT(this->is<T>());
+ return *static_cast<T*>(this);
+ }
+
+ template <typename T>
+ const T& as() const {
+ MOZ_ASSERT(this->is<T>());
+ return *static_cast<const T*>(this);
+ }
+
+ ScopeKind kind() const { return kind_; }
+
+ bool isNamedLambda() const {
+ return kind() == ScopeKind::NamedLambda ||
+ kind() == ScopeKind::StrictNamedLambda;
+ }
+
+ SharedShape* environmentShape() const { return environmentShape_; }
+
+ Scope* enclosing() const { return enclosingScope_; }
+
+ static bool hasEnvironment(ScopeKind kind, bool hasEnvironmentShape = false) {
+ switch (kind) {
+ case ScopeKind::With:
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic:
+ return true;
+ default:
+ // If there's a shape, an environment must be created for this scope.
+ return hasEnvironmentShape;
+ }
+ }
+
+ bool hasEnvironment() const {
+ return hasEnvironment(kind_, !!environmentShape());
+ }
+
+ uint32_t firstFrameSlot() const;
+
+ uint32_t chainLength() const;
+ uint32_t environmentChainLength() const;
+
+ template <typename T>
+ bool hasOnChain() const {
+ for (const Scope* it = this; it; it = it->enclosing()) {
+ if (it->is<T>()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool hasOnChain(ScopeKind kind) const {
+ for (const Scope* it = this; it; it = it->enclosing()) {
+ if (it->kind() == kind) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void traceChildren(JSTracer* trc);
+ void finalize(JS::GCContext* gcx);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ void dump();
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ static bool dumpForDisassemble(JSContext* cx, JS::Handle<Scope*> scope,
+ GenericPrinter& out, const char* indent);
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) */
+};
+
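+// Total allocation size for a scope data object with |length| trailing
+// binding names. (Illustrative: length == 3 yields sizeof(DataT) +
+// 3 * sizeof(AbstractBindingName<...>).)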
+template <class DataT>
+inline size_t SizeOfScopeData(uint32_t length) {
+ using BindingT = AbstractBindingName<typename DataT::NameType>;
+ return GetOffsetOfScopeDataTrailingNames<DataT>() + length * sizeof(BindingT);
+}
+
+//
+// A useful alias template for selecting between GC-aware wrappers around
+// pointers to BaseScopeData-derived types and raw-pointer wrappers around
+// BaseParserScopeData-derived types.
+//
+template <typename ScopeT, typename AtomT>
+using AbstractScopeData = typename ScopeT::template AbstractData<AtomT>;
+
+// Binding names are stored from `this+1`.
+// Make sure the class aligns the binding name size.
+template <typename SlotInfo>
+struct alignas(alignof(AbstractBindingName<frontend::TaggedParserAtomIndex>))
+ ParserScopeData
+ : public AbstractBaseScopeData<frontend::TaggedParserAtomIndex> {
+ SlotInfo slotInfo;
+
+ explicit ParserScopeData(size_t length) { PoisonNames(this, length); }
+ ParserScopeData() = delete;
+};
+
+// RuntimeScopeData has 2 requirements:
+// * It aligns with `BindingName`, that is stored after `this+1`
+// * It aligns with ScopeDataAlignBytes, in order to put it in the first
+// word of `js::Scope`
+static_assert(alignof(BindingName) <= ScopeDataAlignBytes);
+template <typename SlotInfo>
+struct alignas(ScopeDataAlignBytes) RuntimeScopeData
+ : public AbstractBaseScopeData<JSAtom> {
+ SlotInfo slotInfo;
+
+ explicit RuntimeScopeData(size_t length) { PoisonNames(this, length); }
+ RuntimeScopeData() = delete;
+
+ void trace(JSTracer* trc);
+};
+
+//
+// A lexical scope that holds let and const bindings. There are several
+// kinds of LexicalScopes:
+//
+// Lexical
+// A plain lexical scope.
+//
+// SimpleCatch
+// Holds the single catch parameter of a catch block.
+//
+// Catch
+// Holds the catch parameters (and only the catch parameters) of a catch
+// block.
+//
+// NamedLambda
+// StrictNamedLambda
+// Holds the single name of the callee for a named lambda expression.
+//
+// All kinds of LexicalScopes correspond to LexicalEnvironmentObjects on the
+// environment chain.
+//
+class LexicalScope : public Scope {
+ friend class Scope;
+ friend class AbstractBindingIter<JSAtom>;
+ friend class GCMarker;
+ friend class frontend::ScopeStencil;
+
+ public:
+ struct SlotInfo {
+ // Frame slots [0, nextFrameSlot) are live when this is the innermost
+ // scope.
+ uint32_t nextFrameSlot = 0;
+
+ // Bindings are sorted by kind in both frames and environments.
+ //
+ // lets - [0, constStart)
+ // consts - [constStart, length)
+ uint32_t constStart = 0;
+ };
+
+ using RuntimeData = RuntimeScopeData<SlotInfo>;
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ private:
+ static void prepareForScopeCreation(ScopeKind kind, uint32_t firstFrameSlot,
+ LexicalScope::ParserData* data,
+ mozilla::Maybe<uint32_t>* envShape);
+
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+
+ public:
+ static uint32_t nextFrameSlot(Scope* scope);
+
+ uint32_t nextFrameSlot() const { return data().slotInfo.nextFrameSlot; }
+
+ // Returns an empty shape for extensible global and non-syntactic lexical
+ // scopes.
+ static SharedShape* getEmptyExtensibleEnvironmentShape(JSContext* cx);
+};
+
+template <>
+inline bool Scope::is<LexicalScope>() const {
+ return kind_ == ScopeKind::Lexical || kind_ == ScopeKind::SimpleCatch ||
+ kind_ == ScopeKind::Catch || kind_ == ScopeKind::NamedLambda ||
+ kind_ == ScopeKind::StrictNamedLambda ||
+ kind_ == ScopeKind::FunctionLexical;
+}
+
+// The body scope of a JS class, containing only synthetic bindings for private
+// class members. (The binding for the class name, `C` in the example below, is
+// in another scope, a `LexicalScope`, that encloses the `ClassBodyScope`.)
+// Example:
+//
+// class C {
+// #f = 0;
+// #m() {
+// return this.#f++;
+// }
+// }
+//
+// This class has a ClassBodyScope with four synthetic bindings:
+// - `#f` (private name)
+// - `#m` (private name)
+// - `#m.method` (function object)
+// - `.privateBrand` (the class's private brand)
+class ClassBodyScope : public Scope {
+ friend class Scope;
+ friend class AbstractBindingIter<JSAtom>;
+ friend class GCMarker;
+ friend class frontend::ScopeStencil;
+ friend class AbstractScopePtr;
+
+ static const ScopeKind classScopeKind_ = ScopeKind::ClassBody;
+
+ public:
+ struct SlotInfo {
+ // Frame slots [0, nextFrameSlot) are live when this is the innermost
+ // scope.
+ uint32_t nextFrameSlot = 0;
+
+ // Bindings are sorted by kind in both frames and environments.
+ //
+ // synthetic - [0, privateMethodStart)
+ // privateMethod - [privateMethodStart, length)
+ uint32_t privateMethodStart = 0;
+ };
+
+ using RuntimeData = RuntimeScopeData<SlotInfo>;
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ private:
+ static void prepareForScopeCreation(ScopeKind kind, uint32_t firstFrameSlot,
+ ClassBodyScope::ParserData* data,
+ mozilla::Maybe<uint32_t>* envShape);
+
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+
+ public:
+ static uint32_t nextFrameSlot(Scope* scope);
+
+ uint32_t nextFrameSlot() const { return data().slotInfo.nextFrameSlot; }
+
+ // Returns an empty shape for extensible global and non-syntactic lexical
+ // scopes.
+ static SharedShape* getEmptyExtensibleEnvironmentShape(JSContext* cx);
+};
+
+//
+// Scope corresponding to a function. Holds formal parameter names, special
+// internal names (see FunctionScope::isSpecialName), and, if the function
+// parameters contain no expressions that might possibly be evaluated, the
+// function's var bindings. For example, in these functions, the FunctionScope
+// will store a/b/c bindings but not d/e/f bindings:
+//
+// function f1(a, b) {
+// var c;
+// let e;
+// const f = 3;
+// }
+// function f2([a], b = 4, ...c) {
+// var d, e, f; // stored in VarScope
+// }
+//
+// Corresponds to CallObject on environment chain.
+//
+class FunctionScope : public Scope {
+ friend class GCMarker;
+ friend class AbstractBindingIter<JSAtom>;
+ friend class AbstractPositionalFormalParameterIter<JSAtom>;
+ friend class Scope;
+ friend class AbstractScopePtr;
+ static const ScopeKind classScopeKind_ = ScopeKind::Function;
+
+ public:
+ struct SlotInfo {
+ // Frame slots [0, nextFrameSlot) are live when this is the innermost
+ // scope.
+ uint32_t nextFrameSlot = 0;
+
+ // Flag bits.
+ // This uses uint32_t in order to make this struct packed.
+ uint32_t flags = 0;
+
+ // If parameter expressions are present, parameters act like lexical
+ // bindings.
+ static constexpr uint32_t HasParameterExprsFlag = 1;
+
+ // Bindings are sorted by kind in both frames and environments.
+ //
+ // Positional formal parameter names are those that are not
+ // destructured. They may be referred to by argument slots if
+ // !script()->hasParameterExprs().
+ //
+ // An argument slot that needs to be skipped due to being destructured
+ // or having defaults will have a nullptr name in the name array to
+ // advance the argument slot.
+ //
+ // Rest parameter binding is also included in positional formals.
+    // It also becomes nullptr if it is destructured.
+ //
+ // The number of positional formals is equal to function.length if
+ // there's no rest, function.length+1 otherwise.
+ //
+ // Destructuring parameters and destructuring rest are included in
+ // "other formals" below.
+ //
+ // "vars" contains the following:
+ // * function's top level vars if !script()->hasParameterExprs()
+ // * special internal names (arguments, .this, .generator) if
+ // they're used.
+ //
+ // positional formals - [0, nonPositionalFormalStart)
+    // other formals - [nonPositionalFormalStart, varStart)
+ // vars - [varStart, length)
+ uint16_t nonPositionalFormalStart = 0;
+ uint16_t varStart = 0;
+
+ bool hasParameterExprs() const { return flags & HasParameterExprsFlag; }
+ void setHasParameterExprs() { flags |= HasParameterExprsFlag; }
+ };
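+
+  // For example (an illustrative sketch), in:
+  //
+  //   function f(a, [b], c) {}
+  //
+  // the positional formal names are stored as [a, nullptr, c]: the
+  // destructured parameter [b] leaves a nullptr entry that advances the
+  // argument slot, and its bindings are counted among the "other formals".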
+
+ struct alignas(ScopeDataAlignBytes) RuntimeData
+ : public AbstractBaseScopeData<JSAtom> {
+ SlotInfo slotInfo;
+ // The canonical function of the scope, as during a scope walk we
+ // often query properties of the JSFunction (e.g., is the function an
+ // arrow).
+ HeapPtr<JSFunction*> canonicalFunction = {};
+
+ explicit RuntimeData(size_t length) { PoisonNames(this, length); }
+ RuntimeData() = delete;
+
+ void trace(JSTracer* trc);
+ };
+
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ static void prepareForScopeCreation(FunctionScope::ParserData* data,
+ bool hasParameterExprs,
+ bool needsEnvironment,
+ mozilla::Maybe<uint32_t>* envShape);
+
+ private:
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+
+ public:
+ uint32_t nextFrameSlot() const { return data().slotInfo.nextFrameSlot; }
+
+ JSFunction* canonicalFunction() const { return data().canonicalFunction; }
+ void initCanonicalFunction(JSFunction* fun) {
+ data().canonicalFunction.init(fun);
+ }
+
+ JSScript* script() const;
+
+ bool hasParameterExprs() const { return data().slotInfo.hasParameterExprs(); }
+
+ uint32_t numPositionalFormalParameters() const {
+ return data().slotInfo.nonPositionalFormalStart;
+ }
+
+ static bool isSpecialName(frontend::TaggedParserAtomIndex name);
+};
+
+//
+// Scope holding only vars. There is a single kind of VarScope.
+//
+// FunctionBodyVar
+// Corresponds to the extra var scope present in functions with parameter
+// expressions. See examples in comment above FunctionScope.
+//
+// Corresponds to VarEnvironmentObject on environment chain.
+//
+class VarScope : public Scope {
+ friend class GCMarker;
+ friend class AbstractBindingIter<JSAtom>;
+ friend class Scope;
+ friend class frontend::ScopeStencil;
+
+ public:
+ struct SlotInfo {
+ // Frame slots [0, nextFrameSlot) are live when this is the innermost
+ // scope.
+ uint32_t nextFrameSlot = 0;
+
+ // All bindings are vars.
+ //
+ // vars - [0, length)
+ };
+
+ using RuntimeData = RuntimeScopeData<SlotInfo>;
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ private:
+ static void prepareForScopeCreation(ScopeKind kind,
+ VarScope::ParserData* data,
+ uint32_t firstFrameSlot,
+ bool needsEnvironment,
+ mozilla::Maybe<uint32_t>* envShape);
+
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+
+ public:
+ uint32_t nextFrameSlot() const { return data().slotInfo.nextFrameSlot; }
+};
+
+template <>
+inline bool Scope::is<VarScope>() const {
+ return kind_ == ScopeKind::FunctionBodyVar;
+}
+
+//
+// Scope corresponding to both the global object scope and the global lexical
+// scope.
+//
+// Both are extensible and are singletons across <script> tags, so these
+// scopes are a fragment of the names in global scope. In other words, two
+// global scripts may have two different GlobalScopes despite having the same
+// GlobalObject.
+//
+// There are 2 kinds of GlobalScopes.
+//
+// Global
+// Corresponds to a GlobalObject and its GlobalLexicalEnvironmentObject on
+// the environment chain.
+//
+// NonSyntactic
+// Corresponds to a non-GlobalObject created by the embedding on the
+// environment chain. This distinction is important for optimizations.
+//
+class GlobalScope : public Scope {
+ friend class Scope;
+ friend class AbstractBindingIter<JSAtom>;
+ friend class GCMarker;
+
+ public:
+ struct SlotInfo {
+ // Bindings are sorted by kind.
+    // `vars` includes top-level functions, which are distinguished by a bit
+    // on the BindingName.
+ //
+ // vars - [0, letStart)
+ // lets - [letStart, constStart)
+ // consts - [constStart, length)
+ uint32_t letStart = 0;
+ uint32_t constStart = 0;
+ };
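+
+  // For example (an illustrative sketch), a global script containing:
+  //
+  //   var a;
+  //   let b;
+  //   const c = 1;
+  //
+  // lays out its bindings as vars [0, 1), lets [1, 2), consts [2, 3).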
+
+ using RuntimeData = RuntimeScopeData<SlotInfo>;
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ static GlobalScope* createEmpty(JSContext* cx, ScopeKind kind);
+
+ private:
+ static GlobalScope* createWithData(
+ JSContext* cx, ScopeKind kind,
+ MutableHandle<UniquePtr<RuntimeData>> data);
+
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+
+ public:
+ bool isSyntactic() const { return kind() != ScopeKind::NonSyntactic; }
+
+ bool hasBindings() const { return data().length > 0; }
+};
+
+template <>
+inline bool Scope::is<GlobalScope>() const {
+ return kind_ == ScopeKind::Global || kind_ == ScopeKind::NonSyntactic;
+}
+
+//
+// Scope of a 'with' statement. Has no bindings.
+//
+// Corresponds to a WithEnvironmentObject on the environment chain.
+class WithScope : public Scope {
+ friend class Scope;
+ friend class AbstractScopePtr;
+ static const ScopeKind classScopeKind_ = ScopeKind::With;
+
+ public:
+ static WithScope* create(JSContext* cx, Handle<Scope*> enclosing);
+};
+
+//
+// Scope of an eval. Holds var bindings. There are 2 kinds of EvalScopes.
+//
+// StrictEval
+//   A strict eval. Corresponds to a VarEnvironmentObject, where its var
+//   bindings live.
+//
+// Eval
+// A sloppy eval. This is an empty scope, used only in the frontend, to
+// detect redeclaration errors. It has no Environment. Any `var`s declared
+// in the eval code are bound on the nearest enclosing var environment.
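+//
+//   For example (an illustrative sketch):
+//
+//     function g() { eval("var x = 1;"); }
+//
+//   binds `x` on g's var environment, not on a scope created by the eval
+//   itself.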
+//
+class EvalScope : public Scope {
+ friend class Scope;
+ friend class AbstractBindingIter<JSAtom>;
+ friend class GCMarker;
+ friend class frontend::ScopeStencil;
+
+ public:
+ struct SlotInfo {
+ // Frame slots [0, nextFrameSlot) are live when this is the innermost
+ // scope.
+ uint32_t nextFrameSlot = 0;
+
+ // All bindings in an eval script are 'var' bindings. The implicit
+ // lexical scope around the eval is present regardless of strictness
+ // and is its own LexicalScope.
+    // `vars` includes top-level functions, which are distinguished by a bit
+    // on the BindingName.
+ //
+ // vars - [0, length)
+ };
+
+ using RuntimeData = RuntimeScopeData<SlotInfo>;
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ private:
+ static void prepareForScopeCreation(ScopeKind scopeKind,
+ EvalScope::ParserData* data,
+ mozilla::Maybe<uint32_t>* envShape);
+
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+
+ public:
+  // Starting from |scope|, returns the nearest var scope that a direct eval
+  // can introduce vars on.
+ static Scope* nearestVarScopeForDirectEval(Scope* scope);
+
+ uint32_t nextFrameSlot() const { return data().slotInfo.nextFrameSlot; }
+
+ bool strict() const { return kind() == ScopeKind::StrictEval; }
+
+ bool hasBindings() const { return data().length > 0; }
+
+ bool isNonGlobal() const {
+ if (strict()) {
+ return true;
+ }
+ return !nearestVarScopeForDirectEval(enclosing())->is<GlobalScope>();
+ }
+};
+
+template <>
+inline bool Scope::is<EvalScope>() const {
+ return kind_ == ScopeKind::Eval || kind_ == ScopeKind::StrictEval;
+}
+
+//
+// Scope corresponding to the toplevel script in an ES module.
+//
+// Like GlobalScopes, these scopes contain both vars and lexical bindings, as
+// the treatment of imports and exports requires putting them in one scope.
+//
+// Corresponds to a ModuleEnvironmentObject on the environment chain.
+//
+class ModuleScope : public Scope {
+ friend class GCMarker;
+ friend class AbstractBindingIter<JSAtom>;
+ friend class Scope;
+ friend class AbstractScopePtr;
+ friend class frontend::ScopeStencil;
+ static const ScopeKind classScopeKind_ = ScopeKind::Module;
+
+ public:
+ struct SlotInfo {
+ // Frame slots [0, nextFrameSlot) are live when this is the innermost
+ // scope.
+ uint32_t nextFrameSlot = 0;
+
+ // Bindings are sorted by kind.
+ //
+ // imports - [0, varStart)
+ // vars - [varStart, letStart)
+ // lets - [letStart, constStart)
+ // consts - [constStart, length)
+ uint32_t varStart = 0;
+ uint32_t letStart = 0;
+ uint32_t constStart = 0;
+ };
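+
+  // For example (an illustrative sketch), a module containing:
+  //
+  //   import { a } from "mod";
+  //   var b;
+  //   let c;
+  //   const d = 1;
+  //
+  // lays out its bindings as imports [0, 1), vars [1, 2), lets [2, 3),
+  // consts [3, 4).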
+
+ struct alignas(ScopeDataAlignBytes) RuntimeData
+ : public AbstractBaseScopeData<JSAtom> {
+ SlotInfo slotInfo;
+ // The module of the scope.
+ HeapPtr<ModuleObject*> module = {};
+
+ explicit RuntimeData(size_t length);
+ RuntimeData() = delete;
+
+ void trace(JSTracer* trc);
+ };
+
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ private:
+ static void prepareForScopeCreation(ModuleScope::ParserData* data,
+ mozilla::Maybe<uint32_t>* envShape);
+
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+
+ public:
+ uint32_t nextFrameSlot() const { return data().slotInfo.nextFrameSlot; }
+
+ ModuleObject* module() const { return data().module; }
+ void initModule(ModuleObject* mod) { return data().module.init(mod); }
+
+ // Off-thread compilation needs to calculate environmentChainLength for
+ // an emptyGlobalScope where the global may not be available.
+ static const size_t EnclosingEnvironmentChainLength = 1;
+};
+
+class WasmInstanceScope : public Scope {
+ friend class AbstractBindingIter<JSAtom>;
+ friend class Scope;
+ friend class GCMarker;
+ friend class AbstractScopePtr;
+ static const ScopeKind classScopeKind_ = ScopeKind::WasmInstance;
+
+ public:
+ struct SlotInfo {
+ // Frame slots [0, nextFrameSlot) are live when this is the innermost
+ // scope.
+ uint32_t nextFrameSlot = 0;
+
+    // Bindings list the wasm memories and globals.
+ //
+ // memories - [0, globalsStart)
+ // globals - [globalsStart, length)
+ uint32_t globalsStart = 0;
+ };
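+
+  // For example (an illustrative sketch), an instance with one memory and
+  // two globals lays out its bindings as memories [0, 1) and globals [1, 3).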
+
+ struct alignas(ScopeDataAlignBytes) RuntimeData
+ : public AbstractBaseScopeData<JSAtom> {
+ SlotInfo slotInfo;
+ // The wasm instance of the scope.
+ HeapPtr<WasmInstanceObject*> instance = {};
+
+ explicit RuntimeData(size_t length);
+ RuntimeData() = delete;
+
+ void trace(JSTracer* trc);
+ };
+
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ static WasmInstanceScope* create(JSContext* cx, WasmInstanceObject* instance);
+
+ private:
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+
+ public:
+ WasmInstanceObject* instance() const { return data().instance; }
+
+ uint32_t memoriesStart() const { return 0; }
+
+ uint32_t globalsStart() const { return data().slotInfo.globalsStart; }
+
+ uint32_t namesCount() const { return data().length; }
+};
+
+// Scope corresponding to a wasm function. A WasmFunctionScope is used only by
+// the Debugger, not for wasm execution.
+//
+class WasmFunctionScope : public Scope {
+ friend class AbstractBindingIter<JSAtom>;
+ friend class Scope;
+ friend class GCMarker;
+ friend class AbstractScopePtr;
+ static const ScopeKind classScopeKind_ = ScopeKind::WasmFunction;
+
+ public:
+ struct SlotInfo {
+ // Frame slots [0, nextFrameSlot) are live when this is the innermost
+ // scope.
+ uint32_t nextFrameSlot = 0;
+
+ // Bindings are the local variable names.
+ //
+ // vars - [0, length)
+ };
+
+ using RuntimeData = RuntimeScopeData<SlotInfo>;
+ using ParserData = ParserScopeData<SlotInfo>;
+
+ template <typename NameT>
+ using AbstractData =
+ typename std::conditional_t<std::is_same<NameT, JSAtom>::value,
+ RuntimeData, ParserData>;
+
+ static WasmFunctionScope* create(JSContext* cx, Handle<Scope*> enclosing,
+ uint32_t funcIndex);
+
+ private:
+ RuntimeData& data() { return *static_cast<RuntimeData*>(rawData()); }
+
+ const RuntimeData& data() const {
+ return *static_cast<const RuntimeData*>(rawData());
+ }
+};
+
+template <typename F>
+void Scope::applyScopeDataTyped(F&& f) {
+  switch (kind()) {
+    case ScopeKind::Function:
+      f(&as<FunctionScope>().data());
+      break;
+    case ScopeKind::FunctionBodyVar:
+      f(&as<VarScope>().data());
+      break;
+    case ScopeKind::Lexical:
+    case ScopeKind::SimpleCatch:
+    case ScopeKind::Catch:
+    case ScopeKind::NamedLambda:
+    case ScopeKind::StrictNamedLambda:
+    case ScopeKind::FunctionLexical:
+      f(&as<LexicalScope>().data());
+      break;
+    case ScopeKind::ClassBody:
+      f(&as<ClassBodyScope>().data());
+      break;
+    case ScopeKind::With:
+      // With scopes do not have data.
+      break;
+    case ScopeKind::Eval:
+    case ScopeKind::StrictEval:
+      f(&as<EvalScope>().data());
+      break;
+    case ScopeKind::Global:
+    case ScopeKind::NonSyntactic:
+      f(&as<GlobalScope>().data());
+      break;
+    case ScopeKind::Module:
+      f(&as<ModuleScope>().data());
+      break;
+    case ScopeKind::WasmInstance:
+      f(&as<WasmInstanceScope>().data());
+      break;
+    case ScopeKind::WasmFunction:
+      f(&as<WasmFunctionScope>().data());
+      break;
+  }
+}
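+
+// Example use (a minimal sketch; the callable must be generic because each
+// kind passes a different RuntimeData type, and use() is a hypothetical
+// consumer):
+//
+//   scope->applyScopeDataTyped([](auto* data) { use(data->length); });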
+
+//
+// An iterator for a Scope's bindings. This is the source of truth for frame
+// and environment object layout.
+//
+// It may be placed in GC containers; for example:
+//
+// for (Rooted<BindingIter> bi(cx, BindingIter(scope)); bi; bi++) {
+// use(bi);
+// SomeMayGCOperation();
+// use(bi);
+// }
+//
+template <typename NameT>
+class BaseAbstractBindingIter {
+ protected:
+ // Bindings are sorted by kind. Because different Scopes have differently
+ // laid out {Runtime,Parser}Data for packing, BindingIter must handle all
+ // binding kinds.
+ //
+ // Kind ranges:
+ //
+ // imports - [0, positionalFormalStart)
+ // positional formals - [positionalFormalStart, nonPositionalFormalStart)
+  // other formals - [nonPositionalFormalStart, varStart)
+ // vars - [varStart, letStart)
+ // lets - [letStart, constStart)
+ // consts - [constStart, syntheticStart)
+ // synthetic - [syntheticStart, privateMethodStart)
+  // private methods - [privateMethodStart, length)
+ //
+ // Access method when not closed over:
+ //
+ // imports - name
+ // positional formals - argument slot
+ // other formals - frame slot
+ // vars - frame slot
+ // lets - frame slot
+ // consts - frame slot
+ // synthetic - frame slot
+ // private methods - frame slot
+ //
+ // Access method when closed over:
+ //
+ // imports - name
+ // positional formals - environment slot or name
+ // other formals - environment slot or name
+ // vars - environment slot or name
+ // lets - environment slot or name
+ // consts - environment slot or name
+ // synthetic - environment slot or name
+ // private methods - environment slot or name
+ MOZ_INIT_OUTSIDE_CTOR uint32_t positionalFormalStart_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t nonPositionalFormalStart_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t varStart_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t letStart_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t constStart_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t syntheticStart_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t privateMethodStart_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t length_;
+
+ MOZ_INIT_OUTSIDE_CTOR uint32_t index_;
+
+ enum Flags : uint8_t {
+ CannotHaveSlots = 0,
+ CanHaveArgumentSlots = 1 << 0,
+ CanHaveFrameSlots = 1 << 1,
+ CanHaveEnvironmentSlots = 1 << 2,
+
+ // See comment in settle below.
+ HasFormalParameterExprs = 1 << 3,
+ IgnoreDestructuredFormalParameters = 1 << 4,
+
+ // Truly I hate named lambdas.
+ IsNamedLambda = 1 << 5
+ };
+
+ static const uint8_t CanHaveSlotsMask = 0x7;
+
+ MOZ_INIT_OUTSIDE_CTOR uint8_t flags_;
+ MOZ_INIT_OUTSIDE_CTOR uint16_t argumentSlot_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t frameSlot_;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t environmentSlot_;
+
+ MOZ_INIT_OUTSIDE_CTOR AbstractBindingName<NameT>* names_;
+
+ void init(uint32_t positionalFormalStart, uint32_t nonPositionalFormalStart,
+ uint32_t varStart, uint32_t letStart, uint32_t constStart,
+ uint32_t syntheticStart, uint32_t privateMethodStart, uint8_t flags,
+ uint32_t firstFrameSlot, uint32_t firstEnvironmentSlot,
+ mozilla::Span<AbstractBindingName<NameT>> names) {
+ positionalFormalStart_ = positionalFormalStart;
+ nonPositionalFormalStart_ = nonPositionalFormalStart;
+ varStart_ = varStart;
+ letStart_ = letStart;
+ constStart_ = constStart;
+ syntheticStart_ = syntheticStart;
+ privateMethodStart_ = privateMethodStart;
+ length_ = names.size();
+
+ index_ = 0;
+ flags_ = flags;
+ argumentSlot_ = 0;
+ frameSlot_ = firstFrameSlot;
+ environmentSlot_ = firstEnvironmentSlot;
+ names_ = names.data();
+
+ settle();
+ }
+
+ void init(LexicalScope::AbstractData<NameT>& data, uint32_t firstFrameSlot,
+ uint8_t flags);
+
+ void init(ClassBodyScope::AbstractData<NameT>& data, uint32_t firstFrameSlot);
+ void init(FunctionScope::AbstractData<NameT>& data, uint8_t flags);
+
+ void init(VarScope::AbstractData<NameT>& data, uint32_t firstFrameSlot);
+ void init(GlobalScope::AbstractData<NameT>& data);
+ void init(EvalScope::AbstractData<NameT>& data, bool strict);
+ void init(ModuleScope::AbstractData<NameT>& data);
+ void init(WasmInstanceScope::AbstractData<NameT>& data);
+ void init(WasmFunctionScope::AbstractData<NameT>& data);
+
+ bool hasFormalParameterExprs() const {
+ return flags_ & HasFormalParameterExprs;
+ }
+
+ bool ignoreDestructuredFormalParameters() const {
+ return flags_ & IgnoreDestructuredFormalParameters;
+ }
+
+ bool isNamedLambda() const { return flags_ & IsNamedLambda; }
+
+ void increment() {
+ MOZ_ASSERT(!done());
+ if (flags_ & CanHaveSlotsMask) {
+ if (canHaveArgumentSlots()) {
+ if (index_ < nonPositionalFormalStart_) {
+ MOZ_ASSERT(index_ >= positionalFormalStart_);
+ argumentSlot_++;
+ }
+ }
+ if (closedOver()) {
+ // Imports must not be given known slots. They are
+ // indirect bindings.
+ MOZ_ASSERT(kind() != BindingKind::Import);
+ MOZ_ASSERT(canHaveEnvironmentSlots());
+ environmentSlot_++;
+ } else if (canHaveFrameSlots()) {
+ // Usually positional formal parameters don't have frame
+ // slots, except when there are parameter expressions, in
+ // which case they act like lets.
+ if (index_ >= nonPositionalFormalStart_ ||
+ (hasFormalParameterExprs() && name())) {
+ frameSlot_++;
+ }
+ }
+ }
+ index_++;
+ }
+
+ void settle() {
+ if (ignoreDestructuredFormalParameters()) {
+ while (!done() && !name()) {
+ increment();
+ }
+ }
+ }
+
+ BaseAbstractBindingIter() = default;
+
+ public:
+ BaseAbstractBindingIter(LexicalScope::AbstractData<NameT>& data,
+ uint32_t firstFrameSlot, bool isNamedLambda) {
+ init(data, firstFrameSlot, isNamedLambda ? IsNamedLambda : 0);
+ }
+
+ BaseAbstractBindingIter(ClassBodyScope::AbstractData<NameT>& data,
+ uint32_t firstFrameSlot) {
+ init(data, firstFrameSlot);
+ }
+
+ BaseAbstractBindingIter(FunctionScope::AbstractData<NameT>& data,
+ bool hasParameterExprs) {
+ init(data, IgnoreDestructuredFormalParameters |
+ (hasParameterExprs ? HasFormalParameterExprs : 0));
+ }
+
+ BaseAbstractBindingIter(VarScope::AbstractData<NameT>& data,
+ uint32_t firstFrameSlot) {
+ init(data, firstFrameSlot);
+ }
+
+ explicit BaseAbstractBindingIter(GlobalScope::AbstractData<NameT>& data) {
+ init(data);
+ }
+
+ explicit BaseAbstractBindingIter(ModuleScope::AbstractData<NameT>& data) {
+ init(data);
+ }
+
+ explicit BaseAbstractBindingIter(
+ WasmFunctionScope::AbstractData<NameT>& data) {
+ init(data);
+ }
+
+ BaseAbstractBindingIter(EvalScope::AbstractData<NameT>& data, bool strict) {
+ init(data, strict);
+ }
+
+ MOZ_IMPLICIT BaseAbstractBindingIter(
+ const BaseAbstractBindingIter<NameT>& bi) = default;
+
+ bool done() const { return index_ == length_; }
+
+ explicit operator bool() const { return !done(); }
+
+ void operator++(int) {
+ increment();
+ settle();
+ }
+
+ bool isLast() const {
+ MOZ_ASSERT(!done());
+ return index_ + 1 == length_;
+ }
+
+ bool canHaveArgumentSlots() const { return flags_ & CanHaveArgumentSlots; }
+
+ bool canHaveFrameSlots() const { return flags_ & CanHaveFrameSlots; }
+
+ bool canHaveEnvironmentSlots() const {
+ return flags_ & CanHaveEnvironmentSlots;
+ }
+
+ typename AbstractBindingName<NameT>::NamePointerT name() const {
+ MOZ_ASSERT(!done());
+ return names_[index_].name();
+ }
+
+ bool closedOver() const {
+ MOZ_ASSERT(!done());
+ return names_[index_].closedOver();
+ }
+
+ BindingLocation location() const {
+ MOZ_ASSERT(!done());
+ if (!(flags_ & CanHaveSlotsMask)) {
+ return BindingLocation::Global();
+ }
+ if (index_ < positionalFormalStart_) {
+ return BindingLocation::Import();
+ }
+ if (closedOver()) {
+ MOZ_ASSERT(canHaveEnvironmentSlots());
+ return BindingLocation::Environment(environmentSlot_);
+ }
+ if (index_ < nonPositionalFormalStart_ && canHaveArgumentSlots()) {
+ return BindingLocation::Argument(argumentSlot_);
+ }
+ if (canHaveFrameSlots()) {
+ return BindingLocation::Frame(frameSlot_);
+ }
+ MOZ_ASSERT(isNamedLambda());
+ return BindingLocation::NamedLambdaCallee();
+ }
+
+ BindingKind kind() const {
+ MOZ_ASSERT(!done());
+ if (index_ < positionalFormalStart_) {
+ return BindingKind::Import;
+ }
+ if (index_ < varStart_) {
+ // When the parameter list has expressions, the parameters act
+ // like lexical bindings and have TDZ.
+ if (hasFormalParameterExprs()) {
+ return BindingKind::Let;
+ }
+ return BindingKind::FormalParameter;
+ }
+ if (index_ < letStart_) {
+ return BindingKind::Var;
+ }
+ if (index_ < constStart_) {
+ return BindingKind::Let;
+ }
+ if (index_ < syntheticStart_) {
+ return isNamedLambda() ? BindingKind::NamedLambdaCallee
+ : BindingKind::Const;
+ }
+ if (index_ < privateMethodStart_) {
+ return BindingKind::Synthetic;
+ }
+ return BindingKind::PrivateMethod;
+ }
+
+ js::frontend::NameLocation nameLocation() const {
+ using js::frontend::NameLocation;
+
+ BindingKind bindKind = kind();
+ BindingLocation bl = location();
+ switch (bl.kind()) {
+ case BindingLocation::Kind::Global:
+ return NameLocation::Global(bindKind);
+ case BindingLocation::Kind::Argument:
+ return NameLocation::ArgumentSlot(bl.argumentSlot());
+ case BindingLocation::Kind::Frame:
+ return NameLocation::FrameSlot(bindKind, bl.slot());
+ case BindingLocation::Kind::Environment:
+ return NameLocation::EnvironmentCoordinate(bindKind, 0, bl.slot());
+ case BindingLocation::Kind::Import:
+ return NameLocation::Import();
+ case BindingLocation::Kind::NamedLambdaCallee:
+ return NameLocation::NamedLambdaCallee();
+ }
+    MOZ_CRASH("Bad BindingLocation kind");
+ }
+
+ bool isTopLevelFunction() const {
+ MOZ_ASSERT(!done());
+ bool result = names_[index_].isTopLevelFunction();
+ MOZ_ASSERT_IF(result, kind() == BindingKind::Var);
+ return result;
+ }
+
+ bool hasArgumentSlot() const {
+ MOZ_ASSERT(!done());
+ if (hasFormalParameterExprs()) {
+ return false;
+ }
+ return index_ >= positionalFormalStart_ &&
+ index_ < nonPositionalFormalStart_;
+ }
+
+ uint16_t argumentSlot() const {
+ MOZ_ASSERT(canHaveArgumentSlots());
+ return mozilla::AssertedCast<uint16_t>(index_);
+ }
+
+ uint32_t nextFrameSlot() const {
+ MOZ_ASSERT(canHaveFrameSlots());
+ return frameSlot_;
+ }
+
+ uint32_t nextEnvironmentSlot() const {
+ MOZ_ASSERT(canHaveEnvironmentSlots());
+ return environmentSlot_;
+ }
+};
+
+template <typename NameT>
+class AbstractBindingIter;
+
+template <>
+class AbstractBindingIter<JSAtom> : public BaseAbstractBindingIter<JSAtom> {
+ using Base = BaseAbstractBindingIter<JSAtom>;
+
+ public:
+ AbstractBindingIter(ScopeKind kind, BaseScopeData* data,
+ uint32_t firstFrameSlot);
+
+ explicit AbstractBindingIter(Scope* scope);
+ explicit AbstractBindingIter(JSScript* script);
+
+ using Base::Base;
+
+ inline void trace(JSTracer* trc) {
+ TraceNullableBindingNames(trc, names_, length_);
+ }
+};
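+
+// Example (a minimal sketch, given a Scope* scope; useFrameSlot() and
+// useEnvironmentSlot() are hypothetical consumers): dispatching on where
+// each binding is stored:
+//
+//   for (BindingIter bi(scope); bi; bi++) {
+//     BindingLocation loc = bi.location();
+//     if (loc.kind() == BindingLocation::Kind::Frame) {
+//       useFrameSlot(loc.slot());
+//     } else if (loc.kind() == BindingLocation::Kind::Environment) {
+//       useEnvironmentSlot(loc.slot());
+//     }
+//   }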
+
+template <>
+class AbstractBindingIter<frontend::TaggedParserAtomIndex>
+ : public BaseAbstractBindingIter<frontend::TaggedParserAtomIndex> {
+ using Base = BaseAbstractBindingIter<frontend::TaggedParserAtomIndex>;
+
+ public:
+ explicit AbstractBindingIter(const frontend::ScopeStencilRef& ref);
+
+ using Base::Base;
+};
+
+void DumpBindings(JSContext* cx, Scope* scope);
+JSAtom* FrameSlotName(JSScript* script, jsbytecode* pc);
+
+SharedShape* EmptyEnvironmentShape(JSContext* cx, const JSClass* cls,
+ uint32_t numSlots, ObjectFlags objectFlags);
+
+template <class T>
+SharedShape* EmptyEnvironmentShape(JSContext* cx) {
+ return EmptyEnvironmentShape(cx, &T::class_, T::RESERVED_SLOTS,
+ T::OBJECT_FLAGS);
+}
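+
+// For example (an illustrative sketch, assuming an environment class that
+// exposes the usual class_, RESERVED_SLOTS, and OBJECT_FLAGS members):
+//
+//   SharedShape* shape = EmptyEnvironmentShape<CallObject>(cx);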
+
+//
+// PositionalFormalParameterIter is a refinement of BindingIter that iterates
+// only over the positional formal parameters of a function.
+//
+template <typename NameT>
+class BasePositionalFormalParamterIter : public AbstractBindingIter<NameT> {
+ using Base = AbstractBindingIter<NameT>;
+
+ protected:
+ void settle() {
+ if (this->index_ >= this->nonPositionalFormalStart_) {
+ this->index_ = this->length_;
+ }
+ }
+
+ public:
+ using Base::Base;
+
+ void operator++(int) {
+ Base::operator++(1);
+ settle();
+ }
+
+ bool isDestructured() const { return !this->name(); }
+};
+
+template <typename NameT>
+class AbstractPositionalFormalParameterIter;
+
+template <>
+class AbstractPositionalFormalParameterIter<JSAtom>
+ : public BasePositionalFormalParamterIter<JSAtom> {
+ using Base = BasePositionalFormalParamterIter<JSAtom>;
+
+ public:
+ explicit AbstractPositionalFormalParameterIter(Scope* scope);
+ explicit AbstractPositionalFormalParameterIter(JSScript* script);
+
+ using Base::Base;
+};
+
+template <>
+class AbstractPositionalFormalParameterIter<frontend::TaggedParserAtomIndex>
+ : public BasePositionalFormalParamterIter<frontend::TaggedParserAtomIndex> {
+ using Base =
+ BasePositionalFormalParamterIter<frontend::TaggedParserAtomIndex>;
+
+ public:
+ AbstractPositionalFormalParameterIter(
+ FunctionScope::AbstractData<frontend::TaggedParserAtomIndex>& data,
+ bool hasParameterExprs)
+ : Base(data, hasParameterExprs) {
+ settle();
+ }
+
+ using Base::Base;
+};
+
+using PositionalFormalParameterIter =
+ AbstractPositionalFormalParameterIter<JSAtom>;
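+
+// Example (a minimal sketch, given a JSScript* script): counting a script's
+// non-destructured formals:
+//
+//   uint32_t n = 0;
+//   for (PositionalFormalParameterIter fi(script); fi; fi++) {
+//     if (!fi.isDestructured()) {
+//       n++;
+//     }
+//   }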
+
+//
+// Iterator for walking the scope chain.
+//
+// It may be placed in GC containers; for example:
+//
+// for (Rooted<ScopeIter> si(cx, ScopeIter(scope)); si; si++) {
+// use(si);
+// SomeMayGCOperation();
+// use(si);
+// }
+//
+class MOZ_STACK_CLASS ScopeIter {
+ Scope* scope_;
+
+ public:
+ explicit ScopeIter(Scope* scope) : scope_(scope) {}
+
+ explicit ScopeIter(JSScript* script);
+
+ explicit ScopeIter(const ScopeIter& si) = default;
+
+ bool done() const { return !scope_; }
+
+ explicit operator bool() const { return !done(); }
+
+ void operator++(int) {
+ MOZ_ASSERT(!done());
+ scope_ = scope_->enclosing();
+ }
+
+ Scope* scope() const {
+ MOZ_ASSERT(!done());
+ return scope_;
+ }
+
+ ScopeKind kind() const {
+ MOZ_ASSERT(!done());
+ return scope_->kind();
+ }
+
+  // Returns the shape of the environment if it is known. A scope can have a
+  // syntactic environment (hasSyntacticEnvironment()) yet no known shape,
+  // e.g., eval.
+ SharedShape* environmentShape() const { return scope()->environmentShape(); }
+
+ // Returns whether this scope has a syntactic environment (i.e., an
+ // Environment that isn't a non-syntactic With or NonSyntacticVariables)
+ // on the environment chain.
+ bool hasSyntacticEnvironment() const;
+
+ void trace(JSTracer* trc) {
+ if (scope_) {
+ TraceRoot(trc, &scope_, "scope iter scope");
+ }
+ }
+};
+
+//
+// Specializations of Rooted containers for the iterators.
+//
+
+template <typename Wrapper>
+class WrappedPtrOperations<BindingIter, Wrapper> {
+ const BindingIter& iter() const {
+ return static_cast<const Wrapper*>(this)->get();
+ }
+
+ public:
+ bool done() const { return iter().done(); }
+ explicit operator bool() const { return !done(); }
+ bool isLast() const { return iter().isLast(); }
+ bool canHaveArgumentSlots() const { return iter().canHaveArgumentSlots(); }
+ bool canHaveFrameSlots() const { return iter().canHaveFrameSlots(); }
+ bool canHaveEnvironmentSlots() const {
+ return iter().canHaveEnvironmentSlots();
+ }
+ JSAtom* name() const { return iter().name(); }
+ bool closedOver() const { return iter().closedOver(); }
+ BindingLocation location() const { return iter().location(); }
+ BindingKind kind() const { return iter().kind(); }
+ bool isTopLevelFunction() const { return iter().isTopLevelFunction(); }
+ bool hasArgumentSlot() const { return iter().hasArgumentSlot(); }
+ uint16_t argumentSlot() const { return iter().argumentSlot(); }
+ uint32_t nextFrameSlot() const { return iter().nextFrameSlot(); }
+ uint32_t nextEnvironmentSlot() const { return iter().nextEnvironmentSlot(); }
+};
+
+template <typename Wrapper>
+class MutableWrappedPtrOperations<BindingIter, Wrapper>
+ : public WrappedPtrOperations<BindingIter, Wrapper> {
+ BindingIter& iter() { return static_cast<Wrapper*>(this)->get(); }
+
+ public:
+ void operator++(int) { iter().operator++(1); }
+};
+
+template <typename Wrapper>
+class WrappedPtrOperations<ScopeIter, Wrapper> {
+ const ScopeIter& iter() const {
+ return static_cast<const Wrapper*>(this)->get();
+ }
+
+ public:
+ bool done() const { return iter().done(); }
+ explicit operator bool() const { return !done(); }
+ Scope* scope() const { return iter().scope(); }
+ ScopeKind kind() const { return iter().kind(); }
+ SharedShape* environmentShape() const { return iter().environmentShape(); }
+ bool hasSyntacticEnvironment() const {
+ return iter().hasSyntacticEnvironment();
+ }
+};
+
+template <typename Wrapper>
+class MutableWrappedPtrOperations<ScopeIter, Wrapper>
+ : public WrappedPtrOperations<ScopeIter, Wrapper> {
+ ScopeIter& iter() { return static_cast<Wrapper*>(this)->get(); }
+
+ public:
+ void operator++(int) { iter().operator++(1); }
+};
+
+SharedShape* CreateEnvironmentShape(JSContext* cx, BindingIter& bi,
+ const JSClass* cls, uint32_t numSlots,
+ ObjectFlags objectFlags);
+
+SharedShape* EmptyEnvironmentShape(JSContext* cx, const JSClass* cls,
+ uint32_t numSlots, ObjectFlags objectFlags);
+
+static inline size_t GetOffsetOfParserScopeDataTrailingNames(ScopeKind kind) {
+ switch (kind) {
+ // FunctionScope
+ case ScopeKind::Function:
+ return GetOffsetOfScopeDataTrailingNames<FunctionScope::ParserData>();
+
+ // VarScope
+ case ScopeKind::FunctionBodyVar:
+ return GetOffsetOfScopeDataTrailingNames<VarScope::ParserData>();
+
+ // LexicalScope
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ case ScopeKind::FunctionLexical:
+ return GetOffsetOfScopeDataTrailingNames<LexicalScope::ParserData>();
+
+ // ClassBodyScope
+ case ScopeKind::ClassBody:
+ return GetOffsetOfScopeDataTrailingNames<ClassBodyScope::ParserData>();
+
+ // EvalScope
+ case ScopeKind::Eval:
+ case ScopeKind::StrictEval:
+ return GetOffsetOfScopeDataTrailingNames<EvalScope::ParserData>();
+
+ // GlobalScope
+ case ScopeKind::Global:
+ case ScopeKind::NonSyntactic:
+ return GetOffsetOfScopeDataTrailingNames<GlobalScope::ParserData>();
+
+ // ModuleScope
+ case ScopeKind::Module:
+ return GetOffsetOfScopeDataTrailingNames<ModuleScope::ParserData>();
+
+ // WasmInstanceScope
+ case ScopeKind::WasmInstance:
+ return GetOffsetOfScopeDataTrailingNames<WasmInstanceScope::ParserData>();
+
+ // WasmFunctionScope
+ case ScopeKind::WasmFunction:
+ return GetOffsetOfScopeDataTrailingNames<WasmFunctionScope::ParserData>();
+
+ // WithScope doesn't have ScopeData.
+ case ScopeKind::With:
+ default:
+ MOZ_CRASH("Unexpected ScopeKind");
+ }
+
+ return 0;
+}
+
+inline size_t SizeOfParserScopeData(ScopeKind kind, uint32_t length) {
+ return GetOffsetOfParserScopeDataTrailingNames(kind) +
+ sizeof(AbstractBindingName<frontend::TaggedParserAtomIndex>) * length;
+}
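+
+// For example (an illustrative sketch):
+//
+//   SizeOfParserScopeData(ScopeKind::Lexical, 3) ==
+//       GetOffsetOfScopeDataTrailingNames<LexicalScope::ParserData>() +
+//       3 * sizeof(AbstractBindingName<frontend::TaggedParserAtomIndex>)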
+
+inline mozilla::Span<AbstractBindingName<frontend::TaggedParserAtomIndex>>
+GetParserScopeDataTrailingNames(
+ ScopeKind kind,
+ AbstractBaseScopeData<frontend::TaggedParserAtomIndex>* data) {
+ return mozilla::Span(
+ reinterpret_cast<AbstractBindingName<frontend::TaggedParserAtomIndex>*>(
+ uintptr_t(data) + GetOffsetOfParserScopeDataTrailingNames(kind)),
+ data->length);
+}
+
+} // namespace js
+
+namespace JS {
+
+template <>
+struct GCPolicy<js::ScopeKind> : public IgnoreGCPolicy<js::ScopeKind> {};
+
+template <typename T>
+struct ScopeDataGCPolicy : public NonGCPointerPolicy<T> {};
+
+#define DEFINE_SCOPE_DATA_GCPOLICY(Data) \
+ template <> \
+ struct MapTypeToRootKind<Data*> { \
+ static const RootKind kind = RootKind::Traceable; \
+ }; \
+ template <> \
+ struct GCPolicy<Data*> : public ScopeDataGCPolicy<Data*> {}
+
+DEFINE_SCOPE_DATA_GCPOLICY(js::LexicalScope::RuntimeData);
+DEFINE_SCOPE_DATA_GCPOLICY(js::ClassBodyScope::RuntimeData);
+DEFINE_SCOPE_DATA_GCPOLICY(js::FunctionScope::RuntimeData);
+DEFINE_SCOPE_DATA_GCPOLICY(js::VarScope::RuntimeData);
+DEFINE_SCOPE_DATA_GCPOLICY(js::GlobalScope::RuntimeData);
+DEFINE_SCOPE_DATA_GCPOLICY(js::EvalScope::RuntimeData);
+DEFINE_SCOPE_DATA_GCPOLICY(js::ModuleScope::RuntimeData);
+DEFINE_SCOPE_DATA_GCPOLICY(js::WasmFunctionScope::RuntimeData);
+
+#undef DEFINE_SCOPE_DATA_GCPOLICY
+
+namespace ubi {
+
+template <>
+class Concrete<js::Scope> : TracerConcrete<js::Scope> {
+ protected:
+ explicit Concrete(js::Scope* ptr) : TracerConcrete<js::Scope>(ptr) {}
+
+ public:
+ static void construct(void* storage, js::Scope* ptr) {
+ new (storage) Concrete(ptr);
+ }
+
+ CoarseType coarseType() const final { return CoarseType::Script; }
+
+ Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+ const char16_t* typeName() const override { return concreteTypeName; }
+ static const char16_t concreteTypeName[];
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif // vm_Scope_h
diff --git a/js/src/vm/ScopeKind.h b/js/src/vm/ScopeKind.h
new file mode 100644
index 0000000000..29f7643a28
--- /dev/null
+++ b/js/src/vm/ScopeKind.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ScopeKind_h
+#define vm_ScopeKind_h
+
+#include <stdint.h>
+
+namespace js {
+
+enum class ScopeKind : uint8_t {
+ // FunctionScope
+ Function,
+
+ // VarScope
+ FunctionBodyVar,
+
+ // LexicalScope
+ Lexical,
+ SimpleCatch,
+ Catch,
+ NamedLambda,
+ StrictNamedLambda,
+ FunctionLexical,
+ ClassBody,
+
+ // WithScope
+ With,
+
+ // EvalScope
+ Eval,
+ StrictEval,
+
+ // GlobalScope
+ Global,
+ NonSyntactic,
+
+ // ModuleScope
+ Module,
+
+ // WasmInstanceScope
+ WasmInstance,
+
+ // WasmFunctionScope
+ WasmFunction
+};
+
+} // namespace js
+
+#endif  // vm_ScopeKind_h
diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
new file mode 100644
index 0000000000..0dc990dc32
--- /dev/null
+++ b/js/src/vm/SelfHosting.cpp
@@ -0,0 +1,2784 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/SelfHosting.h"
+
+#include "mozilla/BinarySearch.h"
+#include "mozilla/Casting.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h" // mozilla::MakeScopeExit
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+
+#include <algorithm>
+#include <iterator>
+
+#include "jsdate.h"
+#include "jsfriendapi.h"
+#include "jsmath.h"
+#include "jsnum.h"
+#include "selfhosted.out.h"
+
+#include "builtin/Array.h"
+#include "builtin/BigInt.h"
+#ifdef JS_HAS_INTL_API
+# include "builtin/intl/Collator.h"
+# include "builtin/intl/DateTimeFormat.h"
+# include "builtin/intl/DisplayNames.h"
+# include "builtin/intl/IntlObject.h"
+# include "builtin/intl/ListFormat.h"
+# include "builtin/intl/Locale.h"
+# include "builtin/intl/NumberFormat.h"
+# include "builtin/intl/PluralRules.h"
+# include "builtin/intl/RelativeTimeFormat.h"
+#endif
+#include "builtin/MapObject.h"
+#include "builtin/Object.h"
+#include "builtin/Promise.h"
+#include "builtin/Reflect.h"
+#include "builtin/RegExp.h"
+#include "builtin/SelfHostingDefines.h"
+#include "builtin/String.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "builtin/TupleObject.h"
+#endif
+#include "frontend/BytecodeCompilation.h" // CompileGlobalScriptToStencil
+#include "frontend/CompilationStencil.h" // js::frontend::CompilationStencil
+#include "frontend/FrontendContext.h" // AutoReportFrontendContext
+#include "jit/AtomicOperations.h"
+#include "jit/InlinableNatives.h"
+#include "js/CompilationAndEvaluation.h"
+#include "js/Conversions.h"
+#include "js/ErrorReport.h" // JS::PrintError
+#include "js/experimental/JSStencil.h"
+#include "js/experimental/TypedData.h" // JS_GetArrayBufferViewType
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/HashTable.h"
+#include "js/Printer.h"
+#include "js/PropertySpec.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/SourceText.h" // JS::SourceText
+#include "js/TracingAPI.h"
+#include "js/Transcoding.h"
+#include "js/Warnings.h" // JS::{,Set}WarningReporter
+#include "js/Wrapper.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/AsyncFunction.h"
+#include "vm/AsyncIteration.h"
+#include "vm/BigIntType.h"
+#include "vm/Compression.h"
+#include "vm/DateObject.h"
+#include "vm/ErrorReporting.h" // js::MaybePrintAndClearPendingException
+#include "vm/FrameIter.h" // js::ScriptFrameIter
+#include "vm/GeneratorObject.h"
+#include "vm/Interpreter.h"
+#include "vm/Iteration.h"
+#include "vm/JSContext.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/PIC.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/Realm.h"
+#include "vm/RegExpObject.h"
+#include "vm/StringType.h"
+#include "vm/ToSource.h" // js::ValueToSource
+#include "vm/TypedArrayObject.h"
+#include "vm/Uint8Clamped.h"
+#include "vm/WrapperObject.h"
+
+#include "vm/Compartment-inl.h"
+#include "vm/JSAtom-inl.h"
+#include "vm/JSFunction-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/TypedArrayObject-inl.h"
+
+using namespace js;
+using namespace js::selfhosted;
+
+using JS::CompileOptions;
+using mozilla::Maybe;
+
+static bool intrinsic_ToObject(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ JSObject* obj = ToObject(cx, args[0]);
+ if (!obj) {
+ return false;
+ }
+ args.rval().setObject(*obj);
+ return true;
+}
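+
+// Self-hosted JS refers to intrinsics like the one above by bare name; an
+// illustrative sketch of a call site:
+//
+//   var obj = ToObject(v);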
+
+#ifdef ENABLE_RECORD_TUPLE
+
+bool intrinsic_ThisTupleValue(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ mozilla::Maybe<TupleType&> result = js::ThisTupleValue(cx, args[0]);
+ if (!result) {
+ return false;
+ }
+ args.rval().setExtendedPrimitive(*result);
+ return true;
+}
+
+bool intrinsic_TupleLength(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ mozilla::Maybe<TupleType&> result = js::ThisTupleValue(cx, args[0]);
+ if (!result) {
+ return false;
+ }
+ args.rval().setInt32((*result).getDenseInitializedLength());
+ return true;
+}
+#endif
+
+static bool intrinsic_IsObject(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ Value val = args[0];
+ bool isObject = val.isObject();
+ args.rval().setBoolean(isObject);
+ return true;
+}
+
+static bool intrinsic_IsArray(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ RootedValue val(cx, args[0]);
+ if (val.isObject()) {
+ RootedObject obj(cx, &val.toObject());
+ bool isArray = false;
+ if (!IsArray(cx, obj, &isArray)) {
+ return false;
+ }
+ args.rval().setBoolean(isArray);
+ } else {
+ args.rval().setBoolean(false);
+ }
+ return true;
+}
+
+#ifdef ENABLE_RECORD_TUPLE
+// returns true for TupleTypes and TupleObjects
+bool js::IsTupleUnchecked(JSContext* cx, const CallArgs& args) {
+ args.rval().setBoolean(IsTuple(args.get(0)));
+ return true;
+}
+
+/* Identical to Tuple.prototype.isTuple, but with an
+ * added check that args.length() is 1
+ */
+bool js::intrinsic_IsTuple(JSContext* cx, unsigned argc, JS::Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ return js::IsTupleUnchecked(cx, args);
+}
+#endif
+
+static bool intrinsic_IsCrossRealmArrayConstructor(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ bool result = false;
+ if (!IsCrossRealmArrayConstructor(cx, &args[0].toObject(), &result)) {
+ return false;
+ }
+ args.rval().setBoolean(result);
+ return true;
+}
+
+static bool intrinsic_ToLength(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+
+ // Inline fast path for the common case.
+ if (args[0].isInt32()) {
+ int32_t i = args[0].toInt32();
+ args.rval().setInt32(i < 0 ? 0 : i);
+ return true;
+ }
+
+ uint64_t length = 0;
+ if (!ToLength(cx, args[0], &length)) {
+ return false;
+ }
+
+ args.rval().setNumber(double(length));
+ return true;
+}
+
+static bool intrinsic_ToInteger(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ double result;
+ if (!ToInteger(cx, args[0], &result)) {
+ return false;
+ }
+ args.rval().setNumber(result);
+ return true;
+}
+
+static bool intrinsic_ToSource(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ JSString* str = ValueToSource(cx, args[0]);
+ if (!str) {
+ return false;
+ }
+ args.rval().setString(str);
+ return true;
+}
+
+static bool intrinsic_ToPropertyKey(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, args[0], &id)) {
+ return false;
+ }
+
+ args.rval().set(IdToValue(id));
+ return true;
+}
+
+static bool intrinsic_IsCallable(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setBoolean(IsCallable(args[0]));
+ return true;
+}
+
+static bool intrinsic_IsConstructor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ args.rval().setBoolean(IsConstructor(args[0]));
+ return true;
+}
+
+template <typename T>
+static bool intrinsic_IsInstanceOfBuiltin(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ args.rval().setBoolean(args[0].toObject().is<T>());
+ return true;
+}
+
+template <typename T>
+static bool intrinsic_GuardToBuiltin(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ if (args[0].toObject().is<T>()) {
+ args.rval().setObject(args[0].toObject());
+ return true;
+ }
+ args.rval().setNull();
+ return true;
+}
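+
+// Illustrative self-hosted use of a GuardTo* instantiation (assuming one is
+// registered for the builtin in question, e.g. GuardToMapObject):
+//
+//   var map = GuardToMapObject(obj);
+//   if (map !== null) {
+//     // map is known to be a MapObject here.
+//   }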
+
+template <typename T>
+static bool intrinsic_IsWrappedInstanceOfBuiltin(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ JSObject* obj = &args[0].toObject();
+ if (!obj->is<WrapperObject>()) {
+ args.rval().setBoolean(false);
+ return true;
+ }
+
+ JSObject* unwrapped = CheckedUnwrapDynamic(obj, cx);
+ if (!unwrapped) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ args.rval().setBoolean(unwrapped->is<T>());
+ return true;
+}
+
+template <typename T>
+static bool intrinsic_IsPossiblyWrappedInstanceOfBuiltin(JSContext* cx,
+ unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ JSObject* obj = CheckedUnwrapDynamic(&args[0].toObject(), cx);
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ args.rval().setBoolean(obj->is<T>());
+ return true;
+}
+
+static bool intrinsic_SubstringKernel(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args[0].isString());
+ MOZ_RELEASE_ASSERT(args[1].isInt32());
+ MOZ_RELEASE_ASSERT(args[2].isInt32());
+
+ RootedString str(cx, args[0].toString());
+ int32_t begin = args[1].toInt32();
+ int32_t length = args[2].toInt32();
+
+ JSString* substr = SubstringKernel(cx, str, begin, length);
+ if (!substr) {
+ return false;
+ }
+
+ args.rval().setString(substr);
+ return true;
+}
+
+static void ThrowErrorWithType(JSContext* cx, JSExnType type,
+ const CallArgs& args) {
+ MOZ_RELEASE_ASSERT(args[0].isInt32());
+ uint32_t errorNumber = args[0].toInt32();
+
+#ifdef DEBUG
+ const JSErrorFormatString* efs = GetErrorMessage(nullptr, errorNumber);
+ MOZ_ASSERT(efs->argCount == args.length() - 1);
+ MOZ_ASSERT(efs->exnType == type,
+ "error-throwing intrinsic and error number are inconsistent");
+#endif
+
+ UniqueChars errorArgs[3];
+ for (unsigned i = 1; i < 4 && i < args.length(); i++) {
+ HandleValue val = args[i];
+ if (val.isInt32() || val.isString()) {
+ JSString* str = ToString<CanGC>(cx, val);
+ if (!str) {
+ return;
+ }
+ errorArgs[i - 1] = QuoteString(cx, str);
+ } else {
+ errorArgs[i - 1] =
+ DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, val, nullptr);
+ }
+ if (!errorArgs[i - 1]) {
+ return;
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber,
+ errorArgs[0].get(), errorArgs[1].get(),
+ errorArgs[2].get());
+}
+
+static bool intrinsic_ThrowRangeError(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() >= 1);
+
+ ThrowErrorWithType(cx, JSEXN_RANGEERR, args);
+ return false;
+}
+
+static bool intrinsic_ThrowTypeError(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() >= 1);
+
+ ThrowErrorWithType(cx, JSEXN_TYPEERR, args);
+ return false;
+}
+
+static bool intrinsic_ThrowAggregateError(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() >= 1);
+
+ ThrowErrorWithType(cx, JSEXN_AGGREGATEERR, args);
+ return false;
+}
+
+static bool intrinsic_ThrowInternalError(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() >= 1);
+
+ ThrowErrorWithType(cx, JSEXN_INTERNALERR, args);
+ return false;
+}
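+
+// From self-hosted JS, these throw intrinsics take a message number followed
+// by up to three message arguments; an illustrative sketch (the message
+// constant is only an example):
+//
+//   ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "Map", "get", typeof v);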
+
+/**
+ * Handles an assertion failure in self-hosted code just like an assertion
+ * failure in C++ code. Information about the failure can be provided in
+ * args[0].
+ */
+static bool intrinsic_AssertionFailed(JSContext* cx, unsigned argc, Value* vp) {
+#ifdef DEBUG
+ CallArgs args = CallArgsFromVp(argc, vp);
+ if (args.length() > 0) {
+ // try to dump the informative string
+ JSString* str = ToString<CanGC>(cx, args[0]);
+ if (str) {
+ js::Fprinter out(stderr);
+ out.put("Self-hosted JavaScript assertion info: ");
+ str->dumpCharsNoNewline(out);
+ out.putChar('\n');
+ }
+ }
+#endif
+ MOZ_ASSERT(false);
+ return false;
+}
+
+/**
+ * Dumps a message to stderr, after stringifying it, followed by a newline.
+ */
+static bool intrinsic_DumpMessage(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+#ifdef DEBUG
+ if (args.length() > 0) {
+ // try to dump the informative string
+ js::Fprinter out(stderr);
+ JSString* str = ToString<CanGC>(cx, args[0]);
+ if (str) {
+ str->dumpCharsNoNewline(out);
+ out.putChar('\n');
+ } else {
+ cx->recoverFromOutOfMemory();
+ }
+ }
+#endif
+ args.rval().setUndefined();
+ return true;
+}
+
+/*
+ * Used to decompile values in the nearest non-builtin stack frame, falling
+ * back to decompiling in the current frame. Helpful for printing higher-order
+ * function arguments.
+ *
+ * The user must supply the argument number of the value in question; it
+ * _cannot_ be automatically determined.
+ */
+static bool intrinsic_DecompileArg(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+ MOZ_RELEASE_ASSERT(args[0].isInt32());
+
+ HandleValue value = args[1];
+ JSString* str = DecompileArgument(cx, args[0].toInt32(), value);
+ if (!str) {
+ return false;
+ }
+ args.rval().setString(str);
+ return true;
+}
+
+static bool intrinsic_DefineDataProperty(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // When DefineDataProperty is called with 3 arguments, it's compiled to
+ // JSOp::InitElem in the bytecode emitter so we shouldn't get here.
+ MOZ_ASSERT(args.length() == 4);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_RELEASE_ASSERT(args[3].isInt32());
+
+ RootedObject obj(cx, &args[0].toObject());
+ RootedId id(cx);
+ if (!ToPropertyKey(cx, args[1], &id)) {
+ return false;
+ }
+ RootedValue value(cx, args[2]);
+
+ JS::PropertyAttributes attrs;
+ unsigned attributes = args[3].toInt32();
+
+  MOZ_ASSERT(bool(attributes & ATTR_ENUMERABLE) !=
+                 bool(attributes & ATTR_NONENUMERABLE),
+             "DefineDataProperty must receive exactly one of "
+             "ATTR_ENUMERABLE and ATTR_NONENUMERABLE");
+ if (attributes & ATTR_ENUMERABLE) {
+ attrs += JS::PropertyAttribute::Enumerable;
+ }
+
+  MOZ_ASSERT(bool(attributes & ATTR_CONFIGURABLE) !=
+                 bool(attributes & ATTR_NONCONFIGURABLE),
+             "DefineDataProperty must receive exactly one of "
+             "ATTR_CONFIGURABLE and ATTR_NONCONFIGURABLE");
+ if (attributes & ATTR_CONFIGURABLE) {
+ attrs += JS::PropertyAttribute::Configurable;
+ }
+
+  MOZ_ASSERT(
+      bool(attributes & ATTR_WRITABLE) != bool(attributes & ATTR_NONWRITABLE),
+      "DefineDataProperty must receive exactly one of ATTR_WRITABLE "
+      "and ATTR_NONWRITABLE");
+ if (attributes & ATTR_WRITABLE) {
+ attrs += JS::PropertyAttribute::Writable;
+ }
+
+ Rooted<PropertyDescriptor> desc(cx, PropertyDescriptor::Data(value, attrs));
+ if (!DefineProperty(cx, obj, id, desc)) {
+ return false;
+ }
+
+ args.rval().setUndefined();
+ return true;
+}
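+
+// Illustrative self-hosted call site for the 4-argument form (the 3-argument
+// form compiles to JSOp::InitElem and never reaches this native):
+//
+//   DefineDataProperty(obj, key, value,
+//                      ATTR_ENUMERABLE | ATTR_CONFIGURABLE | ATTR_WRITABLE);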
+
+static bool intrinsic_DefineProperty(JSContext* cx, unsigned argc, Value* vp) {
+ // _DefineProperty(object, propertyKey, attributes,
+ // valueOrGetter, setter, strict)
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 6);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_ASSERT(args[1].isString() || args[1].isNumber() || args[1].isSymbol());
+ MOZ_RELEASE_ASSERT(args[2].isInt32());
+ MOZ_ASSERT(args[5].isBoolean());
+
+ RootedObject obj(cx, &args[0].toObject());
+ RootedId id(cx);
+ if (!PrimitiveValueToId<CanGC>(cx, args[1], &id)) {
+ return false;
+ }
+
+ Rooted<PropertyDescriptor> desc(cx, PropertyDescriptor::Empty());
+
+ unsigned attributes = args[2].toInt32();
+ if (attributes & (ATTR_ENUMERABLE | ATTR_NONENUMERABLE)) {
+ desc.setEnumerable(attributes & ATTR_ENUMERABLE);
+ }
+
+ if (attributes & (ATTR_CONFIGURABLE | ATTR_NONCONFIGURABLE)) {
+ desc.setConfigurable(attributes & ATTR_CONFIGURABLE);
+ }
+
+ if (attributes & (ATTR_WRITABLE | ATTR_NONWRITABLE)) {
+ desc.setWritable(attributes & ATTR_WRITABLE);
+ }
+
+ // When args[4] is |null|, the data descriptor has a value component.
+ if ((attributes & DATA_DESCRIPTOR_KIND) && args[4].isNull()) {
+ desc.setValue(args[3]);
+ }
+
+ if (attributes & ACCESSOR_DESCRIPTOR_KIND) {
+ Value getter = args[3];
+ if (getter.isObject()) {
+ desc.setGetter(&getter.toObject());
+ } else if (getter.isUndefined()) {
+ desc.setGetter(nullptr);
+ } else {
+ MOZ_ASSERT(getter.isNull());
+ }
+
+ Value setter = args[4];
+ if (setter.isObject()) {
+ desc.setSetter(&setter.toObject());
+ } else if (setter.isUndefined()) {
+ desc.setSetter(nullptr);
+ } else {
+ MOZ_ASSERT(setter.isNull());
+ }
+ }
+
+ desc.assertValid();
+
+ ObjectOpResult result;
+ if (!DefineProperty(cx, obj, id, desc, result)) {
+ return false;
+ }
+
+ bool strict = args[5].toBoolean();
+ if (strict && !result.ok()) {
+    // We need to tell our caller, Object.defineProperty, that this
+    // operation failed without actually throwing, for web-compatibility
+    // reasons.
+ if (result.failureCode() == JSMSG_CANT_DEFINE_WINDOW_NC) {
+ args.rval().setBoolean(false);
+ return true;
+ }
+
+ return result.reportError(cx, obj, id);
+ }
+
+ args.rval().setBoolean(result.ok());
+ return true;
+}
+
+static bool intrinsic_ObjectHasPrototype(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+
+ // Self-hosted code calls this intrinsic with builtin prototypes. These are
+ // always native objects.
+ auto* obj = &args[0].toObject().as<NativeObject>();
+ auto* proto = &args[1].toObject().as<NativeObject>();
+
+ JSObject* actualProto = obj->staticPrototype();
+ args.rval().setBoolean(actualProto == proto);
+ return true;
+}
+
+static bool intrinsic_UnsafeSetReservedSlot(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 3);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_RELEASE_ASSERT(args[1].isInt32());
+ MOZ_ASSERT(args[1].toInt32() >= 0);
+
+ uint32_t slot = uint32_t(args[1].toInt32());
+ args[0].toObject().as<NativeObject>().setReservedSlot(slot, args[2]);
+ args.rval().setUndefined();
+ return true;
+}
+
+static bool intrinsic_UnsafeGetReservedSlot(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_RELEASE_ASSERT(args[1].isInt32());
+ MOZ_ASSERT(args[1].toInt32() >= 0);
+
+ uint32_t slot = uint32_t(args[1].toInt32());
+ args.rval().set(args[0].toObject().as<NativeObject>().getReservedSlot(slot));
+ return true;
+}
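+
+// Illustrative self-hosted usage (SLOT_INDEX stands in for a reserved-slot
+// constant from SelfHostingDefines.h; the name is hypothetical):
+//
+//   var v = UnsafeGetReservedSlot(obj, SLOT_INDEX);
+//   UnsafeSetReservedSlot(obj, SLOT_INDEX, newValue);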
+
+static bool intrinsic_UnsafeGetObjectFromReservedSlot(JSContext* cx,
+ unsigned argc,
+ Value* vp) {
+ if (!intrinsic_UnsafeGetReservedSlot(cx, argc, vp)) {
+ return false;
+ }
+ MOZ_ASSERT(vp->isObject());
+ return true;
+}
+
+static bool intrinsic_UnsafeGetInt32FromReservedSlot(JSContext* cx,
+ unsigned argc, Value* vp) {
+ if (!intrinsic_UnsafeGetReservedSlot(cx, argc, vp)) {
+ return false;
+ }
+ MOZ_ASSERT(vp->isInt32());
+ return true;
+}
+
+static bool intrinsic_UnsafeGetStringFromReservedSlot(JSContext* cx,
+ unsigned argc,
+ Value* vp) {
+ if (!intrinsic_UnsafeGetReservedSlot(cx, argc, vp)) {
+ return false;
+ }
+ MOZ_ASSERT(vp->isString());
+ return true;
+}
+
+static bool intrinsic_ThisTimeValue(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isInt32());
+
+ const char* name = nullptr;
+
+ int32_t method = args[0].toInt32();
+ if (method == DATE_METHOD_LOCALE_TIME_STRING) {
+ name = "toLocaleTimeString";
+ } else if (method == DATE_METHOD_LOCALE_DATE_STRING) {
+ name = "toLocaleDateString";
+ } else {
+ MOZ_ASSERT(method == DATE_METHOD_LOCALE_STRING);
+ name = "toLocaleString";
+ }
+
+ auto* unwrapped = UnwrapAndTypeCheckThis<DateObject>(cx, args, name);
+ if (!unwrapped) {
+ return false;
+ }
+
+ args.rval().set(unwrapped->UTCTime());
+ return true;
+}
+
+static bool intrinsic_IsPackedArray(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+ args.rval().setBoolean(IsPackedArray(&args[0].toObject()));
+ return true;
+}
+
+bool js::intrinsic_NewArrayIterator(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JSObject* obj = NewArrayIterator(cx);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+static bool intrinsic_ArrayIteratorPrototypeOptimizable(JSContext* cx,
+ unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ ForOfPIC::Chain* stubChain = ForOfPIC::getOrCreate(cx);
+ if (!stubChain) {
+ return false;
+ }
+
+ bool optimized;
+ if (!stubChain->tryOptimizeArrayIteratorNext(cx, &optimized)) {
+ return false;
+ }
+ args.rval().setBoolean(optimized);
+ return true;
+}
+
+static bool intrinsic_GetNextMapEntryForIterator(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+ MOZ_ASSERT(args[0].toObject().is<MapIteratorObject>());
+ MOZ_ASSERT(args[1].isObject());
+
+ MapIteratorObject* mapIterator = &args[0].toObject().as<MapIteratorObject>();
+ ArrayObject* result = &args[1].toObject().as<ArrayObject>();
+
+ args.rval().setBoolean(MapIteratorObject::next(mapIterator, result));
+ return true;
+}
+
+static bool intrinsic_CreateMapIterationResultPair(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JSObject* result = MapIteratorObject::createResultPair(cx);
+ if (!result) {
+ return false;
+ }
+
+ args.rval().setObject(*result);
+ return true;
+}
+
+static bool intrinsic_GetNextSetEntryForIterator(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+ MOZ_ASSERT(args[0].toObject().is<SetIteratorObject>());
+ MOZ_ASSERT(args[1].isObject());
+
+ SetIteratorObject* setIterator = &args[0].toObject().as<SetIteratorObject>();
+ ArrayObject* result = &args[1].toObject().as<ArrayObject>();
+
+ args.rval().setBoolean(SetIteratorObject::next(setIterator, result));
+ return true;
+}
+
+static bool intrinsic_CreateSetIterationResult(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JSObject* result = SetIteratorObject::createResult(cx);
+ if (!result) {
+ return false;
+ }
+
+ args.rval().setObject(*result);
+ return true;
+}
+
+bool js::intrinsic_NewStringIterator(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JSObject* obj = NewStringIterator(cx);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+bool js::intrinsic_NewRegExpStringIterator(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JSObject* obj = NewRegExpStringIterator(cx);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+js::PropertyName* js::GetClonedSelfHostedFunctionName(const JSFunction* fun) {
+ if (!fun->isExtended()) {
+ return nullptr;
+ }
+ Value name = fun->getExtendedSlot(LAZY_FUNCTION_NAME_SLOT);
+ if (!name.isString()) {
+ return nullptr;
+ }
+ return name.toString()->asAtom().asPropertyName();
+}
+
+bool js::IsExtendedUnclonedSelfHostedFunctionName(JSAtom* name) {
+ if (name->length() < 2) {
+ return false;
+ }
+ return name->latin1OrTwoByteChar(0) ==
+ ExtendedUnclonedSelfHostedFunctionNamePrefix;
+}
+
+void js::SetClonedSelfHostedFunctionName(JSFunction* fun,
+ js::PropertyName* name) {
+ fun->setExtendedSlot(LAZY_FUNCTION_NAME_SLOT, StringValue(name));
+}
+
+static bool intrinsic_GeneratorObjectIsClosed(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ GeneratorObject* genObj = &args[0].toObject().as<GeneratorObject>();
+ args.rval().setBoolean(genObj->isClosed());
+ return true;
+}
+
+static bool intrinsic_IsSuspendedGenerator(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+
+ if (!args[0].isObject() || !args[0].toObject().is<GeneratorObject>()) {
+ args.rval().setBoolean(false);
+ return true;
+ }
+
+ GeneratorObject& genObj = args[0].toObject().as<GeneratorObject>();
+ args.rval().setBoolean(!genObj.isClosed() && genObj.isSuspended());
+ return true;
+}
+
+static bool intrinsic_GeneratorIsRunning(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ GeneratorObject* genObj = &args[0].toObject().as<GeneratorObject>();
+ args.rval().setBoolean(genObj->isRunning());
+ return true;
+}
+
+static bool intrinsic_GeneratorSetClosed(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ GeneratorObject* genObj = &args[0].toObject().as<GeneratorObject>();
+ genObj->setClosed();
+ return true;
+}
+
+template <typename T>
+static bool intrinsic_ArrayBufferByteLength(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_ASSERT(args[0].toObject().is<T>());
+
+ size_t byteLength = args[0].toObject().as<T>().byteLength();
+ args.rval().setNumber(byteLength);
+ return true;
+}
+
+template <typename T>
+static bool intrinsic_PossiblyWrappedArrayBufferByteLength(JSContext* cx,
+ unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+
+ T* obj = args[0].toObject().maybeUnwrapAs<T>();
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ size_t byteLength = obj->byteLength();
+ args.rval().setNumber(byteLength);
+ return true;
+}
+
+static void AssertNonNegativeInteger(const Value& v) {
+ MOZ_ASSERT(v.isNumber());
+ MOZ_ASSERT(v.toNumber() >= 0);
+ MOZ_ASSERT(v.toNumber() < DOUBLE_INTEGRAL_PRECISION_LIMIT);
+ MOZ_ASSERT(JS::ToInteger(v.toNumber()) == v.toNumber());
+}
+
+template <typename T>
+static bool intrinsic_ArrayBufferCopyData(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 6);
+ AssertNonNegativeInteger(args[1]);
+ AssertNonNegativeInteger(args[3]);
+ AssertNonNegativeInteger(args[4]);
+
+ bool isWrapped = args[5].toBoolean();
+ Rooted<T*> toBuffer(cx);
+ if (!isWrapped) {
+ toBuffer = &args[0].toObject().as<T>();
+ } else {
+ JSObject* wrapped = &args[0].toObject();
+ MOZ_ASSERT(wrapped->is<WrapperObject>());
+ toBuffer = wrapped->maybeUnwrapAs<T>();
+ if (!toBuffer) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+ }
+ size_t toIndex = size_t(args[1].toNumber());
+ Rooted<T*> fromBuffer(cx, &args[2].toObject().as<T>());
+ size_t fromIndex = size_t(args[3].toNumber());
+ size_t count = size_t(args[4].toNumber());
+
+ T::copyData(toBuffer, toIndex, fromBuffer, fromIndex, count);
+
+ args.rval().setUndefined();
+ return true;
+}
+
+// Arguments must both be SharedArrayBuffer or wrapped SharedArrayBuffer.
+static bool intrinsic_SharedArrayBuffersMemorySame(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+
+ auto* lhs = args[0].toObject().maybeUnwrapAs<SharedArrayBufferObject>();
+ if (!lhs) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+ auto* rhs = args[1].toObject().maybeUnwrapAs<SharedArrayBufferObject>();
+ if (!rhs) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ args.rval().setBoolean(lhs->rawBufferObject() == rhs->rawBufferObject());
+ return true;
+}
+
+static bool intrinsic_GetTypedArrayKind(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ static_assert(TYPEDARRAY_KIND_INT8 == Scalar::Type::Int8,
+ "TYPEDARRAY_KIND_INT8 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_UINT8 == Scalar::Type::Uint8,
+ "TYPEDARRAY_KIND_UINT8 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_INT16 == Scalar::Type::Int16,
+ "TYPEDARRAY_KIND_INT16 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_UINT16 == Scalar::Type::Uint16,
+ "TYPEDARRAY_KIND_UINT16 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_INT32 == Scalar::Type::Int32,
+ "TYPEDARRAY_KIND_INT32 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_UINT32 == Scalar::Type::Uint32,
+ "TYPEDARRAY_KIND_UINT32 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_FLOAT32 == Scalar::Type::Float32,
+ "TYPEDARRAY_KIND_FLOAT32 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_FLOAT64 == Scalar::Type::Float64,
+ "TYPEDARRAY_KIND_FLOAT64 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_UINT8CLAMPED == Scalar::Type::Uint8Clamped,
+ "TYPEDARRAY_KIND_UINT8CLAMPED doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_BIGINT64 == Scalar::Type::BigInt64,
+ "TYPEDARRAY_KIND_BIGINT64 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_BIGUINT64 == Scalar::Type::BigUint64,
+ "TYPEDARRAY_KIND_BIGUINT64 doesn't match the scalar type");
+
+ JSObject* obj = &args[0].toObject();
+ Scalar::Type type = JS_GetArrayBufferViewType(obj);
+
+ args.rval().setInt32(static_cast<int32_t>(type));
+ return true;
+}
+
+static bool intrinsic_IsTypedArrayConstructor(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ args.rval().setBoolean(js::IsTypedArrayConstructor(&args[0].toObject()));
+ return true;
+}
+
+static bool intrinsic_TypedArrayBuffer(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(TypedArrayObject::is(args[0]));
+
+ Rooted<TypedArrayObject*> tarray(cx,
+ &args[0].toObject().as<TypedArrayObject>());
+ if (!TypedArrayObject::ensureHasBuffer(cx, tarray)) {
+ return false;
+ }
+
+ args.rval().set(tarray->bufferValue());
+ return true;
+}
+
+static bool intrinsic_TypedArrayByteOffset(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(TypedArrayObject::is(args[0]));
+
+ auto* tarr = &args[0].toObject().as<TypedArrayObject>();
+ args.rval().set(tarr->byteOffsetValue());
+ return true;
+}
+
+static bool intrinsic_TypedArrayElementSize(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(TypedArrayObject::is(args[0]));
+
+ unsigned size =
+ TypedArrayElemSize(args[0].toObject().as<TypedArrayObject>().type());
+ MOZ_ASSERT(size == 1 || size == 2 || size == 4 || size == 8);
+
+ args.rval().setInt32(mozilla::AssertedCast<int32_t>(size));
+ return true;
+}
+
+// Return the value of the [[ArrayLength]] internal slot of the TypedArray.
+static bool intrinsic_TypedArrayLength(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(TypedArrayObject::is(args[0]));
+
+ auto* tarr = &args[0].toObject().as<TypedArrayObject>();
+ args.rval().set(tarr->lengthValue());
+ return true;
+}
+
+static bool intrinsic_PossiblyWrappedTypedArrayLength(JSContext* cx,
+ unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ TypedArrayObject* obj = args[0].toObject().maybeUnwrapAs<TypedArrayObject>();
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ args.rval().set(obj->lengthValue());
+ return true;
+}
+
+static bool intrinsic_PossiblyWrappedTypedArrayHasDetachedBuffer(JSContext* cx,
+ unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ TypedArrayObject* obj = args[0].toObject().maybeUnwrapAs<TypedArrayObject>();
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ bool detached = obj->hasDetachedBuffer();
+ args.rval().setBoolean(detached);
+ return true;
+}
+
+// Extract the TypedArrayObject* underlying |obj| and return it. This method,
+// in a TOTALLY UNSAFE manner, completely violates the normal compartment
+// boundaries, returning an object not necessarily in the current compartment
+// or in |obj|'s compartment.
+//
+// All callers of this method are expected to sigil this TypedArrayObject*, and
+// all values and information derived from it, with an "unsafe" prefix, to
+// indicate the extreme caution required when dealing with such values.
+//
+// If calling code discipline ever fails to be maintained, it's gonna have a
+// bad time.
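+//
+// For an example of this discipline in action, see
+// intrinsic_TypedArrayBitwiseSlice below, which sigils its unwrapped pointer
+// as |unsafeTypedArrayCrossCompartment|.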
+static TypedArrayObject* DangerouslyUnwrapTypedArray(JSContext* cx,
+ JSObject* obj) {
+ // An unwrapped pointer to an object potentially on the other side of a
+ // compartment boundary! Isn't this such fun?
+ TypedArrayObject* unwrapped = obj->maybeUnwrapAs<TypedArrayObject>();
+ if (!unwrapped) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+
+ // Be super-duper careful using this, as we've just punched through
+ // the compartment boundary, and things like buffer() on this aren't
+ // same-compartment with anything else in the calling method.
+ return unwrapped;
+}
+
+// The specification requires us to perform bitwise copying when |sourceType|
+// and |targetType| are the same (ES2017, §22.2.3.24, step 15). Additionally,
+// as an optimization, we can also perform bitwise copying when |sourceType|
+// and |targetType| have compatible bit-level representations.
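+// For example, Int32 and Uint32 share a bit-level representation, so an
+// Int32Array can be copied bitwise into a Uint32Array, whereas Float32 is
+// only compatible with itself.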
+static bool IsTypedArrayBitwiseSlice(Scalar::Type sourceType,
+ Scalar::Type targetType) {
+ switch (sourceType) {
+ case Scalar::Int8:
+ return targetType == Scalar::Int8 || targetType == Scalar::Uint8;
+
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ return targetType == Scalar::Int8 || targetType == Scalar::Uint8 ||
+ targetType == Scalar::Uint8Clamped;
+
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ return targetType == Scalar::Int16 || targetType == Scalar::Uint16;
+
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ return targetType == Scalar::Int32 || targetType == Scalar::Uint32;
+
+ case Scalar::Float32:
+ return targetType == Scalar::Float32;
+
+ case Scalar::Float64:
+ return targetType == Scalar::Float64;
+
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ return targetType == Scalar::BigInt64 || targetType == Scalar::BigUint64;
+
+ default:
+ MOZ_CRASH("IsTypedArrayBitwiseSlice with a bogus typed array type");
+ }
+}
+
+static bool intrinsic_TypedArrayBitwiseSlice(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 4);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_ASSERT(args[1].isObject());
+ AssertNonNegativeInteger(args[2]);
+ AssertNonNegativeInteger(args[3]);
+
+ Rooted<TypedArrayObject*> source(cx,
+ &args[0].toObject().as<TypedArrayObject>());
+ MOZ_ASSERT(!source->hasDetachedBuffer());
+
+ // As directed by |DangerouslyUnwrapTypedArray|, sigil this pointer and all
+ // variables derived from it to counsel extreme caution here.
+ Rooted<TypedArrayObject*> unsafeTypedArrayCrossCompartment(cx);
+ unsafeTypedArrayCrossCompartment =
+ DangerouslyUnwrapTypedArray(cx, &args[1].toObject());
+ if (!unsafeTypedArrayCrossCompartment) {
+ return false;
+ }
+ MOZ_ASSERT(!unsafeTypedArrayCrossCompartment->hasDetachedBuffer());
+
+ Scalar::Type sourceType = source->type();
+ if (!IsTypedArrayBitwiseSlice(sourceType,
+ unsafeTypedArrayCrossCompartment->type())) {
+ args.rval().setBoolean(false);
+ return true;
+ }
+
+ size_t sourceOffset = size_t(args[2].toNumber());
+ size_t count = size_t(args[3].toNumber());
+
+ MOZ_ASSERT(count > 0 && count <= source->length());
+ MOZ_ASSERT(sourceOffset <= source->length() - count);
+ MOZ_ASSERT(count <= unsafeTypedArrayCrossCompartment->length());
+
+ size_t elementSize = TypedArrayElemSize(sourceType);
+ MOZ_ASSERT(elementSize ==
+ TypedArrayElemSize(unsafeTypedArrayCrossCompartment->type()));
+
+ SharedMem<uint8_t*> sourceData =
+ source->dataPointerEither().cast<uint8_t*>() + sourceOffset * elementSize;
+
+ SharedMem<uint8_t*> unsafeTargetDataCrossCompartment =
+ unsafeTypedArrayCrossCompartment->dataPointerEither().cast<uint8_t*>();
+
+ size_t byteLength = count * elementSize;
+
+ // The same-type case requires exact copying preserving the bit-level
+ // encoding of the source data, so use memcpy if possible. If source and
+ // target are the same buffer, we can't use memcpy (or memmove), because
+ // the specification requires sequential copying of the values. This case
+ // is only possible if a @@species constructor created a specifically
+ // crafted typed array. It won't happen in normal code and hence doesn't
+ // need to be optimized.
+ if (!TypedArrayObject::sameBuffer(source, unsafeTypedArrayCrossCompartment)) {
+ if (source->isSharedMemory() ||
+ unsafeTypedArrayCrossCompartment->isSharedMemory()) {
+ jit::AtomicOperations::memcpySafeWhenRacy(
+ unsafeTargetDataCrossCompartment, sourceData, byteLength);
+ } else {
+ memcpy(unsafeTargetDataCrossCompartment.unwrapUnshared(),
+ sourceData.unwrapUnshared(), byteLength);
+ }
+ } else {
+ using namespace jit;
+
+ for (; byteLength > 0; byteLength--) {
+ AtomicOperations::storeSafeWhenRacy(
+ unsafeTargetDataCrossCompartment++,
+ AtomicOperations::loadSafeWhenRacy(sourceData++));
+ }
+ }
+
+ args.rval().setBoolean(true);
+ return true;
+}
+
+static bool intrinsic_TypedArrayInitFromPackedArray(JSContext* cx,
+ unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_ASSERT(args[1].isObject());
+
+ Rooted<TypedArrayObject*> target(cx,
+ &args[0].toObject().as<TypedArrayObject>());
+ MOZ_ASSERT(!target->hasDetachedBuffer());
+ MOZ_ASSERT(!target->isSharedMemory());
+
+ Rooted<ArrayObject*> source(cx, &args[1].toObject().as<ArrayObject>());
+ MOZ_ASSERT(IsPackedArray(source));
+ MOZ_ASSERT(source->length() == target->length());
+
+ switch (target->type()) {
+#define INIT_TYPED_ARRAY(_, T, N) \
+ case Scalar::N: { \
+ if (!ElementSpecific<T, UnsharedOps>::initFromIterablePackedArray( \
+ cx, target, source)) { \
+ return false; \
+ } \
+ break; \
+ }
+ JS_FOR_EACH_TYPED_ARRAY(INIT_TYPED_ARRAY)
+#undef INIT_TYPED_ARRAY
+
+ default:
+ MOZ_CRASH(
+ "TypedArrayInitFromPackedArray with a typed array with bogus type");
+ }
+
+ args.rval().setUndefined();
+ return true;
+}
+
+template <bool ForTest>
+static bool intrinsic_RegExpBuiltinExec(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_ASSERT(args[0].toObject().is<RegExpObject>());
+ MOZ_ASSERT(args[1].isString());
+
+ Rooted<RegExpObject*> obj(cx, &args[0].toObject().as<RegExpObject>());
+ Rooted<JSString*> string(cx, args[1].toString());
+ return RegExpBuiltinExec(cx, obj, string, ForTest, args.rval());
+}
+
+template <bool ForTest>
+static bool intrinsic_RegExpExec(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_ASSERT(args[1].isString());
+
+ Rooted<JSObject*> obj(cx, &args[0].toObject());
+ Rooted<JSString*> string(cx, args[1].toString());
+ return RegExpExec(cx, obj, string, ForTest, args.rval());
+}
+
+static bool intrinsic_RegExpCreate(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ MOZ_ASSERT(args.length() == 1 || args.length() == 2);
+ MOZ_ASSERT_IF(args.length() == 2,
+ args[1].isString() || args[1].isUndefined());
+ MOZ_ASSERT(!args.isConstructing());
+
+ return RegExpCreate(cx, args[0], args.get(1), args.rval());
+}
+
+static bool intrinsic_RegExpGetSubstitution(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 6);
+
+ Rooted<ArrayObject*> matchResult(cx, &args[0].toObject().as<ArrayObject>());
+
+ Rooted<JSLinearString*> string(cx, args[1].toString()->ensureLinear(cx));
+ if (!string) {
+ return false;
+ }
+
+ int32_t position = int32_t(args[2].toNumber());
+ MOZ_ASSERT(position >= 0);
+
+ Rooted<JSLinearString*> replacement(cx, args[3].toString()->ensureLinear(cx));
+ if (!replacement) {
+ return false;
+ }
+
+ int32_t firstDollarIndex = int32_t(args[4].toNumber());
+ MOZ_ASSERT(firstDollarIndex >= 0);
+
+ RootedValue namedCaptures(cx, args[5]);
+ MOZ_ASSERT(namedCaptures.isUndefined() || namedCaptures.isObject());
+
+ return RegExpGetSubstitution(cx, matchResult, string, size_t(position),
+ replacement, size_t(firstDollarIndex),
+ namedCaptures, args.rval());
+}
+
+static bool intrinsic_StringReplaceString(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 3);
+
+ RootedString string(cx, args[0].toString());
+ RootedString pattern(cx, args[1].toString());
+ RootedString replacement(cx, args[2].toString());
+ JSString* result = str_replace_string_raw(cx, string, pattern, replacement);
+ if (!result) {
+ return false;
+ }
+
+ args.rval().setString(result);
+ return true;
+}
+
+static bool intrinsic_StringReplaceAllString(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 3);
+
+ RootedString string(cx, args[0].toString());
+ RootedString pattern(cx, args[1].toString());
+ RootedString replacement(cx, args[2].toString());
+ JSString* result =
+ str_replaceAll_string_raw(cx, string, pattern, replacement);
+ if (!result) {
+ return false;
+ }
+
+ args.rval().setString(result);
+ return true;
+}
+
+static bool intrinsic_StringSplitString(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+
+ RootedString string(cx, args[0].toString());
+ RootedString sep(cx, args[1].toString());
+
+ JSObject* aobj = StringSplitString(cx, string, sep, INT32_MAX);
+ if (!aobj) {
+ return false;
+ }
+
+ args.rval().setObject(*aobj);
+ return true;
+}
+
+static bool intrinsic_StringSplitStringLimit(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 3);
+
+ RootedString string(cx, args[0].toString());
+ RootedString sep(cx, args[1].toString());
+
+  // args[2] should already be in uint32 range, but it could have double type
+  // because of Ion optimization.
+ uint32_t limit = uint32_t(args[2].toNumber());
+ MOZ_ASSERT(limit > 0,
+ "Zero limit case is already handled in self-hosted code.");
+
+ JSObject* aobj = StringSplitString(cx, string, sep, limit);
+ if (!aobj) {
+ return false;
+ }
+
+ args.rval().setObject(*aobj);
+ return true;
+}
+
+bool CallSelfHostedNonGenericMethod(JSContext* cx, const CallArgs& args) {
+ // This function is called when a self-hosted method is invoked on a
+ // wrapper object, like a CrossCompartmentWrapper. The last argument is
+ // the name of the self-hosted function. The other arguments are the
+ // arguments to pass to this function.
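+  //
+  // An illustrative (hypothetical) self-hosted call site following this
+  // convention:
+  //
+  //   callFunction(CallSetMethodIfWrapped, this, arg, "SetMethodName")
+  //
+  // where the trailing string names the self-hosted method to re-invoke on
+  // the unwrapped |this|.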
+
+ MOZ_ASSERT(args.length() > 0);
+ Rooted<PropertyName*> name(
+ cx, args[args.length() - 1].toString()->asAtom().asPropertyName());
+
+ InvokeArgs args2(cx);
+ if (!args2.init(cx, args.length() - 1)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < args.length() - 1; i++) {
+ args2[i].set(args[i]);
+ }
+
+ return CallSelfHostedFunction(cx, name, args.thisv(), args2, args.rval());
+}
+
+#ifdef DEBUG
+bool js::CallSelfHostedFunction(JSContext* cx, const char* name,
+ HandleValue thisv, const AnyInvokeArgs& args,
+ MutableHandleValue rval) {
+ JSAtom* funAtom = Atomize(cx, name, strlen(name));
+ if (!funAtom) {
+ return false;
+ }
+ Rooted<PropertyName*> funName(cx, funAtom->asPropertyName());
+ return CallSelfHostedFunction(cx, funName, thisv, args, rval);
+}
+#endif
+
+bool js::CallSelfHostedFunction(JSContext* cx, Handle<PropertyName*> name,
+ HandleValue thisv, const AnyInvokeArgs& args,
+ MutableHandleValue rval) {
+ RootedValue fun(cx);
+ if (!GlobalObject::getIntrinsicValue(cx, cx->global(), name, &fun)) {
+ return false;
+ }
+ MOZ_ASSERT(fun.toObject().is<JSFunction>());
+
+ return Call(cx, fun, thisv, args, rval);
+}
+
+template <typename T>
+bool Is(HandleValue v) {
+ return v.isObject() && v.toObject().is<T>();
+}
+
+template <IsAcceptableThis Test>
+static bool CallNonGenericSelfhostedMethod(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<Test, CallSelfHostedNonGenericMethod>(cx, args);
+}
+
+bool js::IsCallSelfHostedNonGenericMethod(NativeImpl impl) {
+ return impl == CallSelfHostedNonGenericMethod;
+}
+
+bool js::ReportIncompatibleSelfHostedMethod(JSContext* cx,
+ Handle<Value> thisValue) {
+ // The contract for this function is the same as
+ // CallSelfHostedNonGenericMethod. The normal ReportIncompatible function
+  // doesn't work for self-hosted functions, because they always call through
+  // the various CallXXXMethodIfWrapped methods, which would be reported as the
+ // called function instead.
+
+  // Look up the self-hosted method that was invoked. But skip over
+ // internal self-hosted function frames, because those are never the
+ // actual self-hosted callee from external code. We can't just skip
+ // self-hosted things until we find a non-self-hosted one because of cases
+ // like array.sort(somethingSelfHosted), where we want to report the error
+ // in the somethingSelfHosted, not in the sort() call.
+
+ static const char* const internalNames[] = {
+ "IsTypedArrayEnsuringArrayBuffer",
+ "RegExpSearchSlowPath",
+ "RegExpReplaceSlowPath",
+ "RegExpMatchSlowPath",
+ };
+
+ ScriptFrameIter iter(cx);
+ MOZ_ASSERT(iter.isFunctionFrame());
+
+ while (!iter.done()) {
+ MOZ_ASSERT(iter.callee(cx)->isSelfHostedOrIntrinsic());
+ UniqueChars funNameBytes;
+ const char* funName =
+ GetFunctionNameBytes(cx, iter.callee(cx), &funNameBytes);
+ if (!funName) {
+ return false;
+ }
+ if (std::all_of(
+ std::begin(internalNames), std::end(internalNames),
+ [funName](auto* name) { return strcmp(funName, name) != 0; })) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_INCOMPATIBLE_METHOD, funName, "method",
+ InformalValueTypeName(thisValue));
+ return false;
+ }
+ ++iter;
+ }
+
+ MOZ_ASSERT_UNREACHABLE("How did we not find a useful self-hosted frame?");
+ return false;
+}
+
+#ifdef JS_HAS_INTL_API
+/**
+ * Returns the default locale as a well-formed, but not necessarily
+ * canonicalized, BCP-47 language tag.
+ */
+static bool intrinsic_RuntimeDefaultLocale(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ const char* locale = cx->runtime()->getDefaultLocale();
+ if (!locale) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEFAULT_LOCALE_ERROR);
+ return false;
+ }
+
+ JSString* jslocale = NewStringCopyZ<CanGC>(cx, locale);
+ if (!jslocale) {
+ return false;
+ }
+
+ args.rval().setString(jslocale);
+ return true;
+}
+
+static bool intrinsic_IsRuntimeDefaultLocale(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isString() || args[0].isUndefined());
+
+ // |undefined| is the default value when the Intl runtime caches haven't
+ // yet been initialized. Handle it the same way as a cache miss.
+ if (args[0].isUndefined()) {
+ args.rval().setBoolean(false);
+ return true;
+ }
+
+ const char* locale = cx->runtime()->getDefaultLocale();
+ if (!locale) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEFAULT_LOCALE_ERROR);
+ return false;
+ }
+
+ JSLinearString* str = args[0].toString()->ensureLinear(cx);
+ if (!str) {
+ return false;
+ }
+
+ bool equals = StringEqualsAscii(str, locale);
+ args.rval().setBoolean(equals);
+ return true;
+}
+#endif // JS_HAS_INTL_API
+
+static bool intrinsic_ConstructFunction(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 3);
+ MOZ_ASSERT(IsConstructor(args[0]));
+ MOZ_ASSERT(IsConstructor(args[1]));
+ MOZ_ASSERT(args[2].toObject().is<ArrayObject>());
+
+ Rooted<ArrayObject*> argsList(cx, &args[2].toObject().as<ArrayObject>());
+ uint32_t len = argsList->length();
+ ConstructArgs constructArgs(cx);
+ if (!constructArgs.init(cx, len)) {
+ return false;
+ }
+ for (uint32_t index = 0; index < len; index++) {
+ constructArgs[index].set(argsList->getDenseElement(index));
+ }
+
+ RootedObject res(cx);
+ if (!Construct(cx, args[0], constructArgs, args[1], &res)) {
+ return false;
+ }
+
+ args.rval().setObject(*res);
+ return true;
+}
+
+static bool intrinsic_IsConstructing(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ ScriptFrameIter iter(cx);
+ bool isConstructing = iter.isConstructing();
+ args.rval().setBoolean(isConstructing);
+ return true;
+}
+
+static bool intrinsic_ConstructorForTypedArray(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ MOZ_ASSERT(args[0].isObject());
+
+ auto* object = UnwrapAndDowncastValue<TypedArrayObject>(cx, args[0]);
+ if (!object) {
+ return false;
+ }
+
+ JSProtoKey protoKey = StandardProtoKeyOrNull(object);
+ MOZ_ASSERT(protoKey);
+
+ // While it may seem like an invariant that in any compartment,
+ // seeing a typed array object implies that the TypedArray constructor
+ // for that type is initialized on the compartment's global, this is not
+ // the case. When we construct a typed array given a cross-compartment
+ // ArrayBuffer, we put the constructed TypedArray in the same compartment
+ // as the ArrayBuffer. Since we use the prototype from the initial
+ // compartment, and never call the constructor in the ArrayBuffer's
+ // compartment from script, we are not guaranteed to have initialized
+ // the constructor.
+ JSObject* ctor = GlobalObject::getOrCreateConstructor(cx, protoKey);
+ if (!ctor) {
+ return false;
+ }
+
+ args.rval().setObject(*ctor);
+ return true;
+}
+
+static bool intrinsic_PromiseResolve(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 2);
+
+ RootedObject constructor(cx, &args[0].toObject());
+ JSObject* promise = js::PromiseResolve(cx, constructor, args[1]);
+ if (!promise) {
+ return false;
+ }
+
+ args.rval().setObject(*promise);
+ return true;
+}
+
+static bool intrinsic_CopyDataPropertiesOrGetOwnKeys(JSContext* cx,
+ unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 3);
+ MOZ_ASSERT(args[0].isObject());
+ MOZ_ASSERT(args[1].isObject());
+ MOZ_ASSERT(args[2].isObjectOrNull());
+
+ RootedObject target(cx, &args[0].toObject());
+ RootedObject from(cx, &args[1].toObject());
+ RootedObject excludedItems(cx, args[2].toObjectOrNull());
+
+ if (from->is<NativeObject>() && target->is<PlainObject>() &&
+ (!excludedItems || excludedItems->is<PlainObject>())) {
+ bool optimized;
+ if (!CopyDataPropertiesNative(
+ cx, target.as<PlainObject>(), from.as<NativeObject>(),
+ (excludedItems ? excludedItems.as<PlainObject>() : nullptr),
+ &optimized)) {
+ return false;
+ }
+
+ if (optimized) {
+ args.rval().setNull();
+ return true;
+ }
+ }
+
+ return GetOwnPropertyKeys(
+ cx, from, JSITER_OWNONLY | JSITER_HIDDEN | JSITER_SYMBOLS, args.rval());
+}
+
+static bool intrinsic_ToBigInt(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+ BigInt* res = ToBigInt(cx, args[0]);
+ if (!res) {
+ return false;
+ }
+ args.rval().setBigInt(res);
+ return true;
+}
+
+static bool intrinsic_NewWrapForValidIterator(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JSObject* obj = NewWrapForValidIterator(cx);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+static bool intrinsic_NewIteratorHelper(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JSObject* obj = NewIteratorHelper(cx);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+static bool intrinsic_NewAsyncIteratorHelper(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JSObject* obj = NewAsyncIteratorHelper(cx);
+ if (!obj) {
+ return false;
+ }
+
+ args.rval().setObject(*obj);
+ return true;
+}
+
+static bool intrinsic_NoPrivateGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_PRIVATE_SETTER_ONLY);
+
+ args.rval().setUndefined();
+ return false;
+}
+
+static bool intrinsic_newList(JSContext* cx, unsigned argc, js::Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 0);
+
+ ArrayObject* list = NewArrayWithNullProto(cx);
+ if (!list) {
+ return false;
+ }
+
+ args.rval().setObject(*list);
+ return true;
+}
+
+static const JSFunctionSpec intrinsic_functions[] = {
+ // Intrinsic helper functions
+ JS_INLINABLE_FN("ArrayBufferByteLength",
+ intrinsic_ArrayBufferByteLength<ArrayBufferObject>, 1, 0,
+ IntrinsicArrayBufferByteLength),
+ JS_FN("ArrayBufferCopyData",
+ intrinsic_ArrayBufferCopyData<ArrayBufferObject>, 6, 0),
+ JS_INLINABLE_FN("ArrayIteratorPrototypeOptimizable",
+ intrinsic_ArrayIteratorPrototypeOptimizable, 0, 0,
+ IntrinsicArrayIteratorPrototypeOptimizable),
+ JS_FN("ArrayNativeSort", intrinsic_ArrayNativeSort, 1, 0),
+ JS_FN("AssertionFailed", intrinsic_AssertionFailed, 1, 0),
+ JS_FN("CallArrayBufferMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<ArrayBufferObject>>, 2, 0),
+ JS_FN("CallArrayIteratorMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<ArrayIteratorObject>>, 2, 0),
+ JS_FN("CallAsyncIteratorHelperMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<AsyncIteratorHelperObject>>, 2, 0),
+ JS_FN("CallGeneratorMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<GeneratorObject>>, 2, 0),
+ JS_FN("CallIteratorHelperMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<IteratorHelperObject>>, 2, 0),
+ JS_FN("CallMapIteratorMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<MapIteratorObject>>, 2, 0),
+ JS_FN("CallMapMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<MapObject>>, 2, 0),
+ JS_FN("CallRegExpMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<RegExpObject>>, 2, 0),
+ JS_FN("CallRegExpStringIteratorMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<RegExpStringIteratorObject>>, 2, 0),
+ JS_FN("CallSetIteratorMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<SetIteratorObject>>, 2, 0),
+ JS_FN("CallSetMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<SetObject>>, 2, 0),
+ JS_FN("CallSharedArrayBufferMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<SharedArrayBufferObject>>, 2, 0),
+ JS_FN("CallStringIteratorMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<StringIteratorObject>>, 2, 0),
+ JS_FN("CallTypedArrayMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<TypedArrayObject>>, 2, 0),
+ JS_FN("CallWrapForValidIteratorMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<WrapForValidIteratorObject>>, 2, 0),
+ JS_FN("ConstructFunction", intrinsic_ConstructFunction, 2, 0),
+ JS_FN("ConstructorForTypedArray", intrinsic_ConstructorForTypedArray, 1, 0),
+ JS_FN("CopyDataPropertiesOrGetOwnKeys",
+ intrinsic_CopyDataPropertiesOrGetOwnKeys, 3, 0),
+ JS_FN("CreateMapIterationResultPair",
+ intrinsic_CreateMapIterationResultPair, 0, 0),
+ JS_FN("CreateSetIterationResult", intrinsic_CreateSetIterationResult, 0, 0),
+ JS_FN("DecompileArg", intrinsic_DecompileArg, 2, 0),
+ JS_FN("DefineDataProperty", intrinsic_DefineDataProperty, 4, 0),
+ JS_FN("DefineProperty", intrinsic_DefineProperty, 6, 0),
+ JS_FN("DumpMessage", intrinsic_DumpMessage, 1, 0),
+ JS_FN("FlatStringMatch", FlatStringMatch, 2, 0),
+ JS_FN("FlatStringSearch", FlatStringSearch, 2, 0),
+ JS_FN("GeneratorIsRunning", intrinsic_GeneratorIsRunning, 1, 0),
+ JS_FN("GeneratorObjectIsClosed", intrinsic_GeneratorObjectIsClosed, 1, 0),
+ JS_FN("GeneratorSetClosed", intrinsic_GeneratorSetClosed, 1, 0),
+ JS_FN("GetElemBaseForLambda", intrinsic_GetElemBaseForLambda, 1, 0),
+ JS_INLINABLE_FN("GetFirstDollarIndex", GetFirstDollarIndex, 1, 0,
+ GetFirstDollarIndex),
+ JS_INLINABLE_FN("GetNextMapEntryForIterator",
+ intrinsic_GetNextMapEntryForIterator, 2, 0,
+ IntrinsicGetNextMapEntryForIterator),
+ JS_INLINABLE_FN("GetNextSetEntryForIterator",
+ intrinsic_GetNextSetEntryForIterator, 2, 0,
+ IntrinsicGetNextSetEntryForIterator),
+ JS_FN("GetOwnPropertyDescriptorToArray", GetOwnPropertyDescriptorToArray, 2,
+ 0),
+ JS_FN("GetStringDataProperty", intrinsic_GetStringDataProperty, 2, 0),
+ JS_FN("GetTypedArrayKind", intrinsic_GetTypedArrayKind, 1, 0),
+ JS_INLINABLE_FN("GuardToArrayBuffer",
+ intrinsic_GuardToBuiltin<ArrayBufferObject>, 1, 0,
+ IntrinsicGuardToArrayBuffer),
+ JS_INLINABLE_FN("GuardToArrayIterator",
+ intrinsic_GuardToBuiltin<ArrayIteratorObject>, 1, 0,
+ IntrinsicGuardToArrayIterator),
+ JS_INLINABLE_FN("GuardToAsyncIteratorHelper",
+ intrinsic_GuardToBuiltin<AsyncIteratorHelperObject>, 1, 0,
+ IntrinsicGuardToAsyncIteratorHelper),
+ JS_INLINABLE_FN("GuardToIteratorHelper",
+ intrinsic_GuardToBuiltin<IteratorHelperObject>, 1, 0,
+ IntrinsicGuardToIteratorHelper),
+ JS_INLINABLE_FN("GuardToMapIterator",
+ intrinsic_GuardToBuiltin<MapIteratorObject>, 1, 0,
+ IntrinsicGuardToMapIterator),
+ JS_INLINABLE_FN("GuardToMapObject", intrinsic_GuardToBuiltin<MapObject>, 1,
+ 0, IntrinsicGuardToMapObject),
+ JS_INLINABLE_FN("GuardToRegExpStringIterator",
+ intrinsic_GuardToBuiltin<RegExpStringIteratorObject>, 1, 0,
+ IntrinsicGuardToRegExpStringIterator),
+ JS_INLINABLE_FN("GuardToSetIterator",
+ intrinsic_GuardToBuiltin<SetIteratorObject>, 1, 0,
+ IntrinsicGuardToSetIterator),
+ JS_INLINABLE_FN("GuardToSetObject", intrinsic_GuardToBuiltin<SetObject>, 1,
+ 0, IntrinsicGuardToSetObject),
+ JS_INLINABLE_FN("GuardToSharedArrayBuffer",
+ intrinsic_GuardToBuiltin<SharedArrayBufferObject>, 1, 0,
+ IntrinsicGuardToSharedArrayBuffer),
+ JS_INLINABLE_FN("GuardToStringIterator",
+ intrinsic_GuardToBuiltin<StringIteratorObject>, 1, 0,
+ IntrinsicGuardToStringIterator),
+ JS_INLINABLE_FN("GuardToWrapForValidIterator",
+ intrinsic_GuardToBuiltin<WrapForValidIteratorObject>, 1, 0,
+ IntrinsicGuardToWrapForValidIterator),
+ JS_FN("IntrinsicAsyncGeneratorNext", AsyncGeneratorNext, 1, 0),
+ JS_FN("IntrinsicAsyncGeneratorReturn", AsyncGeneratorReturn, 1, 0),
+ JS_FN("IntrinsicAsyncGeneratorThrow", AsyncGeneratorThrow, 1, 0),
+ JS_INLINABLE_FN("IsArray", intrinsic_IsArray, 1, 0, ArrayIsArray),
+ JS_FN("IsAsyncFunctionGeneratorObject",
+ intrinsic_IsInstanceOfBuiltin<AsyncFunctionGeneratorObject>, 1, 0),
+ JS_FN("IsAsyncGeneratorObject",
+ intrinsic_IsInstanceOfBuiltin<AsyncGeneratorObject>, 1, 0),
+ JS_INLINABLE_FN("IsCallable", intrinsic_IsCallable, 1, 0,
+ IntrinsicIsCallable),
+ JS_INLINABLE_FN("IsConstructing", intrinsic_IsConstructing, 0, 0,
+ IntrinsicIsConstructing),
+ JS_INLINABLE_FN("IsConstructor", intrinsic_IsConstructor, 1, 0,
+ IntrinsicIsConstructor),
+ JS_INLINABLE_FN("IsCrossRealmArrayConstructor",
+ intrinsic_IsCrossRealmArrayConstructor, 1, 0,
+ IntrinsicIsCrossRealmArrayConstructor),
+ JS_FN("IsGeneratorObject", intrinsic_IsInstanceOfBuiltin<GeneratorObject>,
+ 1, 0),
+ JS_INLINABLE_FN("IsObject", intrinsic_IsObject, 1, 0, IntrinsicIsObject),
+ JS_INLINABLE_FN("IsPackedArray", intrinsic_IsPackedArray, 1, 0,
+ IntrinsicIsPackedArray),
+ JS_INLINABLE_FN("IsPossiblyWrappedRegExpObject",
+ intrinsic_IsPossiblyWrappedInstanceOfBuiltin<RegExpObject>,
+ 1, 0, IsPossiblyWrappedRegExpObject),
+ JS_INLINABLE_FN(
+ "IsPossiblyWrappedTypedArray",
+ intrinsic_IsPossiblyWrappedInstanceOfBuiltin<TypedArrayObject>, 1, 0,
+ IntrinsicIsPossiblyWrappedTypedArray),
+ JS_INLINABLE_FN("IsRegExpObject",
+ intrinsic_IsInstanceOfBuiltin<RegExpObject>, 1, 0,
+ IsRegExpObject),
+ JS_INLINABLE_FN("IsSuspendedGenerator", intrinsic_IsSuspendedGenerator, 1,
+ 0, IntrinsicIsSuspendedGenerator),
+#ifdef ENABLE_RECORD_TUPLE
+ JS_FN("IsTuple", intrinsic_IsTuple, 1, 0),
+#endif
+ JS_INLINABLE_FN("IsTypedArray",
+ intrinsic_IsInstanceOfBuiltin<TypedArrayObject>, 1, 0,
+ IntrinsicIsTypedArray),
+ JS_INLINABLE_FN("IsTypedArrayConstructor",
+ intrinsic_IsTypedArrayConstructor, 1, 0,
+ IntrinsicIsTypedArrayConstructor),
+ JS_FN("IsWrappedArrayBuffer",
+ intrinsic_IsWrappedInstanceOfBuiltin<ArrayBufferObject>, 1, 0),
+ JS_FN("IsWrappedSharedArrayBuffer",
+ intrinsic_IsWrappedInstanceOfBuiltin<SharedArrayBufferObject>, 1, 0),
+ JS_INLINABLE_FN("NewArrayIterator", intrinsic_NewArrayIterator, 0, 0,
+ IntrinsicNewArrayIterator),
+ JS_FN("NewAsyncIteratorHelper", intrinsic_NewAsyncIteratorHelper, 0, 0),
+ JS_FN("NewIteratorHelper", intrinsic_NewIteratorHelper, 0, 0),
+ JS_INLINABLE_FN("NewRegExpStringIterator",
+ intrinsic_NewRegExpStringIterator, 0, 0,
+ IntrinsicNewRegExpStringIterator),
+ JS_INLINABLE_FN("NewStringIterator", intrinsic_NewStringIterator, 0, 0,
+ IntrinsicNewStringIterator),
+ JS_FN("NewWrapForValidIterator", intrinsic_NewWrapForValidIterator, 0, 0),
+ JS_FN("NoPrivateGetter", intrinsic_NoPrivateGetter, 1, 0),
+ JS_INLINABLE_FN("ObjectHasPrototype", intrinsic_ObjectHasPrototype, 2, 0,
+ IntrinsicObjectHasPrototype),
+ JS_INLINABLE_FN(
+ "PossiblyWrappedArrayBufferByteLength",
+ intrinsic_PossiblyWrappedArrayBufferByteLength<ArrayBufferObject>, 1, 0,
+ IntrinsicPossiblyWrappedArrayBufferByteLength),
+ JS_FN(
+ "PossiblyWrappedSharedArrayBufferByteLength",
+ intrinsic_PossiblyWrappedArrayBufferByteLength<SharedArrayBufferObject>,
+ 1, 0),
+ JS_FN("PossiblyWrappedTypedArrayHasDetachedBuffer",
+ intrinsic_PossiblyWrappedTypedArrayHasDetachedBuffer, 1, 0),
+ JS_INLINABLE_FN("PossiblyWrappedTypedArrayLength",
+ intrinsic_PossiblyWrappedTypedArrayLength, 1, 0,
+ IntrinsicPossiblyWrappedTypedArrayLength),
+ JS_FN("PromiseResolve", intrinsic_PromiseResolve, 2, 0),
+ JS_INLINABLE_FN("RegExpBuiltinExec", intrinsic_RegExpBuiltinExec<false>, 2,
+ 0, IntrinsicRegExpBuiltinExec),
+ JS_INLINABLE_FN("RegExpBuiltinExecForTest",
+ intrinsic_RegExpBuiltinExec<true>, 2, 0,
+ IntrinsicRegExpBuiltinExecForTest),
+ JS_FN("RegExpConstructRaw", regexp_construct_raw_flags, 2, 0),
+ JS_FN("RegExpCreate", intrinsic_RegExpCreate, 2, 0),
+ JS_INLINABLE_FN("RegExpExec", intrinsic_RegExpExec<false>, 2, 0,
+ IntrinsicRegExpExec),
+ JS_INLINABLE_FN("RegExpExecForTest", intrinsic_RegExpExec<true>, 2, 0,
+ IntrinsicRegExpExecForTest),
+ JS_FN("RegExpGetSubstitution", intrinsic_RegExpGetSubstitution, 5, 0),
+ JS_INLINABLE_FN("RegExpInstanceOptimizable", RegExpInstanceOptimizable, 1,
+ 0, RegExpInstanceOptimizable),
+ JS_INLINABLE_FN("RegExpMatcher", RegExpMatcher, 3, 0, RegExpMatcher),
+ JS_INLINABLE_FN("RegExpPrototypeOptimizable", RegExpPrototypeOptimizable, 1,
+ 0, RegExpPrototypeOptimizable),
+ JS_INLINABLE_FN("RegExpSearcher", RegExpSearcher, 3, 0, RegExpSearcher),
+ JS_INLINABLE_FN("SameValue", js::obj_is, 2, 0, ObjectIs),
+ JS_FN("SharedArrayBufferByteLength",
+ intrinsic_ArrayBufferByteLength<SharedArrayBufferObject>, 1, 0),
+ JS_FN("SharedArrayBufferCopyData",
+ intrinsic_ArrayBufferCopyData<SharedArrayBufferObject>, 6, 0),
+ JS_FN("SharedArrayBuffersMemorySame",
+ intrinsic_SharedArrayBuffersMemorySame, 2, 0),
+ JS_FN("StringReplaceAllString", intrinsic_StringReplaceAllString, 3, 0),
+ JS_INLINABLE_FN("StringReplaceString", intrinsic_StringReplaceString, 3, 0,
+ IntrinsicStringReplaceString),
+ JS_INLINABLE_FN("StringSplitString", intrinsic_StringSplitString, 2, 0,
+ IntrinsicStringSplitString),
+ JS_FN("StringSplitStringLimit", intrinsic_StringSplitStringLimit, 3, 0),
+ JS_INLINABLE_FN("SubstringKernel", intrinsic_SubstringKernel, 3, 0,
+ IntrinsicSubstringKernel),
+ JS_FN("ThisNumberValueForToLocaleString", ThisNumberValueForToLocaleString,
+ 0, 0),
+ JS_FN("ThisTimeValue", intrinsic_ThisTimeValue, 1, 0),
+#ifdef ENABLE_RECORD_TUPLE
+ JS_FN("ThisTupleValue", intrinsic_ThisTupleValue, 1, 0),
+#endif
+ JS_FN("ThrowAggregateError", intrinsic_ThrowAggregateError, 4, 0),
+ JS_FN("ThrowInternalError", intrinsic_ThrowInternalError, 4, 0),
+ JS_FN("ThrowRangeError", intrinsic_ThrowRangeError, 4, 0),
+ JS_FN("ThrowTypeError", intrinsic_ThrowTypeError, 4, 0),
+ JS_FN("ToBigInt", intrinsic_ToBigInt, 1, 0),
+ JS_INLINABLE_FN("ToInteger", intrinsic_ToInteger, 1, 0, IntrinsicToInteger),
+ JS_INLINABLE_FN("ToLength", intrinsic_ToLength, 1, 0, IntrinsicToLength),
+ JS_INLINABLE_FN("ToObject", intrinsic_ToObject, 1, 0, IntrinsicToObject),
+ JS_FN("ToPropertyKey", intrinsic_ToPropertyKey, 1, 0),
+ JS_FN("ToSource", intrinsic_ToSource, 1, 0),
+#ifdef ENABLE_RECORD_TUPLE
+ JS_FN("TupleLength", intrinsic_TupleLength, 1, 0),
+#endif
+ JS_FN("TypedArrayBitwiseSlice", intrinsic_TypedArrayBitwiseSlice, 4, 0),
+ JS_FN("TypedArrayBuffer", intrinsic_TypedArrayBuffer, 1, 0),
+ JS_INLINABLE_FN("TypedArrayByteOffset", intrinsic_TypedArrayByteOffset, 1,
+ 0, IntrinsicTypedArrayByteOffset),
+ JS_INLINABLE_FN("TypedArrayElementSize", intrinsic_TypedArrayElementSize, 1,
+ 0, IntrinsicTypedArrayElementSize),
+ JS_FN("TypedArrayInitFromPackedArray",
+ intrinsic_TypedArrayInitFromPackedArray, 2, 0),
+ JS_INLINABLE_FN("TypedArrayLength", intrinsic_TypedArrayLength, 1, 0,
+ IntrinsicTypedArrayLength),
+ JS_FN("TypedArrayNativeSort", intrinsic_TypedArrayNativeSort, 1, 0),
+ JS_INLINABLE_FN("UnsafeGetInt32FromReservedSlot",
+ intrinsic_UnsafeGetInt32FromReservedSlot, 2, 0,
+ IntrinsicUnsafeGetInt32FromReservedSlot),
+ JS_INLINABLE_FN("UnsafeGetObjectFromReservedSlot",
+ intrinsic_UnsafeGetObjectFromReservedSlot, 2, 0,
+ IntrinsicUnsafeGetObjectFromReservedSlot),
+ JS_INLINABLE_FN("UnsafeGetReservedSlot", intrinsic_UnsafeGetReservedSlot, 2,
+ 0, IntrinsicUnsafeGetReservedSlot),
+ JS_INLINABLE_FN("UnsafeGetStringFromReservedSlot",
+ intrinsic_UnsafeGetStringFromReservedSlot, 2, 0,
+ IntrinsicUnsafeGetStringFromReservedSlot),
+ JS_INLINABLE_FN("UnsafeSetReservedSlot", intrinsic_UnsafeSetReservedSlot, 3,
+ 0, IntrinsicUnsafeSetReservedSlot),
+
+// Intrinsics and standard functions used by Intl API implementation.
+#ifdef JS_HAS_INTL_API
+ JS_FN("intl_BestAvailableLocale", intl_BestAvailableLocale, 3, 0),
+ JS_FN("intl_CallCollatorMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<CollatorObject>>, 2, 0),
+ JS_FN("intl_CallDateTimeFormatMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<DateTimeFormatObject>>, 2, 0),
+ JS_FN("intl_CallDisplayNamesMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<DisplayNamesObject>>, 2, 0),
+ JS_FN("intl_CallListFormatMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<ListFormatObject>>, 2, 0),
+ JS_FN("intl_CallNumberFormatMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<NumberFormatObject>>, 2, 0),
+ JS_FN("intl_CallPluralRulesMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<PluralRulesObject>>, 2, 0),
+ JS_FN("intl_CallRelativeTimeFormatMethodIfWrapped",
+ CallNonGenericSelfhostedMethod<Is<RelativeTimeFormatObject>>, 2, 0),
+ JS_FN("intl_Collator", intl_Collator, 2, 0),
+ JS_FN("intl_CompareStrings", intl_CompareStrings, 3, 0),
+ JS_FN("intl_ComputeDisplayName", intl_ComputeDisplayName, 6, 0),
+ JS_FN("intl_DateTimeFormat", intl_DateTimeFormat, 2, 0),
+ JS_FN("intl_FormatDateTime", intl_FormatDateTime, 2, 0),
+ JS_FN("intl_FormatDateTimeRange", intl_FormatDateTimeRange, 4, 0),
+ JS_FN("intl_FormatList", intl_FormatList, 3, 0),
+ JS_FN("intl_FormatNumber", intl_FormatNumber, 3, 0),
+ JS_FN("intl_FormatNumberRange", intl_FormatNumberRange, 4, 0),
+ JS_FN("intl_FormatRelativeTime", intl_FormatRelativeTime, 4, 0),
+ JS_FN("intl_GetCalendarInfo", intl_GetCalendarInfo, 1, 0),
+ JS_FN("intl_GetPluralCategories", intl_GetPluralCategories, 1, 0),
+ JS_INLINABLE_FN("intl_GuardToCollator",
+ intrinsic_GuardToBuiltin<CollatorObject>, 1, 0,
+ IntlGuardToCollator),
+ JS_INLINABLE_FN("intl_GuardToDateTimeFormat",
+ intrinsic_GuardToBuiltin<DateTimeFormatObject>, 1, 0,
+ IntlGuardToDateTimeFormat),
+ JS_INLINABLE_FN("intl_GuardToDisplayNames",
+ intrinsic_GuardToBuiltin<DisplayNamesObject>, 1, 0,
+ IntlGuardToDisplayNames),
+ JS_INLINABLE_FN("intl_GuardToListFormat",
+ intrinsic_GuardToBuiltin<ListFormatObject>, 1, 0,
+ IntlGuardToListFormat),
+ JS_INLINABLE_FN("intl_GuardToNumberFormat",
+ intrinsic_GuardToBuiltin<NumberFormatObject>, 1, 0,
+ IntlGuardToNumberFormat),
+ JS_INLINABLE_FN("intl_GuardToPluralRules",
+ intrinsic_GuardToBuiltin<PluralRulesObject>, 1, 0,
+ IntlGuardToPluralRules),
+ JS_INLINABLE_FN("intl_GuardToRelativeTimeFormat",
+ intrinsic_GuardToBuiltin<RelativeTimeFormatObject>, 1, 0,
+ IntlGuardToRelativeTimeFormat),
+ JS_FN("intl_IsRuntimeDefaultLocale", intrinsic_IsRuntimeDefaultLocale, 1,
+ 0),
+ JS_FN("intl_IsValidTimeZoneName", intl_IsValidTimeZoneName, 1, 0),
+ JS_FN("intl_IsWrappedDateTimeFormat",
+ intrinsic_IsWrappedInstanceOfBuiltin<DateTimeFormatObject>, 1, 0),
+ JS_FN("intl_IsWrappedNumberFormat",
+ intrinsic_IsWrappedInstanceOfBuiltin<NumberFormatObject>, 1, 0),
+ JS_FN("intl_NumberFormat", intl_NumberFormat, 2, 0),
+ JS_FN("intl_RuntimeDefaultLocale", intrinsic_RuntimeDefaultLocale, 0, 0),
+ JS_FN("intl_SelectPluralRule", intl_SelectPluralRule, 2, 0),
+ JS_FN("intl_SelectPluralRuleRange", intl_SelectPluralRuleRange, 3, 0),
+ JS_FN("intl_SupportedValuesOf", intl_SupportedValuesOf, 1, 0),
+ JS_FN("intl_TryValidateAndCanonicalizeLanguageTag",
+ intl_TryValidateAndCanonicalizeLanguageTag, 1, 0),
+ JS_FN("intl_ValidateAndCanonicalizeLanguageTag",
+ intl_ValidateAndCanonicalizeLanguageTag, 2, 0),
+ JS_FN("intl_ValidateAndCanonicalizeUnicodeExtensionType",
+ intl_ValidateAndCanonicalizeUnicodeExtensionType, 3, 0),
+ JS_FN("intl_availableCalendars", intl_availableCalendars, 1, 0),
+ JS_FN("intl_availableCollations", intl_availableCollations, 1, 0),
+# if DEBUG || MOZ_SYSTEM_ICU
+ JS_FN("intl_availableMeasurementUnits", intl_availableMeasurementUnits, 0,
+ 0),
+# endif
+ JS_FN("intl_canonicalizeTimeZone", intl_canonicalizeTimeZone, 1, 0),
+ JS_FN("intl_defaultCalendar", intl_defaultCalendar, 1, 0),
+ JS_FN("intl_defaultTimeZone", intl_defaultTimeZone, 0, 0),
+ JS_FN("intl_defaultTimeZoneOffset", intl_defaultTimeZoneOffset, 0, 0),
+ JS_FN("intl_isDefaultTimeZone", intl_isDefaultTimeZone, 1, 0),
+ JS_FN("intl_isUpperCaseFirst", intl_isUpperCaseFirst, 1, 0),
+ JS_FN("intl_numberingSystem", intl_numberingSystem, 1, 0),
+ JS_FN("intl_resolveDateTimeFormatComponents",
+ intl_resolveDateTimeFormatComponents, 3, 0),
+ JS_FN("intl_supportedLocaleOrFallback", intl_supportedLocaleOrFallback, 1,
+ 0),
+ JS_FN("intl_toLocaleLowerCase", intl_toLocaleLowerCase, 2, 0),
+ JS_FN("intl_toLocaleUpperCase", intl_toLocaleUpperCase, 2, 0),
+#endif // JS_HAS_INTL_API
+
+ // Standard builtins used by self-hosting.
+ JS_FN("new_List", intrinsic_newList, 0, 0),
+ JS_INLINABLE_FN("std_Array", array_construct, 1, 0, Array),
+ JS_FN("std_Array_includes", array_includes, 1, 0),
+ JS_FN("std_Array_indexOf", array_indexOf, 1, 0),
+ JS_FN("std_Array_lastIndexOf", array_lastIndexOf, 1, 0),
+ JS_INLINABLE_FN("std_Array_pop", array_pop, 0, 0, ArrayPop),
+ JS_FN("std_BigInt_valueOf", BigIntObject::valueOf, 0, 0),
+ JS_FN("std_Date_now", date_now, 0, 0),
+ JS_FN("std_Function_apply", fun_apply, 2, 0),
+ JS_FN("std_Map_entries", MapObject::entries, 0, 0),
+ JS_FN("std_Map_get", MapObject::get, 1, 0),
+ JS_FN("std_Map_set", MapObject::set, 2, 0),
+ JS_INLINABLE_FN("std_Math_abs", math_abs, 1, 0, MathAbs),
+ JS_INLINABLE_FN("std_Math_floor", math_floor, 1, 0, MathFloor),
+ JS_INLINABLE_FN("std_Math_max", math_max, 2, 0, MathMax),
+ JS_INLINABLE_FN("std_Math_min", math_min, 2, 0, MathMin),
+ JS_INLINABLE_FN("std_Math_trunc", math_trunc, 1, 0, MathTrunc),
+ JS_INLINABLE_FN("std_Object_create", obj_create, 2, 0, ObjectCreate),
+ JS_INLINABLE_FN("std_Object_isPrototypeOf", obj_isPrototypeOf, 1, 0,
+ ObjectIsPrototypeOf),
+ JS_FN("std_Object_propertyIsEnumerable", obj_propertyIsEnumerable, 1, 0),
+ JS_FN("std_Object_setProto", obj_setProto, 1, 0),
+ JS_FN("std_Object_toString", obj_toString, 0, 0),
+ JS_INLINABLE_FN("std_Reflect_getPrototypeOf", Reflect_getPrototypeOf, 1, 0,
+ ReflectGetPrototypeOf),
+ JS_FN("std_Reflect_isExtensible", Reflect_isExtensible, 1, 0),
+ JS_FN("std_Reflect_ownKeys", Reflect_ownKeys, 1, 0),
+ JS_FN("std_Set_add", SetObject::add, 1, 0),
+ JS_FN("std_Set_has", SetObject::has, 1, 0),
+ JS_FN("std_Set_values", SetObject::values, 0, 0),
+ JS_INLINABLE_FN("std_String_charCodeAt", str_charCodeAt, 1, 0,
+ StringCharCodeAt),
+ JS_INLINABLE_FN("std_String_endsWith", str_endsWith, 1, 0, StringEndsWith),
+ JS_INLINABLE_FN("std_String_fromCharCode", str_fromCharCode, 1, 0,
+ StringFromCharCode),
+ JS_INLINABLE_FN("std_String_fromCodePoint", str_fromCodePoint, 1, 0,
+ StringFromCodePoint),
+ JS_FN("std_String_includes", str_includes, 1, 0),
+ JS_INLINABLE_FN("std_String_indexOf", str_indexOf, 1, 0, StringIndexOf),
+ JS_INLINABLE_FN("std_String_startsWith", str_startsWith, 1, 0,
+ StringStartsWith),
+#ifdef ENABLE_RECORD_TUPLE
+ JS_FN("std_Tuple_unchecked", tuple_construct, 1, 0),
+#endif
+
+ JS_FS_END};
+
+#ifdef DEBUG
+
+static void CheckSelfHostedIntrinsics() {
+ // The `intrinsic_functions` list must be sorted so that we can use
+ // mozilla::BinarySearch to do lookups on demand.
+ const char* prev = "";
+ for (JSFunctionSpec spec : intrinsic_functions) {
+ if (spec.name.string()) {
+ MOZ_ASSERT(strcmp(prev, spec.name.string()) < 0,
+ "Self-hosted intrinsics must be sorted");
+ prev = spec.name.string();
+ }
+ }
+}
+
+class CheckTenuredTracer : public JS::CallbackTracer {
+ HashSet<gc::Cell*, DefaultHasher<gc::Cell*>, SystemAllocPolicy> visited;
+ Vector<JS::GCCellPtr, 0, SystemAllocPolicy> stack;
+
+ public:
+ explicit CheckTenuredTracer(JSRuntime* rt) : JS::CallbackTracer(rt) {}
+ void check() {
+ while (!stack.empty()) {
+ JS::TraceChildren(this, stack.popCopy());
+ }
+ }
+ void onChild(JS::GCCellPtr thing, const char* name) override {
+ gc::Cell* cell = thing.asCell();
+ MOZ_RELEASE_ASSERT(cell->isTenured(), "Expected tenured cell");
+ if (!visited.has(cell)) {
+ if (!visited.put(cell) || !stack.append(thing)) {
+ // Ignore OOM. This can happen during fuzzing.
+ return;
+ }
+ }
+ }
+};
+
+static void CheckSelfHostingDataIsTenured(JSRuntime* rt) {
+  // Check that everything is tenured, as we don't trace this data when
+  // collecting the nursery.
+ CheckTenuredTracer trc(rt);
+ rt->traceSelfHostingStencil(&trc);
+ trc.check();
+}
+
+#endif
+
+const JSFunctionSpec* js::FindIntrinsicSpec(js::PropertyName* name) {
+ size_t limit = std::size(intrinsic_functions) - 1;
+ MOZ_ASSERT(!intrinsic_functions[limit].name);
+
+ MOZ_ASSERT(name->hasLatin1Chars());
+
+ JS::AutoCheckCannotGC nogc;
+ const char* chars = reinterpret_cast<const char*>(name->latin1Chars(nogc));
+ size_t len = name->length();
+
+ // NOTE: CheckSelfHostedIntrinsics checks that the intrinsic_functions list is
+ // sorted appropriately so that we can use binary search here.
+
+ size_t loc = 0;
+ bool match = mozilla::BinarySearchIf(
+ intrinsic_functions, 0, limit,
+ [chars, len](const JSFunctionSpec& spec) {
+ // The spec string is null terminated but the `name` string is not, so
+ // compare chars up until the length of `name`. Since the `name` string
+ // does not contain any nulls, seeing the null terminator of the spec
+ // string will terminate the loop appropriately. A final comparison
+ // against null is needed to determine if the spec string has an extra
+ // suffix.
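+        // For example, a lookup for a (hypothetical) name "foo" must not
+        // match a spec entry named "foobar"; the final comparison catches
+        // the extra suffix.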
+ const char* spec_chars = spec.name.string();
+ for (size_t i = 0; i < len; ++i) {
+ if (auto cmp_result = int(chars[i]) - int(spec_chars[i])) {
+ return cmp_result;
+ }
+ }
+ return int('\0') - int(spec_chars[len]);
+ },
+ &loc);
+ if (match) {
+ return &intrinsic_functions[loc];
+ }
+ return nullptr;
+}
+
+void js::FillSelfHostingCompileOptions(CompileOptions& options) {
+ /*
+ * In self-hosting mode, scripts use JSOp::GetIntrinsic instead of
+ * JSOp::GetName or JSOp::GetGName to access unbound variables.
+ * JSOp::GetIntrinsic does a name lookup on a special object, whose
+ * properties are filled in lazily upon first access for a given global.
+ *
+ * As that object is inaccessible to client code, the lookups are
+ * guaranteed to return the original objects, ensuring safe implementation
+ * of self-hosted builtins.
+ *
+ * Additionally, the special syntax callFunction(fun, receiver, ...args)
+ * is supported, for which bytecode is emitted that invokes |fun| with
+ * |receiver| as the this-object and ...args as the arguments.
+ */
+ options.setIntroductionType("self-hosted");
+ options.setFileAndLine("self-hosted", 1);
+ options.setSkipFilenameValidation(true);
+ options.setSelfHostingMode(true);
+ options.setForceFullParse();
+ options.setForceStrictMode();
+ options.setDiscardSource();
+ options.setIsRunOnce(true);
+ options.setNoScriptRval(true);
+}
+
+// Report all errors and warnings to stderr because it is too early in the
+// startup process for any other error reporting to be used, and we don't want
+// errors in self-hosted code to be silently swallowed.
+class MOZ_STACK_CLASS AutoPrintSelfHostingFrontendContext
+ : public FrontendContext {
+ JSContext* cx_;
+
+ public:
+ explicit AutoPrintSelfHostingFrontendContext(JSContext* cx)
+ : FrontendContext(), cx_(cx) {
+ setCurrentJSContext(cx_);
+ }
+ ~AutoPrintSelfHostingFrontendContext() {
+ // TODO: Remove this once JSContext is removed from frontend.
+ MaybePrintAndClearPendingException(cx_);
+
+ if (hadOutOfMemory()) {
+ fprintf(stderr, "Out of memory\n");
+ }
+
+ if (maybeError()) {
+ JS::PrintError(stderr, &*maybeError(), true);
+ }
+ for (CompileError& error : warnings()) {
+ JS::PrintError(stderr, &error, true);
+ }
+ if (hadOverRecursed()) {
+ fprintf(stderr, "Over recursed\n");
+ }
+ if (hadAllocationOverflow()) {
+ fprintf(stderr, "Allocation overflow\n");
+ }
+ }
+};
+
+[[nodiscard]] static bool InitSelfHostingFromStencil(
+ JSContext* cx, frontend::CompilationAtomCache& atomCache,
+ const frontend::CompilationStencil& stencil) {
+ // Build the JSAtom -> ScriptIndexRange mapping and save on the runtime.
+ {
+ auto& scriptMap = cx->runtime()->selfHostScriptMap.ref();
+
+ // We don't easily know the number of top-level functions, so use the total
+ // number of stencil functions instead. There is very little nesting of
+ // functions in self-hosted code so this is a good approximation.
+ size_t numSelfHostedScripts = stencil.scriptData.size();
+ if (!scriptMap.reserve(numSelfHostedScripts)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ auto topLevelThings =
+ stencil.scriptData[frontend::CompilationStencil::TopLevelIndex]
+ .gcthings(stencil);
+
+ // Iterate over the (named) top-level functions. We record the ScriptIndex
+ // as well as the ScriptIndex of the next top-level function. Scripts
+ // between these two indices are the inner functions of the first one. We
+ // only record named scripts here since they are what might be looked up.
+ Rooted<JSAtom*> prevAtom(cx);
+ frontend::ScriptIndex prevIndex;
+ for (frontend::TaggedScriptThingIndex thing : topLevelThings) {
+ if (!thing.isFunction()) {
+ continue;
+ }
+
+ frontend::ScriptIndex index = thing.toFunction();
+ const auto& script = stencil.scriptData[index];
+
+ if (prevAtom) {
+ frontend::ScriptIndexRange range{prevIndex, index};
+ scriptMap.putNewInfallible(prevAtom, range);
+ }
+
+ prevAtom = script.functionAtom
+ ? atomCache.getExistingAtomAt(cx, script.functionAtom)
+ : nullptr;
+ prevIndex = index;
+ }
+ if (prevAtom) {
+ frontend::ScriptIndexRange range{
+ prevIndex, frontend::ScriptIndex(stencil.scriptData.size())};
+ scriptMap.putNewInfallible(prevAtom, range);
+ }
+
+    // We over-estimated the capacity of `scriptMap`, so check that the
+    // estimate hasn't drifted too far since this was written. If this assert
+    // fails, we may need a new way to size the `scriptMap`.
+ MOZ_ASSERT(numSelfHostedScripts < (scriptMap.count() * 1.15));
+ }
+
+#ifdef DEBUG
+ // Check that the list of intrinsics is well-formed.
+ CheckSelfHostedIntrinsics();
+ CheckSelfHostingDataIsTenured(cx->runtime());
+#endif
+
+ return true;
+}
+
+bool JSRuntime::initSelfHostingStencil(JSContext* cx,
+ JS::SelfHostedCache xdrCache,
+ JS::SelfHostedWriter xdrWriter) {
+ if (parentRuntime) {
+ MOZ_RELEASE_ASSERT(
+ parentRuntime->hasInitializedSelfHosting(),
+ "Parent runtime must initialize self-hosting before workers");
+
+ selfHostStencilInput_ = parentRuntime->selfHostStencilInput_;
+ selfHostStencil_ = parentRuntime->selfHostStencil_;
+ return true;
+ }
+
+ // Variables used to instantiate scripts.
+ CompileOptions options(cx);
+ FillSelfHostingCompileOptions(options);
+
+ // Try initializing from Stencil XDR.
+ bool decodeOk = false;
+ AutoPrintSelfHostingFrontendContext fc(cx);
+ if (xdrCache.Length() > 0) {
+ // Allow the VM to directly use bytecode from the XDR buffer without
+ // copying it. The buffer must outlive all runtimes (including workers).
+ options.borrowBuffer = true;
+ options.usePinnedBytecode = true;
+
+ Rooted<UniquePtr<frontend::CompilationInput>> input(
+ cx, cx->new_<frontend::CompilationInput>(options));
+ if (!input) {
+ return false;
+ }
+ {
+ AutoReportFrontendContext fc(cx);
+ if (!input->initForSelfHostingGlobal(&fc)) {
+ return false;
+ }
+ }
+
+ RefPtr<frontend::CompilationStencil> stencil(
+ cx->new_<frontend::CompilationStencil>(input->source));
+ if (!stencil) {
+ return false;
+ }
+ if (!stencil->deserializeStencils(&fc, options, xdrCache, &decodeOk)) {
+ return false;
+ }
+
+ if (decodeOk) {
+ MOZ_ASSERT(input->atomCache.empty());
+
+ MOZ_ASSERT(!hasSelfHostStencil());
+
+ // Move it to the runtime.
+ setSelfHostingStencil(&input, std::move(stencil));
+
+ return true;
+ }
+ }
+
+  // If the script wasn't generated, the XDR data was either not provided or
+  // failed the decoding phase. Fall back to parsing from source text.
+ uint32_t srcLen = GetRawScriptsSize();
+ const unsigned char* compressed = compressedSources;
+ uint32_t compressedLen = GetCompressedSize();
+ auto src = cx->make_pod_array<char>(srcLen);
+ if (!src) {
+ return false;
+ }
+ if (!DecompressString(compressed, compressedLen,
+ reinterpret_cast<unsigned char*>(src.get()), srcLen)) {
+ return false;
+ }
+
+ JS::SourceText<mozilla::Utf8Unit> srcBuf;
+ if (!srcBuf.init(cx, std::move(src), srcLen)) {
+ return false;
+ }
+
+ Rooted<UniquePtr<frontend::CompilationInput>> input(
+ cx, cx->new_<frontend::CompilationInput>(options));
+ if (!input) {
+ return false;
+ }
+ frontend::NoScopeBindingCache scopeCache;
+ RefPtr<frontend::CompilationStencil> stencil =
+ frontend::CompileGlobalScriptToStencil(cx, &fc, cx->tempLifoAlloc(),
+ *input, &scopeCache, srcBuf,
+ ScopeKind::Global);
+ if (!stencil) {
+ return false;
+ }
+
+ // Serialize the stencil to XDR.
+ if (xdrWriter) {
+ JS::TranscodeBuffer xdrBuffer;
+ bool succeeded = false;
+ if (!stencil->serializeStencils(cx, *input, xdrBuffer, &succeeded)) {
+ return false;
+ }
+ if (!succeeded) {
+ JS_ReportErrorASCII(cx, "Encoding failure");
+ return false;
+ }
+
+ if (!xdrWriter(cx, xdrBuffer)) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(input->atomCache.empty());
+
+ MOZ_ASSERT(!hasSelfHostStencil());
+
+ // Move it to the runtime.
+ setSelfHostingStencil(&input, std::move(stencil));
+
+ return true;
+}
+
+void JSRuntime::setSelfHostingStencil(
+ MutableHandle<UniquePtr<frontend::CompilationInput>> input,
+ RefPtr<frontend::CompilationStencil>&& stencil) {
+ MOZ_ASSERT(!selfHostStencilInput_);
+ MOZ_ASSERT(!selfHostStencil_);
+
+ selfHostStencilInput_ = input.release();
+ selfHostStencil_ = stencil.forget().take();
+
+#ifdef DEBUG
+ CheckSelfHostingDataIsTenured(this);
+#endif
+}
+
+bool JSRuntime::initSelfHostingFromStencil(JSContext* cx) {
+ return InitSelfHostingFromStencil(
+ cx, cx->runtime()->selfHostStencilInput_->atomCache,
+ *cx->runtime()->selfHostStencil_);
+}
+
+void JSRuntime::finishSelfHosting() {
+ if (!parentRuntime) {
+ js_delete(selfHostStencilInput_.ref());
+ if (selfHostStencil_) {
+ // delete selfHostStencil_ by decrementing the ref-count of the last
+ // instance.
+ RefPtr<frontend::CompilationStencil> stencil;
+ *getter_AddRefs(stencil) = selfHostStencil_;
+ MOZ_ASSERT(stencil->refCount == 1);
+ }
+ }
+
+ selfHostStencilInput_ = nullptr;
+ selfHostStencil_ = nullptr;
+
+ selfHostScriptMap.ref().clear();
+}
+
+void JSRuntime::traceSelfHostingStencil(JSTracer* trc) {
+ if (selfHostStencilInput_.ref()) {
+ selfHostStencilInput_->trace(trc);
+ }
+ selfHostScriptMap.ref().trace(trc);
+}
+
+GeneratorKind JSRuntime::getSelfHostedFunctionGeneratorKind(
+ js::PropertyName* name) {
+ frontend::ScriptIndex index = getSelfHostedScriptIndexRange(name)->start;
+ auto flags = selfHostStencil().scriptExtra[index].immutableFlags;
+ return flags.hasFlag(js::ImmutableScriptFlagsEnum::IsGenerator)
+ ? GeneratorKind::Generator
+ : GeneratorKind::NotGenerator;
+}
+
+// Returns the ScriptSourceObject to use for cloned self-hosted scripts in the
+// current realm.
+ScriptSourceObject* js::SelfHostingScriptSourceObject(JSContext* cx) {
+ return GlobalObject::getOrCreateSelfHostingScriptSourceObject(cx,
+ cx->global());
+}
+
+/* static */
+ScriptSourceObject* GlobalObject::getOrCreateSelfHostingScriptSourceObject(
+ JSContext* cx, Handle<GlobalObject*> global) {
+ MOZ_ASSERT(cx->global() == global);
+
+ if (ScriptSourceObject* sso = global->data().selfHostingScriptSource) {
+ return sso;
+ }
+
+ CompileOptions options(cx);
+ FillSelfHostingCompileOptions(options);
+
+ RefPtr<ScriptSource> source(cx->new_<ScriptSource>());
+ if (!source) {
+ return nullptr;
+ }
+
+ Rooted<ScriptSourceObject*> sourceObject(cx);
+ {
+ AutoReportFrontendContext fc(cx);
+ if (!source->initFromOptions(&fc, options)) {
+ return nullptr;
+ }
+
+ sourceObject = ScriptSourceObject::create(cx, source.get());
+ if (!sourceObject) {
+ return nullptr;
+ }
+
+ JS::InstantiateOptions instantiateOptions(options);
+ if (!ScriptSourceObject::initFromOptions(cx, sourceObject,
+ instantiateOptions)) {
+ return nullptr;
+ }
+
+ global->data().selfHostingScriptSource.init(sourceObject);
+ }
+
+ return sourceObject;
+}
+
+bool JSRuntime::delazifySelfHostedFunction(JSContext* cx,
+ Handle<PropertyName*> name,
+ HandleFunction targetFun) {
+ MOZ_ASSERT(targetFun->isExtended());
+ MOZ_ASSERT(targetFun->hasSelfHostedLazyScript());
+
+ auto indexRange = *getSelfHostedScriptIndexRange(name);
+ auto& stencil = cx->runtime()->selfHostStencil();
+
+ if (!stencil.delazifySelfHostedFunction(
+ cx, cx->runtime()->selfHostStencilInput().atomCache, indexRange,
+ targetFun)) {
+ return false;
+ }
+
+ // Relazifiable self-hosted functions may be relazified later into a
+ // SelfHostedLazyScript, dropping the BaseScript entirely. This only applies
+  // to named functions being delazified. Inner functions used by self-hosting
+ // are never relazified.
+ BaseScript* targetScript = targetFun->baseScript();
+ if (targetScript->isRelazifiable()) {
+ targetScript->setAllowRelazify();
+ }
+
+ return true;
+}
+
+mozilla::Maybe<frontend::ScriptIndexRange>
+JSRuntime::getSelfHostedScriptIndexRange(js::PropertyName* name) {
+ if (parentRuntime) {
+ return parentRuntime->getSelfHostedScriptIndexRange(name);
+ }
+ MOZ_ASSERT(name->isPermanentAndMayBeShared());
+ if (auto ptr = selfHostScriptMap.ref().readonlyThreadsafeLookup(name)) {
+ return mozilla::Some(ptr->value());
+ }
+ return mozilla::Nothing();
+}
+
+static bool GetComputedIntrinsic(JSContext* cx, Handle<PropertyName*> name,
+ MutableHandleValue vp) {
+  // If the intrinsic was not in the hardcoded set, run the top-level of the
+  // self-hosted script. This will generate values and call `SetIntrinsic` to
+  // save them on a special "computed intrinsics holder". We can then check for
+  // our required values and cache them on the normal intrinsics holder.
+
+ Rooted<NativeObject*> computedIntrinsicsHolder(
+ cx, cx->global()->getComputedIntrinsicsHolder());
+ if (!computedIntrinsicsHolder) {
+ auto computedIntrinsicHolderGuard = mozilla::MakeScopeExit(
+ [cx]() { cx->global()->setComputedIntrinsicsHolder(nullptr); });
+
+ // Instantiate a script in current realm from the shared Stencil.
+ JSRuntime* runtime = cx->runtime();
+ RootedScript script(
+ cx, runtime->selfHostStencil().instantiateSelfHostedTopLevelForRealm(
+ cx, runtime->selfHostStencilInput()));
+ if (!script) {
+ return false;
+ }
+
+ // Attach the computed intrinsics holder to the global now to capture
+ // generated values.
+ computedIntrinsicsHolder =
+ NewPlainObjectWithProto(cx, nullptr, TenuredObject);
+ if (!computedIntrinsicsHolder) {
+ return false;
+ }
+ cx->global()->setComputedIntrinsicsHolder(computedIntrinsicsHolder);
+
+    // Attempt to execute the top-level script. If it fails to run to
+ // successful completion, throw away the holder to avoid a partial
+ // initialization state.
+ if (!JS_ExecuteScript(cx, script)) {
+ return false;
+ }
+
+    // We successfully ran the self-hosted top-level in the current realm, so
+    // these computed intrinsic values are now the source of truth for the
+    // realm.
+ computedIntrinsicHolderGuard.release();
+ }
+
+ // Cache the individual intrinsic on the standard holder object so that we
+ // only have to look for it in one place when performing `GetIntrinsic`.
+ mozilla::Maybe<PropertyInfo> prop =
+ computedIntrinsicsHolder->lookup(cx, name);
+ MOZ_RELEASE_ASSERT(prop, "SelfHosted intrinsic not found");
+ RootedValue value(cx, computedIntrinsicsHolder->getSlot(prop->slot()));
+ return GlobalObject::addIntrinsicValue(cx, cx->global(), name, value);
+}
+
+bool JSRuntime::getSelfHostedValue(JSContext* cx, Handle<PropertyName*> name,
+ MutableHandleValue vp) {
+ // If the self-hosted value we want is a function in the stencil, instantiate
+ // a lazy self-hosted function for it. This is typical when a self-hosted
+ // function calls other self-hosted helper functions.
+ if (auto index = getSelfHostedScriptIndexRange(name)) {
+ JSFunction* fun =
+ cx->runtime()->selfHostStencil().instantiateSelfHostedLazyFunction(
+ cx, cx->runtime()->selfHostStencilInput().atomCache, index->start,
+ name);
+ if (!fun) {
+ return false;
+ }
+ vp.setObject(*fun);
+ return true;
+ }
+
+ return GetComputedIntrinsic(cx, name, vp);
+}
+
+void JSRuntime::assertSelfHostedFunctionHasCanonicalName(
+ Handle<PropertyName*> name) {
+#ifdef DEBUG
+ frontend::ScriptIndex index = getSelfHostedScriptIndexRange(name)->start;
+ MOZ_ASSERT(selfHostStencil().scriptData[index].hasSelfHostedCanonicalName());
+#endif
+}
+
+bool js::IsSelfHostedFunctionWithName(JSFunction* fun, JSAtom* name) {
+ return fun->isSelfHostedBuiltin() && fun->isExtended() &&
+ GetClonedSelfHostedFunctionName(fun) == name;
+}
+
+bool js::IsSelfHostedFunctionWithName(const Value& v, JSAtom* name) {
+ if (!v.isObject() || !v.toObject().is<JSFunction>()) {
+ return false;
+ }
+ JSFunction* fun = &v.toObject().as<JSFunction>();
+ return IsSelfHostedFunctionWithName(fun, name);
+}
+
+static_assert(
+ JSString::MAX_LENGTH <= INT32_MAX,
+ "StringIteratorNext in builtin/String.js assumes the stored index "
+ "into the string is an Int32Value");
+
+static_assert(JSString::MAX_LENGTH == MAX_STRING_LENGTH,
+ "JSString::MAX_LENGTH matches self-hosted constant for maximum "
+ "string length");
+
+static_assert(ARGS_LENGTH_MAX == MAX_ARGS_LENGTH,
+ "ARGS_LENGTH_MAX matches self-hosted constant for maximum "
+ "arguments length");
diff --git a/js/src/vm/SelfHosting.h b/js/src/vm/SelfHosting.h
new file mode 100644
index 0000000000..acd61fc132
--- /dev/null
+++ b/js/src/vm/SelfHosting.h
@@ -0,0 +1,287 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SelfHosting_h_
+#define vm_SelfHosting_h_
+
+#include "NamespaceImports.h"
+
+#include "js/CallNonGenericMethod.h"
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+
+// [SMDOC] Self-hosted JS
+//
+// Self-hosted JS allows implementing a part of the JS engine using JavaScript.
+//
+// This allows implementing new features easily, and also enables JIT
+// compilation to achieve better performance, for example with higher-order
+// functions. Self-hosted functions can be inlined into, and optimized
+// together with, their JS caller functions.
+//
+// Self-hosted JS code is compiled into a stencil during the initialization of
+// the engine, and each function is instantiated into each global on demand.
+//
+// Self-hosted JS differs from regular JavaScript code in several ways, for
+// reasons of performance, security, and others.
+//
+// # Always strict mode
+//
+// Unlike regular JavaScript, self-hosted JS code is always in strict mode.
+//
+// # Prohibited syntax
+//
+//   * Regular expression literals (`/foo/`) cannot be used
+//   * `obj.method(...)` and `obj[method](...)` style calls cannot be used.
+//     See `callFunction` below
+//   * Object literals cannot contain duplicate property names
+// * `yield*` cannot be used
+//
+// # No lazy parsing
+//
+// Self-hosted JS does not use lazy/syntax parsing: bytecode is generated
+// eagerly for each function. However, we do instantiate the BaseScript lazily
+// from the stencil for JSFunctions created for self-hosted built-ins. See
+// `SelfHostedLazyScript` and `JSRuntime::selfHostedLazyScript`.
+//
+// # Extended function
+//
+// Functions with a "$" prefix in their name are allocated as extended
+// functions. See "SetCanonicalName" below.
+//
+// # Intrinsic helper functions
+//
+// Self-hosted JS has access to special functions that can interact with
+// native code or internal representation of JS values and objects.
+//
+// See `intrinsic_functions` array in SelfHosting.cpp.
+//
+// # Stack Frame
+//
+// Stack frames inside self-hosted JS are hidden from Error.prototype.stack by
+// default, to hide the internals from user code.
+//
+// When debugging self-hosted JS code, the `MOZ_SHOW_ALL_JS_FRAMES` environment
+// variable can be used to expose those frames.
+//
+// # Debugger interaction
+//
+// Self-hosted JS is hidden from the debugger, and no source notes or
+// breakpoints are generated.
+//
+// Most function calls inside self-hosted JS are hidden from Debugger's
+// `onNativeCall` hook, except for the following (see below for each):
+// * `callContentFunction`
+// * `constructContentFunction`
+// * `allowContentIter`
+// * `allowContentIterWith`
+//
+// # XDR cache
+//
+// Compiling self-hosted JS code takes some time. To improve startup
+// performance, the bytecode for self-hosted JS code can be saved as XDR and
+// reused by other instances. This is used to speed up JS shell tests and
+// Firefox content-process startup.
+//
+// See `JSRuntime::initSelfHostingStencil` function.
+//
+// # Special functions
+//
+// Self-hosted JS code has special functions, to emit special bytecode
+// sequence, or directly operate on internals:
+//
+// callFunction(callee, thisV, arg0, ...)
+// Call `callee` function with `thisV` as "this" value, passing
+// arg0, ..., as arguments.
+//    This is used when the "this" value is not `undefined`.
+//
+// `obj.method(...)` syntax is forbidden in self-hosted JS, to avoid
+//    accidentally exposing internals, or allowing user code to modify the
+// behavior.
+//
+// If the `callee` can be user-provided, `callContentFunction` must be
+// used instead.
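+//
+//    For example (illustrative, using `std_Set_has` from the intrinsics
+//    list), instead of the forbidden `set.has(value)` one would write:
+//
+//      var found = callFunction(std_Set_has, set, value);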
+//
+// callContentFunction(callee, thisV, arg0, ...)
+// Same as `callFunction`, but this must be used when calling possibly
+//    user-provided functions, even if the "this" value is `undefined`.
+//
+// This exposes function calls to debuggers, using `JSOp::CallContent`
+// opcode.
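+//
+//    For example (illustrative), invoking a user-supplied callback:
+//
+//      callContentFunction(callbackfn, thisArg, element, index);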
+//
+// constructContentFunction(callee, newTarget, arg0, ...)
+// Construct `callee` function using `newTarget` as `new.target`.
+// This must be used when constructing possibly user-provided functions.
+//
+// This exposes constructs to debuggers, using `JSOp::NewContent` opcode.
+//
+// allowContentIter(iterable)
+// Iteration such as for-of and spread on user-provided value is
+// prohibited inside self-hosted JS by default.
+//
+//    `allowContentIter` marks iteration as allowed for the given, possibly
+//    user-provided, iterable.
+//
+// This exposes implicit function calls around iteration to debuggers,
+// using `JSOp::CallContentIter` opcode.
+//
+// Used in the following contexts:
+//
+// for (var item of allowContentIter(iterable)) { ... }
+// [...allowContentIter(iterable)]
+//
+// allowContentIterWith(iterable, iteratorFunc)
+//    Special form of `allowContentIter`, where `iterable[Symbol.iterator]`
+//    has already been retrieved.
+//
+// This directly uses `iteratorFunc` instead of accessing
+// `iterable[Symbol.iterator]` again inside for-of bytecode.
+//
+// for (var item of allowContentIterWith(iterable, iteratorFunc)) { ... }
+//
+// DefineDataProperty(obj, key, value)
+// Initialize `obj`'s `key` property with `value`, like
+// `Object.defineProperty(obj, key, {value})`, using `JSOp::InitElem`
+// opcode. This is almost always better than `obj[key] = value` because it
+// ignores setters and other properties on the prototype chain.
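+//
+//    For example (illustrative), filling in an element of a result array:
+//
+//      DefineDataProperty(result, index, value);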
+//
+// hasOwn(key, obj)
+// Return `true` if `obj` has an own `key` property, using `JSOp::HasOwn`
+// opcode.
+//
+// getPropertySuper(obj, key, receiver)
+// Return `obj.[[Get]](key, receiver)`, using `JSOp::GetElemSuper` opcode.
+//
+// ToNumeric(v)
+//    Convert `v` to a number, using `JSOp::ToNumeric` opcode
+//
+// ToString(v)
+//    Convert `v` to a string, using `JSOp::ToString` opcode
+//
+// GetBuiltinConstructor(name)
+// Return built-in constructor for `name`, e.g. `"Array"`, using
+// `JSOp::BuiltinObject` opcode.
+//
+// GetBuiltinPrototype(name)
+// Return built-in prototype for `name`, e.g. `"RegExp"`, using
+// `JSOp::BuiltinObject` opcode.
+//
+// GetBuiltinSymbol(name)
+// Return built-in symbol `Symbol[name]`, using `JSOp::Symbol` opcode.
+//
+// SetIsInlinableLargeFunction(fun)
+//    Mark the large function `fun` as inlinable.
+// `fun` must be the last function declaration before this call.
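+//
+//    For example (with a hypothetical helper):
+//
+//      function LargeHelper(x) { ... }
+//      SetIsInlinableLargeFunction(LargeHelper);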
+//
+// SetCanonicalName(fun)
+// Set canonical name for the function `fun`.
+// `fun` must be the last function declaration before this call, and also
+//    its function name must be prefixed with "$", to make it an extended
+//    function and store the original function name in the extended slot.
+//
+// UnsafeGetReservedSlot(obj, slot)
+// UnsafeGetObjectFromReservedSlot(obj, slot)
+// UnsafeGetInt32FromReservedSlot(obj, slot)
+// UnsafeGetStringFromReservedSlot(obj, slot)
+// UnsafeGetBooleanFromReservedSlot(obj, slot)
+// Get `obj`'s reserved slot specified by integer value `slot`.
+// They are intrinsic helper functions, and also optimized during JIT
+// compilation.
+//
+// UnsafeSetReservedSlot(obj, slot, value)
+// Set `obj`'s reserved slot specified by integer value `slot` to `value`.
+// This is an intrinsic helper function, and also optimized during JIT
+// compilation.
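+//
+//    For example (illustrative, with a hypothetical FLAGS_SLOT constant):
+//
+//      var flags = UnsafeGetInt32FromReservedSlot(obj, FLAGS_SLOT);
+//      UnsafeSetReservedSlot(obj, FLAGS_SLOT, flags | 0x1);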
+//
+// resumeGenerator(gen, value, kind)
+// Resume generator `gen`, using `kind`, which is one of "next", "throw",
+//      or "return", passing `value` as parameter, using `JSOp::Resume` opcode.
+//
+// forceInterpreter()
+// Force interpreter execution for this function, using
+// `JSOp::ForceInterpreter` opcode.
+// This must be the first statement inside the function.
+
+namespace JS {
+class JS_PUBLIC_API CompileOptions;
+}
+
+namespace js {
+
+class AnyInvokeArgs;
+class PropertyName;
+class ScriptSourceObject;
+
+ScriptSourceObject* SelfHostingScriptSourceObject(JSContext* cx);
+
+/*
+ * Check whether the given JSFunction or Value is a self-hosted function whose
+ * self-hosted name is the given name.
+ */
+bool IsSelfHostedFunctionWithName(JSFunction* fun, JSAtom* name);
+bool IsSelfHostedFunctionWithName(const Value& v, JSAtom* name);
+
+/*
+ * Returns the name of the cloned function's binding in the self-hosted global.
+ *
+ * This returns a non-null value only when this is a top level function
+ * declaration in the self-hosted global.
+ */
+PropertyName* GetClonedSelfHostedFunctionName(const JSFunction* fun);
+void SetClonedSelfHostedFunctionName(JSFunction* fun, PropertyName* name);
+
+constexpr char ExtendedUnclonedSelfHostedFunctionNamePrefix = '$';
+
+/*
+ * Uncloned self-hosted functions with a `$` prefix are allocated as
+ * extended functions, to store the original name in `_SetCanonicalName`.
+ */
+bool IsExtendedUnclonedSelfHostedFunctionName(JSAtom* name);
+
+void SetUnclonedSelfHostedCanonicalName(JSFunction* fun, JSAtom* name);
+
+bool IsCallSelfHostedNonGenericMethod(NativeImpl impl);
+
+bool ReportIncompatibleSelfHostedMethod(JSContext* cx, Handle<Value> thisValue);
+
+/* Get the compile options used when compiling self-hosted code. */
+void FillSelfHostingCompileOptions(JS::CompileOptions& options);
+
+const JSFunctionSpec* FindIntrinsicSpec(PropertyName* name);
+
+#ifdef DEBUG
+/*
+ * Calls a self-hosted function by name.
+ *
+ * This function is only available in debug mode, because it always atomizes
+ * its |name| parameter. Use the alternative function below in non-debug code.
+ */
+bool CallSelfHostedFunction(JSContext* cx, char const* name, HandleValue thisv,
+ const AnyInvokeArgs& args, MutableHandleValue rval);
+#endif
+
+/*
+ * Calls a self-hosted function by name.
+ */
+bool CallSelfHostedFunction(JSContext* cx, Handle<PropertyName*> name,
+ HandleValue thisv, const AnyInvokeArgs& args,
+ MutableHandleValue rval);
+
+bool intrinsic_NewArrayIterator(JSContext* cx, unsigned argc, JS::Value* vp);
+
+bool intrinsic_NewStringIterator(JSContext* cx, unsigned argc, JS::Value* vp);
+
+bool intrinsic_NewRegExpStringIterator(JSContext* cx, unsigned argc,
+ JS::Value* vp);
+
+#ifdef ENABLE_RECORD_TUPLE
+bool IsTupleUnchecked(JSContext* cx, const CallArgs& args);
+bool intrinsic_IsTuple(JSContext* cx, unsigned argc, JS::Value* vp);
+#endif
+
+} /* namespace js */
+
+#endif /* vm_SelfHosting_h_ */
diff --git a/js/src/vm/Shape-inl.h b/js/src/vm/Shape-inl.h
new file mode 100644
index 0000000000..ae8c4bb844
--- /dev/null
+++ b/js/src/vm/Shape-inl.h
@@ -0,0 +1,105 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Shape_inl_h
+#define vm_Shape_inl_h
+
+#include "vm/Shape.h"
+
+#include "vm/JSObject.h"
+#include "vm/PropertyResult.h"
+
+#include "gc/GCContext-inl.h"
+#include "gc/Marking-inl.h"
+#include "vm/PropMap-inl.h"
+
+namespace js {
+
+template <class ObjectSubclass>
+/* static */ inline bool SharedShape::ensureInitialCustomShape(
+ JSContext* cx, Handle<ObjectSubclass*> obj) {
+ static_assert(std::is_base_of_v<JSObject, ObjectSubclass>,
+ "ObjectSubclass must be a subclass of JSObject");
+
+ // If the provided object has a non-empty shape, it was given the cached
+ // initial shape when created: nothing to do.
+ if (!obj->empty()) {
+ return true;
+ }
+
+ // Ensure the initial shape isn't collected under assignInitialShape, to
+ // simplify insertInitialShape.
+ Rooted<Shape*> emptyShape(cx, obj->shape());
+
+ // If no initial shape was assigned, do so.
+ Rooted<SharedShape*> shape(cx, ObjectSubclass::assignInitialShape(cx, obj));
+ if (!shape) {
+ return false;
+ }
+ MOZ_ASSERT(!obj->empty());
+
+ // Cache the initial shape, so that future instances will begin life with that
+ // shape.
+ SharedShape::insertInitialShape(cx, shape);
+ return true;
+}
+
+MOZ_ALWAYS_INLINE PropMap* NativeShape::lookup(JSContext* cx, PropertyKey key,
+ uint32_t* index) {
+ uint32_t len = propMapLength();
+ return len > 0 ? propMap_->lookup(cx, len, key, index) : nullptr;
+}
+
+MOZ_ALWAYS_INLINE PropMap* NativeShape::lookupPure(PropertyKey key,
+ uint32_t* index) {
+ uint32_t len = propMapLength();
+ return len > 0 ? propMap_->lookupPure(len, key, index) : nullptr;
+}
+
+inline void Shape::purgeCache(JS::GCContext* gcx) {
+ if (cache_.isShapeSetForAdd()) {
+ gcx->delete_(this, cache_.toShapeSetForAdd(), MemoryUse::ShapeSetForAdd);
+ }
+ cache_.setNone();
+}
+
+inline void Shape::finalize(JS::GCContext* gcx) {
+ if (!cache_.isNone()) {
+ purgeCache(gcx);
+ }
+ if (isWasmGC()) {
+ asWasmGC().finalize(gcx);
+ }
+}
+
+inline void WasmGCShape::init() { recGroup_->AddRef(); }
+
+inline void WasmGCShape::finalize(JS::GCContext* gcx) { recGroup_->Release(); }
+
+inline SharedPropMap* SharedShape::propMapMaybeForwarded() const {
+ MOZ_ASSERT(isShared());
+ PropMap* propMap = propMap_;
+ return propMap ? MaybeForwarded(propMap)->asShared() : nullptr;
+}
+
+static inline JS::PropertyAttributes GetPropertyAttributes(
+ JSObject* obj, PropertyResult prop) {
+ MOZ_ASSERT(obj->is<NativeObject>());
+
+ if (prop.isDenseElement()) {
+ return obj->as<NativeObject>().getElementsHeader()->elementAttributes();
+ }
+ if (prop.isTypedArrayElement()) {
+ return {JS::PropertyAttribute::Configurable,
+ JS::PropertyAttribute::Enumerable, JS::PropertyAttribute::Writable};
+ }
+
+ return prop.propertyInfo().propAttributes();
+}
+
+} /* namespace js */
+
+#endif /* vm_Shape_inl_h */
diff --git a/js/src/vm/Shape.cpp b/js/src/vm/Shape.cpp
new file mode 100644
index 0000000000..94315b433a
--- /dev/null
+++ b/js/src/vm/Shape.cpp
@@ -0,0 +1,1484 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Shape-inl.h"
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/PodOperations.h"
+
+#include "gc/HashUtil.h"
+#include "js/friend/WindowProxy.h" // js::IsWindow
+#include "js/HashTable.h"
+#include "js/UniquePtr.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/ShapeZone.h"
+#include "vm/Watchtower.h"
+
+#include "gc/StableCellHasher-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+
+using mozilla::CeilingLog2Size;
+using mozilla::PodZero;
+
+using JS::AutoCheckCannotGC;
+
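+// Replace |obj|'s shape with an equivalent shape that has the given object
+// flags, proto, and fixed slot count, reusing the existing property map (if
+// any).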
+/* static */
+bool Shape::replaceShape(JSContext* cx, HandleObject obj,
+ ObjectFlags objectFlags, TaggedProto proto,
+ uint32_t nfixed) {
+ Shape* newShape;
+ switch (obj->shape()->kind()) {
+ case Kind::Shared: {
+ Handle<NativeObject*> nobj = obj.as<NativeObject>();
+ if (nobj->shape()->propMap()) {
+ Rooted<BaseShape*> base(cx, obj->shape()->base());
+ if (proto != base->proto()) {
+ Rooted<TaggedProto> protoRoot(cx, proto);
+ base = BaseShape::get(cx, base->clasp(), base->realm(), protoRoot);
+ if (!base) {
+ return false;
+ }
+ }
+ Rooted<SharedPropMap*> map(cx, nobj->sharedShape()->propMap());
+ uint32_t mapLength = nobj->shape()->propMapLength();
+ newShape = SharedShape::getPropMapShape(cx, base, nfixed, map,
+ mapLength, objectFlags);
+ } else {
+ newShape = SharedShape::getInitialShape(
+ cx, obj->shape()->getObjectClass(), obj->shape()->realm(), proto,
+ nfixed, objectFlags);
+ }
+ break;
+ }
+ case Kind::Dictionary: {
+ Handle<NativeObject*> nobj = obj.as<NativeObject>();
+
+ Rooted<BaseShape*> base(cx, nobj->shape()->base());
+ if (proto != base->proto()) {
+ Rooted<TaggedProto> protoRoot(cx, proto);
+ base = BaseShape::get(cx, nobj->getClass(), nobj->realm(), protoRoot);
+ if (!base) {
+ return false;
+ }
+ }
+
+ Rooted<DictionaryPropMap*> map(cx, nobj->dictionaryShape()->propMap());
+ uint32_t mapLength = nobj->shape()->propMapLength();
+ newShape =
+ DictionaryShape::new_(cx, base, objectFlags, nfixed, map, mapLength);
+ break;
+ }
+ case Kind::Proxy:
+ MOZ_ASSERT(nfixed == 0);
+ newShape =
+ ProxyShape::getShape(cx, obj->shape()->getObjectClass(),
+ obj->shape()->realm(), proto, objectFlags);
+ break;
+ case Kind::WasmGC:
+ MOZ_ASSERT(nfixed == 0);
+ const wasm::RecGroup* recGroup = obj->shape()->asWasmGC().recGroup();
+ newShape = WasmGCShape::getShape(cx, obj->shape()->getObjectClass(),
+ obj->shape()->realm(), proto, recGroup,
+ objectFlags);
+ break;
+ }
+ if (!newShape) {
+ return false;
+ }
+
+ obj->setShape(newShape);
+ return true;
+}
+
+/* static */
+bool js::NativeObject::toDictionaryMode(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ MOZ_ASSERT(!obj->inDictionaryMode());
+ MOZ_ASSERT(cx->isInsideCurrentCompartment(obj));
+
+ Rooted<NativeShape*> shape(cx, obj->shape());
+ uint32_t span = obj->slotSpan();
+
+ uint32_t mapLength = shape->propMapLength();
+ MOZ_ASSERT(mapLength > 0, "shouldn't convert empty object to dictionary");
+
+ // Clone the shared property map to an unshared dictionary map.
+ Rooted<SharedPropMap*> map(cx, shape->propMap()->asShared());
+ Rooted<DictionaryPropMap*> dictMap(
+ cx, SharedPropMap::toDictionaryMap(cx, map, mapLength));
+ if (!dictMap) {
+ return false;
+ }
+
+ // Allocate and use a new dictionary shape.
+ Rooted<BaseShape*> base(cx, shape->base());
+ shape = DictionaryShape::new_(cx, base, shape->objectFlags(),
+ shape->numFixedSlots(), dictMap, mapLength);
+ if (!shape) {
+ return false;
+ }
+ obj->setShape(shape);
+
+ MOZ_ASSERT(obj->inDictionaryMode());
+ obj->setDictionaryModeSlotSpan(span);
+
+ return true;
+}
+
+namespace js {
+
+class MOZ_RAII AutoCheckShapeConsistency {
+#ifdef DEBUG
+ Handle<NativeObject*> obj_;
+#endif
+
+ public:
+ explicit AutoCheckShapeConsistency(Handle<NativeObject*> obj)
+#ifdef DEBUG
+ : obj_(obj)
+#endif
+ {
+ }
+
+#ifdef DEBUG
+ ~AutoCheckShapeConsistency() { obj_->checkShapeConsistency(); }
+#endif
+};
+
+} // namespace js
+
+/* static */ MOZ_ALWAYS_INLINE bool
+NativeObject::maybeConvertToDictionaryForAdd(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ if (obj->inDictionaryMode()) {
+ return true;
+ }
+ SharedPropMap* map = obj->sharedShape()->propMap();
+ if (!map) {
+ return true;
+ }
+ if (MOZ_LIKELY(!map->shouldConvertToDictionaryForAdd())) {
+ return true;
+ }
+ return toDictionaryMode(cx, obj);
+}
+
+static void AssertValidCustomDataProp(NativeObject* obj, PropertyFlags flags) {
+ // We only support custom data properties on ArrayObject and ArgumentsObject.
+ // The mechanism is deprecated so we don't want to add new uses.
+ MOZ_ASSERT(flags.isCustomDataProperty());
+ MOZ_ASSERT(!flags.isAccessorProperty());
+ MOZ_ASSERT(obj->is<ArrayObject>() || obj->is<ArgumentsObject>());
+}
+
+/* static */
+bool NativeObject::addCustomDataProperty(JSContext* cx,
+ Handle<NativeObject*> obj, HandleId id,
+ PropertyFlags flags) {
+ MOZ_ASSERT(!id.isVoid());
+ MOZ_ASSERT(!id.isPrivateName());
+ MOZ_ASSERT(!obj->containsPure(id));
+
+ AutoCheckShapeConsistency check(obj);
+ AssertValidCustomDataProp(obj, flags);
+
+ if (!Watchtower::watchPropertyAdd(cx, obj, id)) {
+ return false;
+ }
+
+ if (!maybeConvertToDictionaryForAdd(cx, obj)) {
+ return false;
+ }
+
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+ const JSClass* clasp = obj->shape()->getObjectClass();
+
+ if (obj->inDictionaryMode()) {
+ // First generate a new dictionary shape so that the map can be mutated
+ // without having to worry about OOM conditions.
+ if (!NativeObject::generateNewDictionaryShape(cx, obj)) {
+ return false;
+ }
+
+ Rooted<DictionaryPropMap*> map(cx, obj->dictionaryShape()->propMap());
+ uint32_t mapLength = obj->shape()->propMapLength();
+ if (!DictionaryPropMap::addProperty(cx, clasp, &map, &mapLength, id, flags,
+ SHAPE_INVALID_SLOT, &objectFlags)) {
+ return false;
+ }
+
+ obj->dictionaryShape()->updateNewShape(objectFlags, map, mapLength);
+ return true;
+ }
+
+ Rooted<SharedPropMap*> map(cx, obj->sharedShape()->propMap());
+ uint32_t mapLength = obj->shape()->propMapLength();
+ if (!SharedPropMap::addCustomDataProperty(cx, clasp, &map, &mapLength, id,
+ flags, &objectFlags)) {
+ return false;
+ }
+
+ Shape* shape = SharedShape::getPropMapShape(cx, obj->shape()->base(),
+ obj->shape()->numFixedSlots(),
+ map, mapLength, objectFlags);
+ if (!shape) {
+ return false;
+ }
+
+ obj->setShape(shape);
+ return true;
+}
+
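+// Build a two-entry shape set from two add-transition shapes, keyed on each
+// shape's last property (key, flags). Used to upgrade a single-shape add
+// cache to a set once a second transition from the same shape is seen.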
+static ShapeSetForAdd* MakeShapeSetForAdd(SharedShape* shape1,
+ SharedShape* shape2) {
+ MOZ_ASSERT(shape1 != shape2);
+ MOZ_ASSERT(shape1->propMapLength() == shape2->propMapLength());
+
+ auto hash = MakeUnique<ShapeSetForAdd>();
+ if (!hash || !hash->reserve(2)) {
+ return nullptr;
+ }
+
+ PropertyInfoWithKey prop = shape1->lastProperty();
+ hash->putNewInfallible(ShapeForAddHasher::Lookup(prop.key(), prop.flags()),
+ shape1);
+
+ prop = shape2->lastProperty();
+ hash->putNewInfallible(ShapeForAddHasher::Lookup(prop.key(), prop.flags()),
+ shape2);
+
+ return hash.release();
+}
+
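+// Look up a cached (key, flags) => shape add-transition in |shape|'s cache,
+// which holds either a single shape or a set of shapes. On a hit, return the
+// new shape and store its last property's slot in |*slot|; otherwise return
+// nullptr.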
+static MOZ_ALWAYS_INLINE SharedShape* LookupShapeForAdd(Shape* shape,
+ PropertyKey key,
+ PropertyFlags flags,
+ uint32_t* slot) {
+ ShapeCachePtr cache = shape->cache();
+
+ if (cache.isSingleShapeForAdd()) {
+ SharedShape* newShape = cache.toSingleShapeForAdd();
+ if (newShape->lastPropertyMatchesForAdd(key, flags, slot)) {
+ return newShape;
+ }
+ return nullptr;
+ }
+
+ if (cache.isShapeSetForAdd()) {
+ ShapeSetForAdd* set = cache.toShapeSetForAdd();
+ ShapeForAddHasher::Lookup lookup(key, flags);
+ if (auto p = set->lookup(lookup)) {
+ SharedShape* newShape = *p;
+ *slot = newShape->lastProperty().slot();
+ return newShape;
+ }
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!cache.isForAdd());
+ return nullptr;
+}
+
+// Add shapes with a non-None ShapeCachePtr to the shapesWithCache list so that
+// these caches can be discarded on GC.
+static bool RegisterShapeCache(JSContext* cx, Shape* shape) {
+ ShapeCachePtr cache = shape->cache();
+ if (!cache.isNone()) {
+ // Already registered this shape.
+ return true;
+ }
+ return cx->zone()->shapeZone().shapesWithCache.append(shape);
+}
+
+/* static */
+bool NativeObject::addProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, PropertyFlags flags,
+ uint32_t* slot) {
+ AutoCheckShapeConsistency check(obj);
+ MOZ_ASSERT(!flags.isCustomDataProperty(),
+ "Use addCustomDataProperty for custom data properties");
+
+ // The object must not contain a property named |id|. The object must be
+ // extensible, but allow private fields and sparsifying dense elements.
+ MOZ_ASSERT(!id.isVoid());
+ MOZ_ASSERT(!obj->containsPure(id));
+ MOZ_ASSERT_IF(!id.isPrivateName(),
+ obj->isExtensible() ||
+ (id.isInt() && obj->containsDenseElement(id.toInt())) ||
+ // R&T wrappers are non-extensible, but we still want to be
+ // able to lazily resolve their properties. We can
+ // special-case them to allow doing so.
+ IF_RECORD_TUPLE(IsExtendedPrimitiveWrapper(*obj), false));
+
+ if (!Watchtower::watchPropertyAdd(cx, obj, id)) {
+ return false;
+ }
+
+ if (!maybeConvertToDictionaryForAdd(cx, obj)) {
+ return false;
+ }
+
+ if (auto* shape = LookupShapeForAdd(obj->shape(), id, flags, slot)) {
+ return obj->setShapeAndAddNewSlot(cx, shape, *slot);
+ }
+
+ if (obj->inDictionaryMode()) {
+ // First generate a new dictionary shape so that the map and shape can be
+ // mutated without having to worry about OOM conditions.
+ if (!NativeObject::generateNewDictionaryShape(cx, obj)) {
+ return false;
+ }
+ if (!allocDictionarySlot(cx, obj, slot)) {
+ return false;
+ }
+
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+ const JSClass* clasp = obj->shape()->getObjectClass();
+
+ Rooted<DictionaryPropMap*> map(cx, obj->shape()->propMap()->asDictionary());
+ uint32_t mapLength = obj->shape()->propMapLength();
+ if (!DictionaryPropMap::addProperty(cx, clasp, &map, &mapLength, id, flags,
+ *slot, &objectFlags)) {
+ return false;
+ }
+
+ obj->dictionaryShape()->updateNewShape(objectFlags, map, mapLength);
+ return true;
+ }
+
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+ const JSClass* clasp = obj->shape()->getObjectClass();
+
+ Rooted<SharedPropMap*> map(cx, obj->sharedShape()->propMap());
+ uint32_t mapLength = obj->shape()->propMapLength();
+
+ if (!SharedPropMap::addProperty(cx, clasp, &map, &mapLength, id, flags,
+ &objectFlags, slot)) {
+ return false;
+ }
+
+ bool allocatedNewShape;
+ SharedShape* newShape = SharedShape::getPropMapShape(
+ cx, obj->shape()->base(), obj->shape()->numFixedSlots(), map, mapLength,
+ objectFlags, &allocatedNewShape);
+ if (!newShape) {
+ return false;
+ }
+
+ Shape* oldShape = obj->shape();
+ if (!obj->setShapeAndAddNewSlot(cx, newShape, *slot)) {
+ return false;
+ }
+
+ // Add the new shape to the old shape's shape cache, to optimize this shape
+ // transition. Don't do this if we just allocated a new shape, because that
+ // suggests this may not be a hot transition that would benefit from the
+ // cache.
+
+ if (allocatedNewShape) {
+ return true;
+ }
+
+ if (!RegisterShapeCache(cx, oldShape)) {
+ // Ignore OOM, the cache is just an optimization.
+ return true;
+ }
+
+ ShapeCachePtr& cache = oldShape->cacheRef();
+ if (!cache.isForAdd()) {
+ cache.setSingleShapeForAdd(newShape);
+ } else if (cache.isSingleShapeForAdd()) {
+ SharedShape* prevShape = cache.toSingleShapeForAdd();
+ if (ShapeSetForAdd* set = MakeShapeSetForAdd(prevShape, newShape)) {
+ cache.setShapeSetForAdd(set);
+ AddCellMemory(oldShape, sizeof(ShapeSetForAdd),
+ MemoryUse::ShapeSetForAdd);
+ }
+ } else {
+ ShapeForAddHasher::Lookup lookup(id, flags);
+ (void)cache.toShapeSetForAdd()->putNew(lookup, newShape);
+ }
+
+ return true;
+}
+
+void Shape::maybeCacheIterator(JSContext* cx, PropertyIteratorObject* iter) {
+ if (!cache().isNone() && !cache().isIterator()) {
+ // If we're already caching other shape data, skip caching the iterator.
+ return;
+ }
+ if (MOZ_UNLIKELY(!RegisterShapeCache(cx, this))) {
+ // Ignore OOM. The cache is just an optimization.
+ return;
+ }
+ cacheRef().setIterator(iter);
+}
+
+/* static */
+bool NativeObject::addPropertyInReservedSlot(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id, uint32_t slot,
+ PropertyFlags flags) {
+ AutoCheckShapeConsistency check(obj);
+ MOZ_ASSERT(!flags.isCustomDataProperty(),
+ "Use addCustomDataProperty for custom data properties");
+
+ // The slot must be a reserved slot.
+ MOZ_ASSERT(slot < JSCLASS_RESERVED_SLOTS(obj->getClass()));
+
+ // The object must not contain a property named |id| and must be extensible.
+ MOZ_ASSERT(!id.isVoid());
+ MOZ_ASSERT(!obj->containsPure(id));
+ MOZ_ASSERT(!id.isPrivateName());
+ MOZ_ASSERT(obj->isExtensible());
+
+ // The object must not be in dictionary mode. This simplifies the code below.
+ MOZ_ASSERT(!obj->inDictionaryMode());
+
+ // We don't need to call Watchtower::watchPropertyAdd here because this isn't
+ // used for any watched objects.
+ MOZ_ASSERT(!Watchtower::watchesPropertyAdd(obj));
+
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+ const JSClass* clasp = obj->shape()->getObjectClass();
+
+ Rooted<SharedPropMap*> map(cx, obj->sharedShape()->propMap());
+ uint32_t mapLength = obj->shape()->propMapLength();
+ if (!SharedPropMap::addPropertyInReservedSlot(cx, clasp, &map, &mapLength, id,
+ flags, slot, &objectFlags)) {
+ return false;
+ }
+
+ Shape* shape = SharedShape::getPropMapShape(cx, obj->shape()->base(),
+ obj->shape()->numFixedSlots(),
+ map, mapLength, objectFlags);
+ if (!shape) {
+ return false;
+ }
+ obj->setShape(shape);
+
+ MOZ_ASSERT(obj->getLastProperty().slot() == slot);
+ return true;
+}
+
+/*
+ * Assert some invariants that should hold when changing properties. It's the
+ * responsibility of the callers to ensure these hold.
+ */
+static void AssertCanChangeFlags(PropertyInfo prop, PropertyFlags flags) {
+#ifdef DEBUG
+ if (prop.configurable()) {
+ return;
+ }
+
+ // A non-configurable property must stay non-configurable.
+ MOZ_ASSERT(!flags.configurable());
+
+ // Reject attempts to turn a non-configurable data property into an accessor
+ // or custom data property.
+ MOZ_ASSERT_IF(prop.isDataProperty(), flags.isDataProperty());
+
+ // Reject attempts to turn a non-configurable accessor property into a data
+ // property or custom data property.
+ MOZ_ASSERT_IF(prop.isAccessorProperty(), flags.isAccessorProperty());
+#endif
+}
+
+static void AssertValidArrayIndex(NativeObject* obj, jsid id) {
+#ifdef DEBUG
+ if (obj->is<ArrayObject>()) {
+ ArrayObject* arr = &obj->as<ArrayObject>();
+ uint32_t index;
+ if (IdIsIndex(id, &index)) {
+ MOZ_ASSERT(index < arr->length() || arr->lengthIsWritable());
+ }
+ }
+#endif
+}
+
+/* static */
+bool NativeObject::changeProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, PropertyFlags flags,
+ uint32_t* slotOut) {
+ MOZ_ASSERT(!id.isVoid());
+
+ AutoCheckShapeConsistency check(obj);
+ AssertValidArrayIndex(obj, id);
+ MOZ_ASSERT(!flags.isCustomDataProperty(),
+ "Use changeCustomDataPropAttributes for custom data properties");
+
+ if (!Watchtower::watchPropertyChange(cx, obj, id, flags)) {
+ return false;
+ }
+
+ Rooted<PropMap*> map(cx, obj->shape()->propMap());
+ uint32_t mapLength = obj->shape()->propMapLength();
+
+ uint32_t propIndex;
+ Rooted<PropMap*> propMap(cx, map->lookup(cx, mapLength, id, &propIndex));
+ MOZ_ASSERT(propMap);
+
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+
+ PropertyInfo oldProp = propMap->getPropertyInfo(propIndex);
+ AssertCanChangeFlags(oldProp, flags);
+
+ if (oldProp.isAccessorProperty()) {
+ objectFlags.setFlag(ObjectFlag::HadGetterSetterChange);
+ }
+
+ // If the property flags are not changing, the only thing we have to do is
+ // update the object flags. This prevents a dictionary mode conversion below.
+ if (oldProp.flags() == flags) {
+ *slotOut = oldProp.slot();
+ if (objectFlags == obj->shape()->objectFlags()) {
+ return true;
+ }
+ return Shape::replaceShape(cx, obj, objectFlags, obj->shape()->proto(),
+ obj->shape()->numFixedSlots());
+ }
+
+ const JSClass* clasp = obj->shape()->getObjectClass();
+
+ if (map->isShared()) {
+ // Fast path for changing the last property in a SharedPropMap. Call
+ // getPrevious to "remove" the last property and then call addProperty
+ // to re-add the last property with the new flags.
+ if (propMap == map && propIndex == mapLength - 1) {
+ MOZ_ASSERT(obj->getLastProperty().key() == id);
+
+ Rooted<SharedPropMap*> sharedMap(cx, map->asShared());
+ SharedPropMap::getPrevious(&sharedMap, &mapLength);
+
+ if (MOZ_LIKELY(oldProp.hasSlot())) {
+ *slotOut = oldProp.slot();
+ if (!SharedPropMap::addPropertyWithKnownSlot(cx, clasp, &sharedMap,
+ &mapLength, id, flags,
+ *slotOut, &objectFlags)) {
+ return false;
+ }
+ } else {
+ if (!SharedPropMap::addProperty(cx, clasp, &sharedMap, &mapLength, id,
+ flags, &objectFlags, slotOut)) {
+ return false;
+ }
+ }
+
+ SharedShape* newShape = SharedShape::getPropMapShape(
+ cx, obj->shape()->base(), obj->shape()->numFixedSlots(), sharedMap,
+ mapLength, objectFlags);
+ if (!newShape) {
+ return false;
+ }
+
+ if (MOZ_LIKELY(oldProp.hasSlot())) {
+ MOZ_ASSERT(obj->sharedShape()->slotSpan() == newShape->slotSpan());
+ obj->setShape(newShape);
+ return true;
+ }
+ return obj->setShapeAndAddNewSlot(cx, newShape, *slotOut);
+ }
+
+ // Changing a non-last property. Switch to dictionary mode and relookup
+ // pointers for the new dictionary map.
+ if (!NativeObject::toDictionaryMode(cx, obj)) {
+ return false;
+ }
+ map = obj->shape()->propMap();
+ propMap = map->lookup(cx, mapLength, id, &propIndex);
+ MOZ_ASSERT(propMap);
+ } else {
+ if (!NativeObject::generateNewDictionaryShape(cx, obj)) {
+ return false;
+ }
+ }
+
+ // The object has a new dictionary shape (see toDictionaryMode and
+ // generateNewDictionaryShape calls above), so we can mutate the map and shape
+ // in place.
+
+ MOZ_ASSERT(map->isDictionary());
+ MOZ_ASSERT(propMap->isDictionary());
+
+ uint32_t slot = oldProp.hasSlot() ? oldProp.slot() : SHAPE_INVALID_SLOT;
+ if (slot == SHAPE_INVALID_SLOT) {
+ if (!allocDictionarySlot(cx, obj, &slot)) {
+ return false;
+ }
+ }
+
+ propMap->asDictionary()->changeProperty(cx, clasp, propIndex, flags, slot,
+ &objectFlags);
+ obj->dictionaryShape()->setObjectFlagsOfNewShape(objectFlags);
+
+ *slotOut = slot;
+ return true;
+}
+
+/* static */
+bool NativeObject::changeCustomDataPropAttributes(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id,
+ PropertyFlags flags) {
+ MOZ_ASSERT(!id.isVoid());
+
+ AutoCheckShapeConsistency check(obj);
+ AssertValidArrayIndex(obj, id);
+ AssertValidCustomDataProp(obj, flags);
+
+ if (!Watchtower::watchPropertyChange(cx, obj, id, flags)) {
+ return false;
+ }
+
+ Rooted<PropMap*> map(cx, obj->shape()->propMap());
+ uint32_t mapLength = obj->shape()->propMapLength();
+
+ uint32_t propIndex;
+ Rooted<PropMap*> propMap(cx, map->lookup(cx, mapLength, id, &propIndex));
+ MOZ_ASSERT(propMap);
+
+ PropertyInfo oldProp = propMap->getPropertyInfo(propIndex);
+ MOZ_ASSERT(oldProp.isCustomDataProperty());
+ AssertCanChangeFlags(oldProp, flags);
+
+ // If the property flags are not changing, we're done.
+ if (oldProp.flags() == flags) {
+ return true;
+ }
+
+ const JSClass* clasp = obj->shape()->getObjectClass();
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+
+ if (map->isShared()) {
+ // Fast path for changing the last property in a SharedPropMap. Call
+ // getPrevious to "remove" the last property and then call
+ // addCustomDataProperty to re-add the last property with the new flags.
+ if (propMap == map && propIndex == mapLength - 1) {
+ MOZ_ASSERT(obj->getLastProperty().key() == id);
+
+ Rooted<SharedPropMap*> sharedMap(cx, map->asShared());
+ SharedPropMap::getPrevious(&sharedMap, &mapLength);
+
+ if (!SharedPropMap::addCustomDataProperty(
+ cx, clasp, &sharedMap, &mapLength, id, flags, &objectFlags)) {
+ return false;
+ }
+
+ Shape* newShape = SharedShape::getPropMapShape(
+ cx, obj->shape()->base(), obj->shape()->numFixedSlots(), sharedMap,
+ mapLength, objectFlags);
+ if (!newShape) {
+ return false;
+ }
+ obj->setShape(newShape);
+ return true;
+ }
+
+ // Changing a non-last property. Switch to dictionary mode and relookup
+ // pointers for the new dictionary map.
+ if (!NativeObject::toDictionaryMode(cx, obj)) {
+ return false;
+ }
+ map = obj->shape()->propMap();
+ propMap = map->lookup(cx, mapLength, id, &propIndex);
+ MOZ_ASSERT(propMap);
+ } else {
+ if (!NativeObject::generateNewDictionaryShape(cx, obj)) {
+ return false;
+ }
+ }
+
+ // The object has a new dictionary shape (see toDictionaryMode and
+ // generateNewDictionaryShape calls above), so we can mutate the map and shape
+ // in place.
+
+ MOZ_ASSERT(map->isDictionary());
+ MOZ_ASSERT(propMap->isDictionary());
+
+ propMap->asDictionary()->changePropertyFlags(cx, clasp, propIndex, flags,
+ &objectFlags);
+ obj->dictionaryShape()->setObjectFlagsOfNewShape(objectFlags);
+ return true;
+}
+
+void NativeObject::maybeFreeDictionaryPropSlots(JSContext* cx,
+ DictionaryPropMap* map,
+ uint32_t mapLength) {
+ // We can free all non-reserved slots if there are no properties left. We also
+ // handle the case where there's a single slotless property, to support arrays
+ // (array.length is a custom data property).
+
+ MOZ_ASSERT(dictionaryShape()->propMap() == map);
+ MOZ_ASSERT(shape()->propMapLength() == mapLength);
+
+ if (mapLength > 1 || map->previous()) {
+ return;
+ }
+ if (mapLength == 1 && map->getPropertyInfo(0).hasSlot()) {
+ return;
+ }
+
+ uint32_t oldSpan = dictionaryModeSlotSpan();
+ uint32_t newSpan = JSCLASS_RESERVED_SLOTS(getClass());
+ if (oldSpan == newSpan) {
+ return;
+ }
+
+ MOZ_ASSERT(newSpan < oldSpan);
+
+ // Trigger write barriers on the old slots before reallocating.
+ prepareSlotRangeForOverwrite(newSpan, oldSpan);
+ invalidateSlotRange(newSpan, oldSpan);
+
+ uint32_t oldCapacity = numDynamicSlots();
+ uint32_t newCapacity =
+ calculateDynamicSlots(numFixedSlots(), newSpan, getClass());
+ if (newCapacity < oldCapacity) {
+ shrinkSlots(cx, oldCapacity, newCapacity);
+ }
+
+ setDictionaryModeSlotSpan(newSpan);
+ map->setFreeList(SHAPE_INVALID_SLOT);
+}
+
+void NativeObject::setShapeAndRemoveLastSlot(JSContext* cx,
+ SharedShape* newShape,
+ uint32_t slot) {
+ MOZ_ASSERT(!inDictionaryMode());
+ MOZ_ASSERT(newShape->isShared());
+ MOZ_ASSERT(newShape->slotSpan() == slot);
+
+ uint32_t numFixed = newShape->numFixedSlots();
+ if (slot < numFixed) {
+ setFixedSlot(slot, UndefinedValue());
+ } else {
+ setDynamicSlot(numFixed, slot, UndefinedValue());
+ uint32_t oldCapacity = numDynamicSlots();
+ uint32_t newCapacity = calculateDynamicSlots(numFixed, slot, getClass());
+ MOZ_ASSERT(newCapacity <= oldCapacity);
+ if (newCapacity < oldCapacity) {
+ shrinkSlots(cx, oldCapacity, newCapacity);
+ }
+ }
+
+ setShape(newShape);
+}
+
+/* static */
+bool NativeObject::removeProperty(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id) {
+ AutoCheckShapeConsistency check(obj);
+
+ Rooted<PropMap*> map(cx, obj->shape()->propMap());
+ uint32_t mapLength = obj->shape()->propMapLength();
+
+ AutoKeepPropMapTables keep(cx);
+ PropMapTable* table;
+ PropMapTable::Ptr ptr;
+ Rooted<PropMap*> propMap(cx);
+ uint32_t propIndex;
+ if (!PropMap::lookupForRemove(cx, map, mapLength, id, keep, propMap.address(),
+ &propIndex, &table, &ptr)) {
+ return false;
+ }
+
+ if (!propMap) {
+ return true;
+ }
+
+ if (!Watchtower::watchPropertyRemove(cx, obj, id)) {
+ return false;
+ }
+
+ PropertyInfo prop = propMap->getPropertyInfo(propIndex);
+
+ // If we're removing an accessor property, ensure the HadGetterSetterChange
+ // object flag is set. This is necessary because the slot holding the
+ // GetterSetter can be changed indirectly by removing the property and then
+ // adding it back with a different GetterSetter value but the same shape.
+ if (prop.isAccessorProperty() && !obj->hadGetterSetterChange()) {
+ if (!NativeObject::setHadGetterSetterChange(cx, obj)) {
+ return false;
+ }
+ }
+
+ if (map->isShared()) {
+ // Fast path for removing the last property from a SharedPropMap. In this
+ // case we can just call getPrevious and then look up a shape for the
+ // resulting map/mapLength.
+ if (propMap == map && propIndex == mapLength - 1) {
+ MOZ_ASSERT(obj->getLastProperty().key() == id);
+
+ Rooted<SharedPropMap*> sharedMap(cx, map->asShared());
+ SharedPropMap::getPrevious(&sharedMap, &mapLength);
+
+ SharedShape* shape = obj->sharedShape();
+ SharedShape* newShape;
+ if (sharedMap) {
+ newShape = SharedShape::getPropMapShape(
+ cx, shape->base(), shape->numFixedSlots(), sharedMap, mapLength,
+ shape->objectFlags());
+ } else {
+ newShape = SharedShape::getInitialShape(
+ cx, shape->getObjectClass(), shape->realm(), shape->proto(),
+ shape->numFixedSlots(), shape->objectFlags());
+ }
+ if (!newShape) {
+ return false;
+ }
+
+ if (MOZ_LIKELY(prop.hasSlot())) {
+ if (MOZ_LIKELY(prop.slot() == newShape->slotSpan())) {
+ obj->setShapeAndRemoveLastSlot(cx, newShape, prop.slot());
+ return true;
+ }
+ // Uncommon case: the property is stored in a reserved slot.
+ // See NativeObject::addPropertyInReservedSlot.
+ MOZ_ASSERT(prop.slot() < JSCLASS_RESERVED_SLOTS(obj->getClass()));
+ obj->setSlot(prop.slot(), UndefinedValue());
+ }
+ obj->setShape(newShape);
+ return true;
+ }
+
+ // Removing a non-last property. Switch to dictionary mode and relookup
+ // pointers for the new dictionary map.
+ if (!NativeObject::toDictionaryMode(cx, obj)) {
+ return false;
+ }
+ map = obj->shape()->propMap();
+ if (!PropMap::lookupForRemove(cx, map, mapLength, id, keep,
+ propMap.address(), &propIndex, &table,
+ &ptr)) {
+ return false;
+ }
+ } else {
+ if (!NativeObject::generateNewDictionaryShape(cx, obj)) {
+ return false;
+ }
+ }
+
+ // The object has a new dictionary shape (see toDictionaryMode and
+ // generateNewDictionaryShape calls above), so we can mutate the map and shape
+ // in place.
+
+ MOZ_ASSERT(map->isDictionary());
+ MOZ_ASSERT(table);
+ MOZ_ASSERT(prop == ptr->propertyInfo());
+
+ Rooted<DictionaryPropMap*> dictMap(cx, map->asDictionary());
+
+ // If the property has a slot, free its slot number.
+ if (prop.hasSlot()) {
+ obj->freeDictionarySlot(prop.slot());
+ }
+
+ DictionaryPropMap::removeProperty(cx, &dictMap, &mapLength, table, ptr);
+
+ obj->dictionaryShape()->updateNewShape(obj->shape()->objectFlags(), dictMap,
+ mapLength);
+
+ // If we just deleted the last property, consider shrinking the slots. We only
+ // do this if there are a lot of slots, to avoid allocating/freeing dynamic
+ // slots repeatedly.
+ static constexpr size_t MinSlotSpanForFree = 64;
+ if (obj->dictionaryModeSlotSpan() >= MinSlotSpanForFree) {
+ obj->maybeFreeDictionaryPropSlots(cx, dictMap, mapLength);
+ }
+
+ return true;
+}
+
+/* static */
+bool NativeObject::densifySparseElements(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ AutoCheckShapeConsistency check(obj);
+ MOZ_ASSERT(obj->inDictionaryMode());
+
+ // First generate a new dictionary shape so that the shape and map can then
+ // be updated infallibly.
+ if (!NativeObject::generateNewDictionaryShape(cx, obj)) {
+ return false;
+ }
+
+ Rooted<DictionaryPropMap*> map(cx, obj->shape()->propMap()->asDictionary());
+ uint32_t mapLength = obj->shape()->propMapLength();
+
+ DictionaryPropMap::densifyElements(cx, &map, &mapLength, obj);
+
+ // All indexed properties on the object are now dense. Clear the indexed
+ // flag so that we will not start using sparse indexes again if we need
+ // to grow the object.
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+ objectFlags.clearFlag(ObjectFlag::Indexed);
+
+ obj->dictionaryShape()->updateNewShape(objectFlags, map, mapLength);
+
+ obj->maybeFreeDictionaryPropSlots(cx, map, mapLength);
+
+ return true;
+}
+
+// static
+bool NativeObject::freezeOrSealProperties(JSContext* cx,
+ Handle<NativeObject*> obj,
+ IntegrityLevel level) {
+ AutoCheckShapeConsistency check(obj);
+
+ if (!Watchtower::watchFreezeOrSeal(cx, obj)) {
+ return false;
+ }
+
+ uint32_t mapLength = obj->shape()->propMapLength();
+ MOZ_ASSERT(mapLength > 0);
+
+ const JSClass* clasp = obj->shape()->getObjectClass();
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+
+ if (obj->inDictionaryMode()) {
+ // First generate a new dictionary shape so that the map and shape can be
+ // updated infallibly.
+ if (!generateNewDictionaryShape(cx, obj)) {
+ return false;
+ }
+ DictionaryPropMap* map = obj->dictionaryShape()->propMap();
+ map->freezeOrSealProperties(cx, level, clasp, mapLength, &objectFlags);
+ obj->dictionaryShape()->updateNewShape(objectFlags, map, mapLength);
+ return true;
+ }
+
+ Rooted<SharedPropMap*> map(cx, obj->sharedShape()->propMap());
+ if (!SharedPropMap::freezeOrSealProperties(cx, level, clasp, &map, mapLength,
+ &objectFlags)) {
+ return false;
+ }
+
+ SharedShape* newShape = SharedShape::getPropMapShape(
+ cx, obj->shape()->base(), obj->numFixedSlots(), map, mapLength,
+ objectFlags);
+ if (!newShape) {
+ return false;
+ }
+ MOZ_ASSERT(obj->sharedShape()->slotSpan() == newShape->slotSpan());
+
+ obj->setShape(newShape);
+ return true;
+}
+
+/* static */
+bool NativeObject::generateNewDictionaryShape(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ // Clone the current dictionary shape to a new shape. This ensures ICs and
+ // other shape guards are properly invalidated before we start mutating the
+ // map or new shape.
+
+ MOZ_ASSERT(obj->inDictionaryMode());
+
+ Shape* shape = DictionaryShape::new_(cx, obj);
+ if (!shape) {
+ return false;
+ }
+
+ obj->setShape(shape);
+ return true;
+}
+
+/* static */
+bool JSObject::setFlag(JSContext* cx, HandleObject obj, ObjectFlag flag) {
+ MOZ_ASSERT(cx->compartment() == obj->compartment());
+
+ if (obj->hasFlag(flag)) {
+ return true;
+ }
+
+ ObjectFlags objectFlags = obj->shape()->objectFlags();
+ objectFlags.setFlag(flag);
+
+ uint32_t numFixed =
+ obj->is<NativeObject>() ? obj->as<NativeObject>().numFixedSlots() : 0;
+ return Shape::replaceShape(cx, obj, objectFlags, obj->shape()->proto(),
+ numFixed);
+}
+
+static bool SetObjectIsUsedAsPrototype(JSContext* cx, Handle<JSObject*> proto) {
+ MOZ_ASSERT(!proto->isUsedAsPrototype());
+
+ // Ensure the proto object has a unique id to prevent OOM crashes below.
+ uint64_t unused;
+ if (!gc::GetOrCreateUniqueId(proto, &unused)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return JSObject::setIsUsedAsPrototype(cx, proto);
+}
+
+/* static */
+bool JSObject::setProtoUnchecked(JSContext* cx, HandleObject obj,
+ Handle<TaggedProto> proto) {
+ MOZ_ASSERT(cx->compartment() == obj->compartment());
+ MOZ_ASSERT(!obj->staticPrototypeIsImmutable());
+ MOZ_ASSERT_IF(!obj->is<ProxyObject>(), obj->nonProxyIsExtensible());
+ MOZ_ASSERT(obj->shape()->proto() != proto);
+
+ // Notify Watchtower of this proto change, so it can properly invalidate shape
+ // teleporting and other optimizations.
+ if (!Watchtower::watchProtoChange(cx, obj)) {
+ return false;
+ }
+
+ if (proto.isObject() && !proto.toObject()->isUsedAsPrototype()) {
+ RootedObject protoObj(cx, proto.toObject());
+ if (!SetObjectIsUsedAsPrototype(cx, protoObj)) {
+ return false;
+ }
+ }
+
+ uint32_t numFixed =
+ obj->is<NativeObject>() ? obj->as<NativeObject>().numFixedSlots() : 0;
+ return Shape::replaceShape(cx, obj, obj->shape()->objectFlags(), proto,
+ numFixed);
+}
+
+/* static */
+bool NativeObject::changeNumFixedSlotsAfterSwap(JSContext* cx,
+ Handle<NativeObject*> obj,
+ uint32_t nfixed) {
+ MOZ_ASSERT(nfixed != obj->shape()->numFixedSlots());
+
+ return Shape::replaceShape(cx, obj, obj->shape()->objectFlags(),
+ obj->shape()->proto(), nfixed);
+}
+
+BaseShape::BaseShape(const JSClass* clasp, JS::Realm* realm, TaggedProto proto)
+ : TenuredCellWithNonGCPointer(clasp), realm_(realm), proto_(proto) {
+#ifdef DEBUG
+ AssertJSClassInvariants(clasp);
+#endif
+
+ MOZ_ASSERT_IF(proto.isObject(),
+ compartment() == proto.toObject()->compartment());
+ MOZ_ASSERT_IF(proto.isObject(), proto.toObject()->isUsedAsPrototype());
+
+ // Windows may not appear on prototype chains.
+ MOZ_ASSERT_IF(proto.isObject(), !IsWindow(proto.toObject()));
+
+#ifdef DEBUG
+ if (GlobalObject* global = realm->unsafeUnbarrieredMaybeGlobal()) {
+ AssertTargetIsNotGray(global);
+ }
+#endif
+}
+
+/* static */
+BaseShape* BaseShape::get(JSContext* cx, const JSClass* clasp, JS::Realm* realm,
+ Handle<TaggedProto> proto) {
+ auto& table = cx->zone()->shapeZone().baseShapes;
+
+ using Lookup = BaseShapeHasher::Lookup;
+
+ auto p = MakeDependentAddPtr(cx, table, Lookup(clasp, realm, proto));
+ if (p) {
+ return *p;
+ }
+
+ BaseShape* nbase = cx->newCell<BaseShape>(clasp, realm, proto);
+ if (!nbase) {
+ return nullptr;
+ }
+
+ if (!p.add(cx, table, Lookup(clasp, realm, proto), nbase)) {
+ return nullptr;
+ }
+
+ return nbase;
+}
+
+// static
+SharedShape* SharedShape::new_(JSContext* cx, Handle<BaseShape*> base,
+ ObjectFlags objectFlags, uint32_t nfixed,
+ Handle<SharedPropMap*> map, uint32_t mapLength) {
+ return cx->newCell<SharedShape>(base, objectFlags, nfixed, map, mapLength);
+}
+
+// static
+DictionaryShape* DictionaryShape::new_(JSContext* cx, Handle<BaseShape*> base,
+ ObjectFlags objectFlags, uint32_t nfixed,
+ Handle<DictionaryPropMap*> map,
+ uint32_t mapLength) {
+ return cx->newCell<DictionaryShape>(base, objectFlags, nfixed, map,
+ mapLength);
+}
+
+DictionaryShape::DictionaryShape(NativeObject* nobj)
+ : DictionaryShape(nobj->shape()->base(), nobj->shape()->objectFlags(),
+ nobj->shape()->numFixedSlots(),
+ nobj->dictionaryShape()->propMap(),
+ nobj->shape()->propMapLength()) {}
+
+// static
+DictionaryShape* DictionaryShape::new_(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ return cx->newCell<DictionaryShape>(obj);
+}
+
+// static
+ProxyShape* ProxyShape::new_(JSContext* cx, Handle<BaseShape*> base,
+ ObjectFlags objectFlags) {
+ return cx->newCell<ProxyShape>(base, objectFlags);
+}
+
+// static
+WasmGCShape* WasmGCShape::new_(JSContext* cx, Handle<BaseShape*> base,
+ const wasm::RecGroup* recGroup,
+ ObjectFlags objectFlags) {
+ WasmGCShape* shape = cx->newCell<WasmGCShape>(base, recGroup, objectFlags);
+ if (shape) {
+ shape->init();
+ }
+ return shape;
+}
+
+MOZ_ALWAYS_INLINE HashNumber ShapeForAddHasher::hash(const Lookup& l) {
+ HashNumber hash = HashPropertyKey(l.key);
+ return mozilla::AddToHash(hash, l.flags.toRaw());
+}
+
+MOZ_ALWAYS_INLINE bool ShapeForAddHasher::match(SharedShape* shape,
+ const Lookup& l) {
+ uint32_t slot;
+ return shape->lastPropertyMatchesForAdd(l.key, l.flags, &slot);
+}
+
+#ifdef DEBUG
+void Shape::dump(js::GenericPrinter& out) const {
+ out.printf("shape @ 0x%p\n", this);
+ out.printf("base: 0x%p\n", base());
+ switch (kind()) {
+ case Kind::Shared:
+ out.printf("kind: Shared\n");
+ break;
+ case Kind::Dictionary:
+ out.printf("kind: Dictionary\n");
+ break;
+ case Kind::Proxy:
+ out.printf("kind: Proxy\n");
+ break;
+ case Kind::WasmGC:
+ out.printf("kind: WasmGC\n");
+ break;
+ }
+ if (isNative()) {
+ out.printf("mapLength: %u\n", asNative().propMapLength());
+ if (asNative().propMap()) {
+ out.printf("map:\n");
+ asNative().propMap()->dump(out);
+ } else {
+ out.printf("map: (none)\n");
+ }
+ }
+}
+
+void Shape::dump() const {
+ Fprinter out(stderr);
+ dump(out);
+}
+#endif // DEBUG
+
+/* static */
+SharedShape* SharedShape::getInitialShape(JSContext* cx, const JSClass* clasp,
+ JS::Realm* realm, TaggedProto proto,
+ size_t nfixed,
+ ObjectFlags objectFlags) {
+ MOZ_ASSERT(cx->compartment() == realm->compartment());
+ MOZ_ASSERT_IF(proto.isObject(),
+ cx->isInsideCurrentCompartment(proto.toObject()));
+
+ if (proto.isObject()) {
+ if (proto.toObject()->isUsedAsPrototype()) {
+ // Use the cache on the prototype's shape to get to the initial shape.
+ // This cache has a hit rate of 80-90% on typical workloads and is faster
+ // than the HashSet lookup below.
+ JSObject* protoObj = proto.toObject();
+ Shape* protoObjShape = protoObj->shape();
+ if (protoObjShape->cache().isShapeWithProto()) {
+ SharedShape* shape = protoObjShape->cache().toShapeWithProto();
+ if (shape->numFixedSlots() == nfixed &&
+ shape->objectFlags() == objectFlags &&
+ shape->getObjectClass() == clasp && shape->realm() == realm &&
+ shape->proto() == proto) {
+#ifdef DEBUG
+ // Verify the table lookup below would have resulted in the same
+ // shape.
+ using Lookup = InitialShapeHasher::Lookup;
+ Lookup lookup(clasp, realm, proto, nfixed, objectFlags);
+ auto p = realm->zone()->shapeZone().initialShapes.lookup(lookup);
+ MOZ_ASSERT(*p == shape);
+#endif
+ return shape;
+ }
+ }
+ } else {
+ RootedObject protoObj(cx, proto.toObject());
+ if (!SetObjectIsUsedAsPrototype(cx, protoObj)) {
+ return nullptr;
+ }
+ proto = TaggedProto(protoObj);
+ }
+ }
+
+ auto& table = realm->zone()->shapeZone().initialShapes;
+
+ using Lookup = InitialShapeHasher::Lookup;
+ auto ptr = MakeDependentAddPtr(
+ cx, table, Lookup(clasp, realm, proto, nfixed, objectFlags));
+ if (ptr) {
+ // Cache the result of this lookup on the prototype's shape.
+ if (proto.isObject()) {
+ JSObject* protoObj = proto.toObject();
+ Shape* protoShape = protoObj->shape();
+ if (!protoShape->cache().isForAdd() &&
+ RegisterShapeCache(cx, protoShape)) {
+ protoShape->cacheRef().setShapeWithProto(*ptr);
+ }
+ }
+ return *ptr;
+ }
+
+ Rooted<TaggedProto> protoRoot(cx, proto);
+ Rooted<BaseShape*> nbase(cx, BaseShape::get(cx, clasp, realm, protoRoot));
+ if (!nbase) {
+ return nullptr;
+ }
+
+ Rooted<SharedShape*> shape(
+ cx, SharedShape::new_(cx, nbase, objectFlags, nfixed, nullptr, 0));
+ if (!shape) {
+ return nullptr;
+ }
+
+ Lookup lookup(clasp, realm, protoRoot, nfixed, objectFlags);
+ if (!ptr.add(cx, table, lookup, shape)) {
+ return nullptr;
+ }
+
+ return shape;
+}
+
+/* static */
+SharedShape* SharedShape::getInitialShape(JSContext* cx, const JSClass* clasp,
+ JS::Realm* realm, TaggedProto proto,
+ gc::AllocKind kind,
+ ObjectFlags objectFlags) {
+ return getInitialShape(cx, clasp, realm, proto, GetGCKindSlots(kind),
+ objectFlags);
+}
+
+/* static */
+SharedShape* SharedShape::getPropMapShape(
+ JSContext* cx, BaseShape* base, size_t nfixed, Handle<SharedPropMap*> map,
+ uint32_t mapLength, ObjectFlags objectFlags, bool* allocatedNewShape) {
+ MOZ_ASSERT(cx->compartment() == base->compartment());
+ MOZ_ASSERT_IF(base->proto().isObject(),
+ cx->isInsideCurrentCompartment(base->proto().toObject()));
+ MOZ_ASSERT_IF(base->proto().isObject(),
+ base->proto().toObject()->isUsedAsPrototype());
+ MOZ_ASSERT(map);
+ MOZ_ASSERT(mapLength > 0);
+
+ auto& table = cx->zone()->shapeZone().propMapShapes;
+
+ using Lookup = PropMapShapeHasher::Lookup;
+ auto ptr = MakeDependentAddPtr(
+ cx, table, Lookup(base, nfixed, map, mapLength, objectFlags));
+ if (ptr) {
+ if (allocatedNewShape) {
+ *allocatedNewShape = false;
+ }
+ return *ptr;
+ }
+
+ Rooted<BaseShape*> baseRoot(cx, base);
+ Rooted<SharedShape*> shape(
+ cx, SharedShape::new_(cx, baseRoot, objectFlags, nfixed, map, mapLength));
+ if (!shape) {
+ return nullptr;
+ }
+
+ Lookup lookup(baseRoot, nfixed, map, mapLength, objectFlags);
+ if (!ptr.add(cx, table, lookup, shape)) {
+ return nullptr;
+ }
+
+ if (allocatedNewShape) {
+ *allocatedNewShape = true;
+ }
+
+ return shape;
+}
+
+/* static */
+SharedShape* SharedShape::getInitialOrPropMapShape(
+ JSContext* cx, const JSClass* clasp, JS::Realm* realm, TaggedProto proto,
+ size_t nfixed, Handle<SharedPropMap*> map, uint32_t mapLength,
+ ObjectFlags objectFlags) {
+ if (!map) {
+ MOZ_ASSERT(mapLength == 0);
+ return getInitialShape(cx, clasp, realm, proto, nfixed, objectFlags);
+ }
+
+ Rooted<TaggedProto> protoRoot(cx, proto);
+ BaseShape* nbase = BaseShape::get(cx, clasp, realm, protoRoot);
+ if (!nbase) {
+ return nullptr;
+ }
+
+ return getPropMapShape(cx, nbase, nfixed, map, mapLength, objectFlags);
+}
+
+/* static */
+void SharedShape::insertInitialShape(JSContext* cx,
+ Handle<SharedShape*> shape) {
+ using Lookup = InitialShapeHasher::Lookup;
+ Lookup lookup(shape->getObjectClass(), shape->realm(), shape->proto(),
+ shape->numFixedSlots(), shape->objectFlags());
+
+ auto& table = cx->zone()->shapeZone().initialShapes;
+ InitialShapeSet::Ptr p = table.lookup(lookup);
+ MOZ_ASSERT(p);
+
+ // The metadata callback can end up causing redundant changes of the initial
+ // shape.
+ SharedShape* initialShape = *p;
+ if (initialShape == shape) {
+ return;
+ }
+
+ MOZ_ASSERT(initialShape->numFixedSlots() == shape->numFixedSlots());
+ MOZ_ASSERT(initialShape->base() == shape->base());
+ MOZ_ASSERT(initialShape->objectFlags() == shape->objectFlags());
+
+ table.replaceKey(p, lookup, shape.get());
+
+ // Purge the prototype's shape cache entry.
+ if (shape->proto().isObject()) {
+ JSObject* protoObj = shape->proto().toObject();
+ if (protoObj->shape()->cache().isShapeWithProto()) {
+ protoObj->shape()->cacheRef().setNone();
+ }
+ }
+}
+
+/* static */
+ProxyShape* ProxyShape::getShape(JSContext* cx, const JSClass* clasp,
+ JS::Realm* realm, TaggedProto proto,
+ ObjectFlags objectFlags) {
+ MOZ_ASSERT(cx->compartment() == realm->compartment());
+ MOZ_ASSERT_IF(proto.isObject(),
+ cx->isInsideCurrentCompartment(proto.toObject()));
+
+ if (proto.isObject() && !proto.toObject()->isUsedAsPrototype()) {
+ RootedObject protoObj(cx, proto.toObject());
+ if (!SetObjectIsUsedAsPrototype(cx, protoObj)) {
+ return nullptr;
+ }
+ proto = TaggedProto(protoObj);
+ }
+
+ auto& table = realm->zone()->shapeZone().proxyShapes;
+
+ using Lookup = ProxyShapeHasher::Lookup;
+ auto ptr =
+ MakeDependentAddPtr(cx, table, Lookup(clasp, realm, proto, objectFlags));
+ if (ptr) {
+ return *ptr;
+ }
+
+ Rooted<TaggedProto> protoRoot(cx, proto);
+ Rooted<BaseShape*> nbase(cx, BaseShape::get(cx, clasp, realm, protoRoot));
+ if (!nbase) {
+ return nullptr;
+ }
+
+ Rooted<ProxyShape*> shape(cx, ProxyShape::new_(cx, nbase, objectFlags));
+ if (!shape) {
+ return nullptr;
+ }
+
+ Lookup lookup(clasp, realm, protoRoot, objectFlags);
+ if (!ptr.add(cx, table, lookup, shape)) {
+ return nullptr;
+ }
+
+ return shape;
+}
+
+/* static */
+WasmGCShape* WasmGCShape::getShape(JSContext* cx, const JSClass* clasp,
+ JS::Realm* realm, TaggedProto proto,
+ const wasm::RecGroup* recGroup,
+ ObjectFlags objectFlags) {
+ MOZ_ASSERT(cx->compartment() == realm->compartment());
+ MOZ_ASSERT_IF(proto.isObject(),
+ cx->isInsideCurrentCompartment(proto.toObject()));
+
+ if (proto.isObject() && !proto.toObject()->isUsedAsPrototype()) {
+ RootedObject protoObj(cx, proto.toObject());
+ if (!SetObjectIsUsedAsPrototype(cx, protoObj)) {
+ return nullptr;
+ }
+ proto = TaggedProto(protoObj);
+ }
+
+ auto& table = realm->zone()->shapeZone().wasmGCShapes;
+
+ using Lookup = WasmGCShapeHasher::Lookup;
+ auto ptr = MakeDependentAddPtr(
+ cx, table, Lookup(clasp, realm, proto, recGroup, objectFlags));
+ if (ptr) {
+ return *ptr;
+ }
+
+ Rooted<TaggedProto> protoRoot(cx, proto);
+ Rooted<BaseShape*> nbase(cx, BaseShape::get(cx, clasp, realm, protoRoot));
+ if (!nbase) {
+ return nullptr;
+ }
+
+ Rooted<WasmGCShape*> shape(
+ cx, WasmGCShape::new_(cx, nbase, recGroup, objectFlags));
+ if (!shape) {
+ return nullptr;
+ }
+
+ Lookup lookup(clasp, realm, protoRoot, recGroup, objectFlags);
+ if (!ptr.add(cx, table, lookup, shape)) {
+ return nullptr;
+ }
+
+ return shape;
+}
+
+JS::ubi::Node::Size JS::ubi::Concrete<js::Shape>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ Size size = js::gc::Arena::thingSize(get().asTenured().getAllocKind());
+
+ if (get().cache().isShapeSetForAdd()) {
+ ShapeSetForAdd* set = get().cache().toShapeSetForAdd();
+ size += set->shallowSizeOfIncludingThis(mallocSizeOf);
+ }
+
+ return size;
+}
+
+JS::ubi::Node::Size JS::ubi::Concrete<js::BaseShape>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return js::gc::Arena::thingSize(get().asTenured().getAllocKind());
+}
diff --git a/js/src/vm/Shape.h b/js/src/vm/Shape.h
new file mode 100644
index 0000000000..003a6e398a
--- /dev/null
+++ b/js/src/vm/Shape.h
@@ -0,0 +1,925 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Shape_h
+#define vm_Shape_h
+
+#include "js/shadow/Shape.h" // JS::shadow::Shape, JS::shadow::BaseShape
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "jstypes.h"
+#include "NamespaceImports.h"
+
+#include "gc/Barrier.h"
+#include "gc/MaybeRooted.h"
+#include "js/HashTable.h"
+#include "js/Id.h" // JS::PropertyKey
+#include "js/MemoryMetrics.h"
+#include "js/RootingAPI.h"
+#include "js/UbiNode.h"
+#include "util/EnumFlags.h"
+#include "vm/ObjectFlags.h"
+#include "vm/PropertyInfo.h"
+#include "vm/PropMap.h"
+#include "vm/TaggedProto.h"
+
+// [SMDOC] Shapes
+//
+// A Shape represents the layout of an object. It stores and implies:
+//
+// * The object's JSClass, Realm, prototype (see BaseShape section below).
+// * The object's flags (ObjectFlags).
+// * For native objects, the object's properties (PropMap and map length).
+// * For native objects, the fixed slot capacity of the object (numFixedSlots).
+//
+// For native objects, the shape implies the property structure (keys,
+// attributes, property order for enumeration) but not the property values.
+// The values are stored in object slots.
+//
+// Every JSObject has a pointer, |shape_|, accessible via shape(), to the
+// current shape of the object. This pointer permits fast object layout tests.
+//
+// Shapes use the following C++ class hierarchy:
+//
+// C++ Type Used by
+// ============================ ====================================
+// Shape (abstract) JSObject
+// |
+// +-- NativeShape (abstract) NativeObject
+// | |
+// | +-- SharedShape NativeObject with a shared shape
+// | |
+// | +-- DictionaryShape NativeObject with a dictionary shape
+// |
+// +-- ProxyShape ProxyObject
+// |
+// +-- WasmGCShape WasmGCObject
+//
+// Classes marked with (abstract) above are not literally C++ Abstract Base
+// Classes (since there are no virtual functions, pure or not, in this
+// hierarchy), but have the same meaning: there are no shapes with this type as
+// their most-derived type.
+//
+// SharedShape
+// ===========
+// Used only for native objects. This is either an initial shape (no property
+// map) or SharedPropMap shape (for objects with at least one property).
+//
+// These are immutable tuples stored in a hash table, so that objects with the
+// same structure end up with the same shape (this both saves memory and allows
+// JIT optimizations based on this shape).
+//
+// To avoid hash table lookups on the hot addProperty path, each shape has a
+// ShapeCachePtr that caches the result of such lookups. This cache is purged
+// on GC. The shape cache is also used for prototype shapes, to point to the
+// initial shape of objects created with that prototype, and for cached
+// iterators.
+//
+// DictionaryShape
+// ===============
+// Used only for native objects. An object with a dictionary shape is "in
+// dictionary mode". Certain property operations are not supported for shared
+// maps so in these cases we need to convert the object to dictionary mode by
+// creating a dictionary property map and a dictionary shape. An object is
+// converted to dictionary mode in the following cases:
+//
+// - Changing the flags/attributes of a property that is not the last
+//   property.
+// - Removing a property other than the object's last property.
+// - The object has many properties. See maybeConvertToDictionaryForAdd for the
+// heuristics.
+//
+// Dictionary shapes are unshared, private to a single object, and always have
+// a DictionaryPropMap that's similarly unshared. Dictionary shape mutations do
+// require allocating a new dictionary shape for the object, to properly
+// invalidate JIT inline caches and other shape guards.
+// See NativeObject::generateNewDictionaryShape.
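+//
+// For example, deleting a property that is not the object's last property
+// first converts the object to dictionary mode; later removals then mutate
+// the unshared dictionary map in place.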
+//
+// ProxyShape
+// ==========
+// Shape used for proxy objects (including wrappers). Proxies with the same
+// JSClass, Realm, prototype and ObjectFlags will have the same shape.
+//
+// WasmGCShape
+// ===========
+// Shape used for Wasm GC objects. Wasm GC objects with the same JSClass, Realm,
+// prototype and ObjectFlags will have the same shape.
+//
+// BaseShape
+// =========
+// Because many Shapes have similar data, there is actually a secondary type
+// called a BaseShape that holds some of a Shape's data (the JSClass, Realm,
+// prototype). Many shapes can share a single BaseShape.
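+//
+// For example, two shapes with the same JSClass, Realm, and prototype but
+// different property maps are distinct Shapes that share a single BaseShape.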
+
+MOZ_ALWAYS_INLINE size_t JSSLOT_FREE(const JSClass* clasp) {
+ // Proxy classes have reserved slots, but proxies manage their own slot
+ // layout.
+ MOZ_ASSERT(!clasp->isProxyObject());
+ return JSCLASS_RESERVED_SLOTS(clasp);
+}
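+
+// For example, a class declared with JSCLASS_HAS_RESERVED_SLOTS(2) has
+// JSSLOT_FREE == 2: slots 0 and 1 are reserved, and ordinary properties start
+// at slot 2.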
+
+namespace js {
+
+class NativeShape;
+class Shape;
+class PropertyIteratorObject;
+
+namespace gc {
+class TenuringTracer;
+} // namespace gc
+
+namespace wasm {
+class RecGroup;
+} // namespace wasm
+
+// Hash policy for ShapeCachePtr's ShapeSetForAdd. Maps the new property key and
+// flags to the new shape.
+struct ShapeForAddHasher : public DefaultHasher<Shape*> {
+ using Key = SharedShape*;
+
+ struct Lookup {
+ PropertyKey key;
+ PropertyFlags flags;
+
+ Lookup(PropertyKey key, PropertyFlags flags) : key(key), flags(flags) {}
+ };
+
+ static MOZ_ALWAYS_INLINE HashNumber hash(const Lookup& l);
+ static MOZ_ALWAYS_INLINE bool match(SharedShape* shape, const Lookup& l);
+};
+using ShapeSetForAdd =
+ HashSet<SharedShape*, ShapeForAddHasher, SystemAllocPolicy>;
+
+// Each shape has a cache pointer that's either:
+//
+// * None
+// * For shared shapes, a single shape used to speed up addProperty.
+// * For shared shapes, a set of shapes used to speed up addProperty.
+// * For prototype shapes, the most recently used initial shape allocated for a
+// prototype object with this shape.
+// * For any shape, a PropertyIteratorObject used to speed up GetIterator.
+//
+// The cache is purely an optimization and is purged on GC (all shapes with a
+// non-None ShapeCachePtr are added to a vector in the Zone).
+class ShapeCachePtr {
+ enum {
+ SINGLE_SHAPE_FOR_ADD = 0,
+ SHAPE_SET_FOR_ADD = 1,
+ SHAPE_WITH_PROTO = 2,
+ ITERATOR = 3,
+ MASK = 3
+ };
+
+ uintptr_t bits = 0;
+
+ public:
+ bool isNone() const { return !bits; }
+ void setNone() { bits = 0; }
+
+ bool isSingleShapeForAdd() const {
+ return (bits & MASK) == SINGLE_SHAPE_FOR_ADD && !isNone();
+ }
+ SharedShape* toSingleShapeForAdd() const {
+ MOZ_ASSERT(isSingleShapeForAdd());
+ return reinterpret_cast<SharedShape*>(bits & ~uintptr_t(MASK));
+ }
+ void setSingleShapeForAdd(SharedShape* shape) {
+ MOZ_ASSERT(shape);
+ MOZ_ASSERT((uintptr_t(shape) & MASK) == 0);
+ MOZ_ASSERT(!isShapeSetForAdd()); // Don't leak the ShapeSet.
+ bits = uintptr_t(shape) | SINGLE_SHAPE_FOR_ADD;
+ }
+
+ bool isShapeSetForAdd() const { return (bits & MASK) == SHAPE_SET_FOR_ADD; }
+ ShapeSetForAdd* toShapeSetForAdd() const {
+ MOZ_ASSERT(isShapeSetForAdd());
+ return reinterpret_cast<ShapeSetForAdd*>(bits & ~uintptr_t(MASK));
+ }
+ void setShapeSetForAdd(ShapeSetForAdd* hash) {
+ MOZ_ASSERT(hash);
+ MOZ_ASSERT((uintptr_t(hash) & MASK) == 0);
+ bits = uintptr_t(hash) | SHAPE_SET_FOR_ADD;
+ }
+
+ bool isForAdd() const { return isSingleShapeForAdd() || isShapeSetForAdd(); }
+
+ bool isShapeWithProto() const { return (bits & MASK) == SHAPE_WITH_PROTO; }
+ SharedShape* toShapeWithProto() const {
+ MOZ_ASSERT(isShapeWithProto());
+ return reinterpret_cast<SharedShape*>(bits & ~uintptr_t(MASK));
+ }
+ void setShapeWithProto(SharedShape* shape) {
+ MOZ_ASSERT(shape);
+ MOZ_ASSERT((uintptr_t(shape) & MASK) == 0);
+ MOZ_ASSERT(!isShapeSetForAdd()); // Don't leak the ShapeSet.
+ bits = uintptr_t(shape) | SHAPE_WITH_PROTO;
+ }
+
+ bool isIterator() const { return (bits & MASK) == ITERATOR; }
+ PropertyIteratorObject* toIterator() const {
+ MOZ_ASSERT(isIterator());
+ return reinterpret_cast<PropertyIteratorObject*>(bits & ~uintptr_t(MASK));
+ }
+ void setIterator(PropertyIteratorObject* iter) {
+ MOZ_ASSERT(iter);
+ MOZ_ASSERT((uintptr_t(iter) & MASK) == 0);
+ MOZ_ASSERT(!isShapeSetForAdd()); // Don't leak the ShapeSet.
+ bits = uintptr_t(iter) | ITERATOR;
+ }
+ friend class js::jit::MacroAssembler;
+} JS_HAZ_GC_POINTER;
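+
+// A minimal usage sketch for the tagged pointer above (illustrative only;
+// `someShape` stands in for a SharedShape* obtained elsewhere):
+//
+//   ShapeCachePtr cache;
+//   MOZ_ASSERT(cache.isNone());
+//   cache.setSingleShapeForAdd(someShape);  // low two bits tag the pointer
+//   if (cache.isSingleShapeForAdd()) {
+//     SharedShape* hit = cache.toSingleShapeForAdd();
+//   }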
+
+// BaseShapes store the object's class, realm and prototype. BaseShapes are
+// immutable tuples stored in a per-Zone hash table.
+class BaseShape : public gc::TenuredCellWithNonGCPointer<const JSClass> {
+ public:
+ /* Class of referring object, stored in the cell header */
+ const JSClass* clasp() const { return headerPtr(); }
+
+ private:
+ JS::Realm* realm_;
+ GCPtr<TaggedProto> proto_;
+
+ BaseShape(const BaseShape& base) = delete;
+ BaseShape& operator=(const BaseShape& other) = delete;
+
+ public:
+ void finalize(JS::GCContext* gcx) {}
+
+ BaseShape(const JSClass* clasp, JS::Realm* realm, TaggedProto proto);
+
+ /* Not defined: BaseShapes must not be stack allocated. */
+ ~BaseShape() = delete;
+
+ JS::Realm* realm() const { return realm_; }
+ JS::Compartment* compartment() const {
+ return JS::GetCompartmentForRealm(realm());
+ }
+ JS::Compartment* maybeCompartment() const { return compartment(); }
+
+ TaggedProto proto() const { return proto_; }
+
+ /*
+   * Look up a base shape in the zone's baseShapes table, adding it if not
+   * already present.
+ */
+ static BaseShape* get(JSContext* cx, const JSClass* clasp, JS::Realm* realm,
+ Handle<TaggedProto> proto);
+
+ static const JS::TraceKind TraceKind = JS::TraceKind::BaseShape;
+
+ void traceChildren(JSTracer* trc);
+
+ static constexpr size_t offsetOfClasp() { return offsetOfHeaderPtr(); }
+
+ static constexpr size_t offsetOfRealm() {
+ return offsetof(BaseShape, realm_);
+ }
+
+ static constexpr size_t offsetOfProto() {
+ return offsetof(BaseShape, proto_);
+ }
+
+ private:
+ static void staticAsserts() {
+ static_assert(offsetOfClasp() == offsetof(JS::shadow::BaseShape, clasp));
+ static_assert(offsetOfRealm() == offsetof(JS::shadow::BaseShape, realm));
+ static_assert(sizeof(BaseShape) % gc::CellAlignBytes == 0,
+ "Things inheriting from gc::Cell must have a size that's "
+ "a multiple of gc::CellAlignBytes");
+ // Sanity check BaseShape size is what we expect.
+#ifdef JS_64BIT
+ static_assert(sizeof(BaseShape) == 3 * sizeof(void*));
+#else
+ static_assert(sizeof(BaseShape) == 4 * sizeof(void*));
+#endif
+ }
+};
+
+class Shape : public gc::CellWithTenuredGCPointer<gc::TenuredCell, BaseShape> {
+ friend class ::JSObject;
+ friend class ::JSFunction;
+ friend class GCMarker;
+ friend class NativeObject;
+ friend class SharedShape;
+ friend class PropertyTree;
+ friend class gc::TenuringTracer;
+ friend class JS::ubi::Concrete<Shape>;
+ friend class gc::RelocationOverlay;
+
+ public:
+ // Base shape, stored in the cell header.
+ BaseShape* base() const { return headerPtr(); }
+
+ using Kind = JS::shadow::Shape::Kind;
+
+ protected:
+ // Flags that are not modified after the Shape is created. Off-thread Ion
+ // compilation can access the immutableFlags word, so we don't want any
+ // mutable state here to avoid (TSan) races.
+ enum ImmutableFlags : uint32_t {
+ // For NativeShape: the length associated with the property map. This is a
+ // value in the range [0, PropMap::Capacity]. A length of 0 indicates the
+ // object is empty (has no properties).
+ MAP_LENGTH_MASK = BitMask(4),
+
+ // The Shape Kind. The NativeObject kinds have the low bit set.
+ KIND_SHIFT = 4,
+ KIND_MASK = 0b11,
+ IS_NATIVE_BIT = 0x1 << KIND_SHIFT,
+
+ // For NativeShape: the number of fixed slots in objects with this shape.
+ // FIXED_SLOTS_MAX is the biggest count of fixed slots a Shape can store.
+ FIXED_SLOTS_MAX = 0x1f,
+ FIXED_SLOTS_SHIFT = 6,
+ FIXED_SLOTS_MASK = uint32_t(FIXED_SLOTS_MAX << FIXED_SLOTS_SHIFT),
+
+ // For SharedShape: the slot span of the object, if it fits in a single
+ // byte. If the value is SMALL_SLOTSPAN_MAX, the slot span has to be
+ // computed based on the property map (which is slower).
+ //
+ // Note: NativeObject::addProperty will convert to dictionary mode before we
+ // reach this limit, but there are other places where we add properties to
+ // shapes, for example environment object shapes.
+ SMALL_SLOTSPAN_MAX = 0x3ff, // 10 bits.
+ SMALL_SLOTSPAN_SHIFT = 11,
+ SMALL_SLOTSPAN_MASK = uint32_t(SMALL_SLOTSPAN_MAX << SMALL_SLOTSPAN_SHIFT),
+ };
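+
+  // Putting the constants above together, the immutableFlags packing is
+  // (a sketch; bit 0 rightmost):
+  //
+  //   bits  0-3   map length       (MAP_LENGTH_MASK)
+  //   bits  4-5   shape kind       (KIND_MASK; bit 4 is IS_NATIVE_BIT)
+  //   bits  6-10  fixed slot count (NativeShape only)
+  //   bits 11-20  small slot span  (SharedShape only)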
+
+ uint32_t immutableFlags; // Immutable flags, see above.
+ ObjectFlags objectFlags_; // Immutable object flags, see ObjectFlags.
+
+ // Cache used to speed up common operations on shapes.
+ ShapeCachePtr cache_;
+
+ // Give the object a shape that's similar to its current shape, but with the
+ // passed objectFlags, proto, and nfixed values.
+ static bool replaceShape(JSContext* cx, HandleObject obj,
+ ObjectFlags objectFlags, TaggedProto proto,
+ uint32_t nfixed);
+
+ public:
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ JS::ShapeInfo* info) const {
+ if (cache_.isShapeSetForAdd()) {
+ info->shapesMallocHeapCache +=
+ cache_.toShapeSetForAdd()->shallowSizeOfIncludingThis(mallocSizeOf);
+ }
+ }
+
+ ShapeCachePtr& cacheRef() { return cache_; }
+ ShapeCachePtr cache() const { return cache_; }
+
+ void maybeCacheIterator(JSContext* cx, PropertyIteratorObject* iter);
+
+ const JSClass* getObjectClass() const { return base()->clasp(); }
+ JS::Realm* realm() const { return base()->realm(); }
+
+ JS::Compartment* compartment() const { return base()->compartment(); }
+ JS::Compartment* maybeCompartment() const {
+ return base()->maybeCompartment();
+ }
+
+ TaggedProto proto() const { return base()->proto(); }
+
+ ObjectFlags objectFlags() const { return objectFlags_; }
+ bool hasObjectFlag(ObjectFlag flag) const {
+ return objectFlags_.hasFlag(flag);
+ }
+
+ protected:
+ Shape(Kind kind, BaseShape* base, ObjectFlags objectFlags)
+ : CellWithTenuredGCPointer(base),
+ immutableFlags(uint32_t(kind) << KIND_SHIFT),
+ objectFlags_(objectFlags) {
+ MOZ_ASSERT(base);
+ MOZ_ASSERT(this->kind() == kind, "kind must fit in KIND_MASK");
+ MOZ_ASSERT(isNative() == base->clasp()->isNativeObject());
+ }
+
+ Shape(const Shape& other) = delete;
+
+ public:
+ Kind kind() const { return Kind((immutableFlags >> KIND_SHIFT) & KIND_MASK); }
+
+ bool isNative() const {
+ // Note: this is equivalent to `isShared() || isDictionary()`.
+ return immutableFlags & IS_NATIVE_BIT;
+ }
+
+ bool isShared() const { return kind() == Kind::Shared; }
+ bool isDictionary() const { return kind() == Kind::Dictionary; }
+ bool isProxy() const { return kind() == Kind::Proxy; }
+ bool isWasmGC() const { return kind() == Kind::WasmGC; }
+
+ inline NativeShape& asNative();
+ inline SharedShape& asShared();
+ inline DictionaryShape& asDictionary();
+ inline WasmGCShape& asWasmGC();
+
+ inline const NativeShape& asNative() const;
+ inline const SharedShape& asShared() const;
+ inline const DictionaryShape& asDictionary() const;
+ inline const WasmGCShape& asWasmGC() const;
+
+#ifdef DEBUG
+ void dump(js::GenericPrinter& out) const;
+ void dump() const;
+#endif
+
+ inline void purgeCache(JS::GCContext* gcx);
+ inline void finalize(JS::GCContext* gcx);
+
+ static const JS::TraceKind TraceKind = JS::TraceKind::Shape;
+
+ void traceChildren(JSTracer* trc);
+
+ // For JIT usage.
+ static constexpr size_t offsetOfBaseShape() { return offsetOfHeaderPtr(); }
+
+ static constexpr size_t offsetOfObjectFlags() {
+ return offsetof(Shape, objectFlags_);
+ }
+
+ static inline size_t offsetOfImmutableFlags() {
+ return offsetof(Shape, immutableFlags);
+ }
+
+ static constexpr uint32_t kindShift() { return KIND_SHIFT; }
+ static constexpr uint32_t kindMask() { return KIND_MASK; }
+ static constexpr uint32_t isNativeBit() { return IS_NATIVE_BIT; }
+
+ static constexpr size_t offsetOfCachePtr() { return offsetof(Shape, cache_); }
+
+ private:
+ static void staticAsserts() {
+ static_assert(offsetOfBaseShape() == offsetof(JS::shadow::Shape, base));
+ static_assert(offsetof(Shape, immutableFlags) ==
+ offsetof(JS::shadow::Shape, immutableFlags));
+ static_assert(KIND_SHIFT == JS::shadow::Shape::KIND_SHIFT);
+ static_assert(KIND_MASK == JS::shadow::Shape::KIND_MASK);
+ static_assert(FIXED_SLOTS_SHIFT == JS::shadow::Shape::FIXED_SLOTS_SHIFT);
+ static_assert(FIXED_SLOTS_MASK == JS::shadow::Shape::FIXED_SLOTS_MASK);
+ }
+};
+
+// Shared or dictionary shape for a NativeObject.
+class NativeShape : public Shape {
+ protected:
+  // The shape's property map. This is either nullptr (for an initial
+  // SharedShape with no properties), a SharedPropMap (for a SharedShape), or
+  // a DictionaryPropMap (for a DictionaryShape).
+ GCPtr<PropMap*> propMap_;
+
+ NativeShape(Kind kind, BaseShape* base, ObjectFlags objectFlags,
+ uint32_t nfixed, PropMap* map, uint32_t mapLength)
+ : Shape(kind, base, objectFlags), propMap_(map) {
+ MOZ_ASSERT(base->clasp()->isNativeObject());
+ MOZ_ASSERT(mapLength <= PropMap::Capacity);
+ immutableFlags |= (nfixed << FIXED_SLOTS_SHIFT) | mapLength;
+ }
+
+ public:
+ void traceChildren(JSTracer* trc);
+
+ PropMap* propMap() const { return propMap_; }
+ uint32_t propMapLength() const { return immutableFlags & MAP_LENGTH_MASK; }
+
+ PropertyInfoWithKey lastProperty() const {
+ MOZ_ASSERT(propMapLength() > 0);
+ size_t index = propMapLength() - 1;
+ return propMap()->getPropertyInfoWithKey(index);
+ }
+
+ MOZ_ALWAYS_INLINE PropMap* lookup(JSContext* cx, PropertyKey key,
+ uint32_t* index);
+ MOZ_ALWAYS_INLINE PropMap* lookupPure(PropertyKey key, uint32_t* index);
+
+ uint32_t numFixedSlots() const {
+ return (immutableFlags & FIXED_SLOTS_MASK) >> FIXED_SLOTS_SHIFT;
+ }
+
+ // For JIT usage.
+ static constexpr uint32_t fixedSlotsMask() { return FIXED_SLOTS_MASK; }
+ static constexpr uint32_t fixedSlotsShift() { return FIXED_SLOTS_SHIFT; }
+};
+
+// Shared shape for a NativeObject.
+class SharedShape : public NativeShape {
+ friend class js::gc::CellAllocator;
+ SharedShape(BaseShape* base, ObjectFlags objectFlags, uint32_t nfixed,
+ SharedPropMap* map, uint32_t mapLength)
+ : NativeShape(Kind::Shared, base, objectFlags, nfixed, map, mapLength) {
+ initSmallSlotSpan();
+ }
+
+ static SharedShape* new_(JSContext* cx, Handle<BaseShape*> base,
+ ObjectFlags objectFlags, uint32_t nfixed,
+ Handle<SharedPropMap*> map, uint32_t mapLength);
+
+ void initSmallSlotSpan() {
+ MOZ_ASSERT(isShared());
+ uint32_t slotSpan = slotSpanSlow();
+ if (slotSpan > SMALL_SLOTSPAN_MAX) {
+ slotSpan = SMALL_SLOTSPAN_MAX;
+ }
+ MOZ_ASSERT((immutableFlags & SMALL_SLOTSPAN_MASK) == 0);
+ immutableFlags |= (slotSpan << SMALL_SLOTSPAN_SHIFT);
+ }
+
+ public:
+ SharedPropMap* propMap() const {
+ MOZ_ASSERT(isShared());
+ return propMap_ ? propMap_->asShared() : nullptr;
+ }
+ inline SharedPropMap* propMapMaybeForwarded() const;
+
+ bool lastPropertyMatchesForAdd(PropertyKey key, PropertyFlags flags,
+ uint32_t* slot) const {
+ MOZ_ASSERT(isShared());
+ MOZ_ASSERT(propMapLength() > 0);
+ uint32_t index = propMapLength() - 1;
+ SharedPropMap* map = propMap();
+ if (map->getKey(index) != key) {
+ return false;
+ }
+ PropertyInfo prop = map->getPropertyInfo(index);
+ if (prop.flags() != flags) {
+ return false;
+ }
+ *slot = prop.maybeSlot();
+ return true;
+ }
+
+ uint32_t slotSpanSlow() const {
+ MOZ_ASSERT(isShared());
+ const JSClass* clasp = getObjectClass();
+ return SharedPropMap::slotSpan(clasp, propMap(), propMapLength());
+ }
+ uint32_t slotSpan() const {
+ MOZ_ASSERT(isShared());
+ uint32_t span =
+ (immutableFlags & SMALL_SLOTSPAN_MASK) >> SMALL_SLOTSPAN_SHIFT;
+ if (MOZ_LIKELY(span < SMALL_SLOTSPAN_MAX)) {
+ MOZ_ASSERT(slotSpanSlow() == span);
+ return span;
+ }
+ return slotSpanSlow();
+ }
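+
+  // For example, for a class with no reserved slots and three slotful
+  // properties (slots 0..2), slotSpan() is 3 and is normally read from the
+  // cached small-slotspan bits rather than recomputed from the map.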
+
+ /*
+   * Look up an initial shape matching the given parameters, creating an empty
+   * shape if none exists.
+ */
+ static SharedShape* getInitialShape(JSContext* cx, const JSClass* clasp,
+ JS::Realm* realm, TaggedProto proto,
+ size_t nfixed,
+ ObjectFlags objectFlags = {});
+ static SharedShape* getInitialShape(JSContext* cx, const JSClass* clasp,
+ JS::Realm* realm, TaggedProto proto,
+ gc::AllocKind kind,
+ ObjectFlags objectFlags = {});
+
+ static SharedShape* getPropMapShape(JSContext* cx, BaseShape* base,
+ size_t nfixed, Handle<SharedPropMap*> map,
+ uint32_t mapLength,
+ ObjectFlags objectFlags,
+ bool* allocatedNewShape = nullptr);
+
+ static SharedShape* getInitialOrPropMapShape(
+ JSContext* cx, const JSClass* clasp, JS::Realm* realm, TaggedProto proto,
+ size_t nfixed, Handle<SharedPropMap*> map, uint32_t mapLength,
+ ObjectFlags objectFlags);
+
+ /*
+ * Reinsert an alternate initial shape, to be returned by future
+ * getInitialShape calls, until the new shape becomes unreachable in a GC
+ * and the table entry is purged.
+ */
+ static void insertInitialShape(JSContext* cx, Handle<SharedShape*> shape);
+
+ /*
+ * Some object subclasses are allocated with a built-in set of properties.
+ * The first time such an object is created, these built-in properties must
+ * be set manually, to compute an initial shape. Afterward, that initial
+ * shape can be reused for newly-created objects that use the subclass's
+ * standard prototype. This method should be used in a post-allocation
+ * init method, to ensure that objects of such subclasses compute and cache
+ * the initial shape, if it hasn't already been computed.
+ */
+ template <class ObjectSubclass>
+ static inline bool ensureInitialCustomShape(JSContext* cx,
+ Handle<ObjectSubclass*> obj);
+};
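+
+// A hedged usage sketch for SharedShape::ensureInitialCustomShape, where
+// SomeObject is a hypothetical subclass with baked-in properties:
+//
+//   // In SomeObject's post-allocation init method:
+//   if (!SharedShape::ensureInitialCustomShape<SomeObject>(cx, obj)) {
+//     return false;
+//   }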
+
+// Dictionary shape for a NativeObject.
+class DictionaryShape : public NativeShape {
+ friend class ::JSObject;
+ friend class js::gc::CellAllocator;
+ friend class NativeObject;
+
+ DictionaryShape(BaseShape* base, ObjectFlags objectFlags, uint32_t nfixed,
+ DictionaryPropMap* map, uint32_t mapLength)
+ : NativeShape(Kind::Dictionary, base, objectFlags, nfixed, map,
+ mapLength) {
+ MOZ_ASSERT(map);
+ }
+ explicit DictionaryShape(NativeObject* nobj);
+
+ // Methods to set fields of a new dictionary shape. Must not be used for
+ // shapes that might have been exposed to script.
+ void updateNewShape(ObjectFlags flags, DictionaryPropMap* map,
+ uint32_t mapLength) {
+ MOZ_ASSERT(isDictionary());
+ objectFlags_ = flags;
+ propMap_ = map;
+ immutableFlags = (immutableFlags & ~MAP_LENGTH_MASK) | mapLength;
+ MOZ_ASSERT(propMapLength() == mapLength);
+ }
+ void setObjectFlagsOfNewShape(ObjectFlags flags) {
+ MOZ_ASSERT(isDictionary());
+ objectFlags_ = flags;
+ }
+
+ public:
+ static DictionaryShape* new_(JSContext* cx, Handle<BaseShape*> base,
+ ObjectFlags objectFlags, uint32_t nfixed,
+ Handle<DictionaryPropMap*> map,
+ uint32_t mapLength);
+ static DictionaryShape* new_(JSContext* cx, Handle<NativeObject*> obj);
+
+ DictionaryPropMap* propMap() const {
+ MOZ_ASSERT(isDictionary());
+ MOZ_ASSERT(propMap_);
+ return propMap_->asDictionary();
+ }
+};
+
+// Shape used for a ProxyObject.
+class ProxyShape : public Shape {
+ // Needed to maintain the same size as other shapes.
+ uintptr_t padding_;
+
+ friend class js::gc::CellAllocator;
+ ProxyShape(BaseShape* base, ObjectFlags objectFlags)
+ : Shape(Kind::Proxy, base, objectFlags) {
+ MOZ_ASSERT(base->clasp()->isProxyObject());
+ }
+
+ static ProxyShape* new_(JSContext* cx, Handle<BaseShape*> base,
+ ObjectFlags objectFlags);
+
+ public:
+ static ProxyShape* getShape(JSContext* cx, const JSClass* clasp,
+ JS::Realm* realm, TaggedProto proto,
+ ObjectFlags objectFlags);
+
+ private:
+ static void staticAsserts() {
+ // Silence unused field warning.
+ static_assert(sizeof(padding_) == sizeof(uintptr_t));
+ }
+};
+
+// Shape used for a WasmGCObject.
+class WasmGCShape : public Shape {
+ // The shape's recursion group.
+ const wasm::RecGroup* recGroup_;
+
+ friend class js::gc::CellAllocator;
+ WasmGCShape(BaseShape* base, const wasm::RecGroup* recGroup,
+ ObjectFlags objectFlags)
+ : Shape(Kind::WasmGC, base, objectFlags), recGroup_(recGroup) {
+ MOZ_ASSERT(!base->clasp()->isProxyObject());
+ MOZ_ASSERT(!base->clasp()->isNativeObject());
+ }
+
+ static WasmGCShape* new_(JSContext* cx, Handle<BaseShape*> base,
+ const wasm::RecGroup* recGroup,
+ ObjectFlags objectFlags);
+
+ // Take a reference to the recursion group.
+ inline void init();
+
+ public:
+ static WasmGCShape* getShape(JSContext* cx, const JSClass* clasp,
+ JS::Realm* realm, TaggedProto proto,
+ const wasm::RecGroup* recGroup,
+ ObjectFlags objectFlags);
+
+ // Release the reference to the recursion group.
+ inline void finalize(JS::GCContext* gcx);
+
+ const wasm::RecGroup* recGroup() const {
+ MOZ_ASSERT(isWasmGC());
+ return recGroup_;
+ }
+};
+
+// A type that can be used to get the size of the Shape alloc kind.
+class SizedShape : public Shape {
+  // The various shape kinds have an extra word that is defined differently
+  // depending on the type.
+ uintptr_t padding_;
+
+ static void staticAsserts() {
+ // Silence unused field warning.
+ static_assert(sizeof(padding_) == sizeof(uintptr_t));
+
+ // Sanity check Shape size is what we expect.
+#ifdef JS_64BIT
+ static_assert(sizeof(SizedShape) == 4 * sizeof(void*));
+#else
+ static_assert(sizeof(SizedShape) == 6 * sizeof(void*));
+#endif
+
+ // All shape kinds must have the same size.
+ static_assert(sizeof(NativeShape) == sizeof(SizedShape));
+ static_assert(sizeof(SharedShape) == sizeof(SizedShape));
+ static_assert(sizeof(DictionaryShape) == sizeof(SizedShape));
+ static_assert(sizeof(ProxyShape) == sizeof(SizedShape));
+ static_assert(sizeof(WasmGCShape) == sizeof(SizedShape));
+ }
+};
+
+inline NativeShape& js::Shape::asNative() {
+ MOZ_ASSERT(isNative());
+ return *static_cast<NativeShape*>(this);
+}
+
+inline SharedShape& js::Shape::asShared() {
+ MOZ_ASSERT(isShared());
+ return *static_cast<SharedShape*>(this);
+}
+
+inline DictionaryShape& js::Shape::asDictionary() {
+ MOZ_ASSERT(isDictionary());
+ return *static_cast<DictionaryShape*>(this);
+}
+
+inline WasmGCShape& js::Shape::asWasmGC() {
+ MOZ_ASSERT(isWasmGC());
+ return *static_cast<WasmGCShape*>(this);
+}
+
+inline const NativeShape& js::Shape::asNative() const {
+ MOZ_ASSERT(isNative());
+ return *static_cast<const NativeShape*>(this);
+}
+
+inline const SharedShape& js::Shape::asShared() const {
+ MOZ_ASSERT(isShared());
+ return *static_cast<const SharedShape*>(this);
+}
+
+inline const DictionaryShape& js::Shape::asDictionary() const {
+ MOZ_ASSERT(isDictionary());
+ return *static_cast<const DictionaryShape*>(this);
+}
+
+inline const WasmGCShape& js::Shape::asWasmGC() const {
+ MOZ_ASSERT(isWasmGC());
+ return *static_cast<const WasmGCShape*>(this);
+}
+
+// Iterator over a shape's properties. It can be used like this:
+//
+// for (ShapePropertyIter<NoGC> iter(nobj->shape()); !iter.done(); iter++) {
+// PropertyKey key = iter->key();
+// if (iter->isDataProperty() && iter->enumerable()) { .. }
+// }
+//
+// Properties are iterated in reverse order (i.e., iteration starts at the most
+// recently added property).
+template <AllowGC allowGC>
+class MOZ_RAII ShapePropertyIter {
+ typename MaybeRooted<PropMap*, allowGC>::RootType map_;
+ uint32_t mapLength_;
+ const bool isDictionary_;
+
+ protected:
+ ShapePropertyIter(JSContext* cx, NativeShape* shape, bool isDictionary)
+ : map_(cx, shape->propMap()),
+ mapLength_(shape->propMapLength()),
+ isDictionary_(isDictionary) {
+ static_assert(allowGC == CanGC);
+ MOZ_ASSERT(shape->isDictionary() == isDictionary);
+ MOZ_ASSERT(shape->isNative());
+ }
+ ShapePropertyIter(NativeShape* shape, bool isDictionary)
+ : map_(nullptr, shape->propMap()),
+ mapLength_(shape->propMapLength()),
+ isDictionary_(isDictionary) {
+ static_assert(allowGC == NoGC);
+ MOZ_ASSERT(shape->isDictionary() == isDictionary);
+ MOZ_ASSERT(shape->isNative());
+ }
+
+ public:
+ ShapePropertyIter(JSContext* cx, NativeShape* shape)
+ : ShapePropertyIter(cx, shape, shape->isDictionary()) {}
+
+ explicit ShapePropertyIter(NativeShape* shape)
+ : ShapePropertyIter(shape, shape->isDictionary()) {}
+
+ // Deleted constructors: use SharedShapePropertyIter instead.
+ ShapePropertyIter(JSContext* cx, SharedShape* shape) = delete;
+ explicit ShapePropertyIter(SharedShape* shape) = delete;
+
+ bool done() const { return mapLength_ == 0; }
+
+ void operator++(int) {
+ do {
+ MOZ_ASSERT(!done());
+ if (mapLength_ > 1) {
+ mapLength_--;
+ } else if (map_->hasPrevious()) {
+ map_ = map_->asLinked()->previous();
+ mapLength_ = PropMap::Capacity;
+ } else {
+ // Done iterating.
+ map_ = nullptr;
+ mapLength_ = 0;
+ return;
+ }
+ // Dictionary maps can have "holes" for removed properties, so keep going
+ // until we find a non-hole slot.
+ } while (MOZ_UNLIKELY(isDictionary_ && !map_->hasKey(mapLength_ - 1)));
+ }
+
+ PropertyInfoWithKey get() const {
+ MOZ_ASSERT(!done());
+ return map_->getPropertyInfoWithKey(mapLength_ - 1);
+ }
+
+ PropertyInfoWithKey operator*() const { return get(); }
+
+ // Fake pointer struct to make operator-> work.
+ // See https://stackoverflow.com/a/52856349.
+ struct FakePtr {
+ PropertyInfoWithKey val_;
+ const PropertyInfoWithKey* operator->() const { return &val_; }
+ };
+ FakePtr operator->() const { return {get()}; }
+};
+
+// Optimized version of ShapePropertyIter for non-dictionary shapes. It passes
+// `false` for `isDictionary_`, which lets the compiler optimize away the
+// loop structure in ShapePropertyIter::operator++.
+template <AllowGC allowGC>
+class MOZ_RAII SharedShapePropertyIter : public ShapePropertyIter<allowGC> {
+ public:
+ SharedShapePropertyIter(JSContext* cx, SharedShape* shape)
+ : ShapePropertyIter<allowGC>(cx, shape, /* isDictionary = */ false) {}
+
+ explicit SharedShapePropertyIter(SharedShape* shape)
+ : ShapePropertyIter<allowGC>(shape, /* isDictionary = */ false) {}
+};
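+
+// A sketch of iterating a shape known to be shared (same protocol as the
+// ShapePropertyIter example above):
+//
+//   for (SharedShapePropertyIter<NoGC> iter(sharedShape); !iter.done();
+//        iter++) {
+//     PropertyKey key = iter->key();
+//   }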
+
+} // namespace js
+
+// JS::ubi::Nodes can point to Shapes and BaseShapes; they're js::gc::Cell
+// instances that occupy a compartment.
+namespace JS {
+namespace ubi {
+
+template <>
+class Concrete<js::Shape> : TracerConcrete<js::Shape> {
+ protected:
+ explicit Concrete(js::Shape* ptr) : TracerConcrete<js::Shape>(ptr) {}
+
+ public:
+ static void construct(void* storage, js::Shape* ptr) {
+ new (storage) Concrete(ptr);
+ }
+
+ Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+ const char16_t* typeName() const override { return concreteTypeName; }
+ static const char16_t concreteTypeName[];
+};
+
+template <>
+class Concrete<js::BaseShape> : TracerConcrete<js::BaseShape> {
+ protected:
+ explicit Concrete(js::BaseShape* ptr) : TracerConcrete<js::BaseShape>(ptr) {}
+
+ public:
+ static void construct(void* storage, js::BaseShape* ptr) {
+ new (storage) Concrete(ptr);
+ }
+
+ Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+ const char16_t* typeName() const override { return concreteTypeName; }
+ static const char16_t concreteTypeName[];
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif /* vm_Shape_h */
diff --git a/js/src/vm/ShapeZone.cpp b/js/src/vm/ShapeZone.cpp
new file mode 100644
index 0000000000..760fde2e69
--- /dev/null
+++ b/js/src/vm/ShapeZone.cpp
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Per-zone shape and property map tables. */
+
+#include "vm/ShapeZone.h"
+
+#include "gc/Marking-inl.h"
+#include "vm/Shape-inl.h"
+
+using namespace js;
+
+void ShapeZone::fixupPropMapShapeTableAfterMovingGC() {
+ for (PropMapShapeSet::Enum e(propMapShapes); !e.empty(); e.popFront()) {
+ SharedShape* shape = MaybeForwarded(e.front().unbarrieredGet());
+ SharedPropMap* map = shape->propMapMaybeForwarded();
+ BaseShape* base = MaybeForwarded(shape->base());
+
+ PropMapShapeSet::Lookup lookup(base, shape->numFixedSlots(), map,
+ shape->propMapLength(),
+ shape->objectFlags());
+ e.rekeyFront(lookup, shape);
+ }
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void ShapeZone::checkTablesAfterMovingGC() {
+ // Assert that the moving GC worked and that nothing is left in the tables
+ // that points into the nursery, and that the hash table entries are
+ // discoverable.
+
+ for (auto r = initialPropMaps.all(); !r.empty(); r.popFront()) {
+ SharedPropMap* map = r.front().unbarrieredGet();
+ CheckGCThingAfterMovingGC(map);
+
+ InitialPropMapHasher::Lookup lookup(map->getKey(0),
+ map->getPropertyInfo(0));
+ InitialPropMapSet::Ptr ptr = initialPropMaps.lookup(lookup);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+
+ for (auto r = baseShapes.all(); !r.empty(); r.popFront()) {
+ BaseShape* base = r.front().unbarrieredGet();
+ CheckGCThingAfterMovingGC(base);
+
+ BaseShapeHasher::Lookup lookup(base->clasp(), base->realm(), base->proto());
+ BaseShapeSet::Ptr ptr = baseShapes.lookup(lookup);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+
+ for (auto r = initialShapes.all(); !r.empty(); r.popFront()) {
+ SharedShape* shape = r.front().unbarrieredGet();
+ CheckGCThingAfterMovingGC(shape);
+
+ using Lookup = InitialShapeHasher::Lookup;
+ Lookup lookup(shape->getObjectClass(), shape->realm(), shape->proto(),
+ shape->numFixedSlots(), shape->objectFlags());
+ InitialShapeSet::Ptr ptr = initialShapes.lookup(lookup);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+
+ for (auto r = propMapShapes.all(); !r.empty(); r.popFront()) {
+ SharedShape* shape = r.front().unbarrieredGet();
+ CheckGCThingAfterMovingGC(shape);
+
+ using Lookup = PropMapShapeHasher::Lookup;
+ Lookup lookup(shape->base(), shape->numFixedSlots(), shape->propMap(),
+ shape->propMapLength(), shape->objectFlags());
+ PropMapShapeSet::Ptr ptr = propMapShapes.lookup(lookup);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+
+ for (auto r = proxyShapes.all(); !r.empty(); r.popFront()) {
+ ProxyShape* shape = r.front().unbarrieredGet();
+ CheckGCThingAfterMovingGC(shape);
+
+ using Lookup = ProxyShapeHasher::Lookup;
+ Lookup lookup(shape->getObjectClass(), shape->realm(), shape->proto(),
+ shape->objectFlags());
+ ProxyShapeSet::Ptr ptr = proxyShapes.lookup(lookup);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+
+ for (auto r = wasmGCShapes.all(); !r.empty(); r.popFront()) {
+ WasmGCShape* shape = r.front().unbarrieredGet();
+ CheckGCThingAfterMovingGC(shape);
+
+ using Lookup = WasmGCShapeHasher::Lookup;
+ Lookup lookup(shape->getObjectClass(), shape->realm(), shape->proto(),
+ shape->recGroup(), shape->objectFlags());
+ WasmGCShapeSet::Ptr ptr = wasmGCShapes.lookup(lookup);
+ MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
+ }
+}
+#endif // JSGC_HASH_TABLE_CHECKS
+
+ShapeZone::ShapeZone(Zone* zone)
+ : baseShapes(zone),
+ initialPropMaps(zone),
+ initialShapes(zone),
+ propMapShapes(zone),
+ proxyShapes(zone),
+ wasmGCShapes(zone) {}
+
+void ShapeZone::purgeShapeCaches(JS::GCContext* gcx) {
+ for (Shape* shape : shapesWithCache) {
+ MaybeForwarded(shape)->purgeCache(gcx);
+ }
+ shapesWithCache.clearAndFree();
+}
+
+void ShapeZone::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* initialPropMapTable,
+ size_t* shapeTables) {
+ *shapeTables += baseShapes.sizeOfExcludingThis(mallocSizeOf);
+ *initialPropMapTable += initialPropMaps.sizeOfExcludingThis(mallocSizeOf);
+ *shapeTables += initialShapes.sizeOfExcludingThis(mallocSizeOf);
+ *shapeTables += propMapShapes.sizeOfExcludingThis(mallocSizeOf);
+ *shapeTables += proxyShapes.sizeOfExcludingThis(mallocSizeOf);
+ *shapeTables += wasmGCShapes.sizeOfExcludingThis(mallocSizeOf);
+ *shapeTables += shapesWithCache.sizeOfExcludingThis(mallocSizeOf);
+}
diff --git a/js/src/vm/ShapeZone.h b/js/src/vm/ShapeZone.h
new file mode 100644
index 0000000000..72abfbf1fb
--- /dev/null
+++ b/js/src/vm/ShapeZone.h
@@ -0,0 +1,244 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ShapeZone_h
+#define vm_ShapeZone_h
+
+#include "mozilla/MemoryReporting.h"
+
+#include "gc/Barrier.h"
+#include "js/GCHashTable.h"
+#include "vm/PropertyKey.h"
+#include "vm/PropMap.h"
+#include "vm/Shape.h"
+#include "vm/TaggedProto.h"
+
+namespace js {
+
+// Hash policy for the per-zone baseShapes set.
+struct BaseShapeHasher {
+ struct Lookup {
+ const JSClass* clasp;
+ JS::Realm* realm;
+ TaggedProto proto;
+
+ Lookup(const JSClass* clasp, JS::Realm* realm, TaggedProto proto)
+ : clasp(clasp), realm(realm), proto(proto) {}
+ };
+
+ static HashNumber hash(const Lookup& lookup) {
+ HashNumber hash = StableCellHasher<TaggedProto>::hash(lookup.proto);
+ return mozilla::AddToHash(hash, lookup.clasp, lookup.realm);
+ }
+ static bool match(const WeakHeapPtr<BaseShape*>& key, const Lookup& lookup) {
+ return key.unbarrieredGet()->clasp() == lookup.clasp &&
+ key.unbarrieredGet()->realm() == lookup.realm &&
+ key.unbarrieredGet()->proto() == lookup.proto;
+ }
+};
+using BaseShapeSet = JS::WeakCache<
+ JS::GCHashSet<WeakHeapPtr<BaseShape*>, BaseShapeHasher, SystemAllocPolicy>>;
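+
+// Lookups against these weak sets all follow the same pattern (a sketch,
+// mirroring BaseShape::get in Shape.cpp and the checks in ShapeZone.cpp):
+//
+//   BaseShapeHasher::Lookup lookup(clasp, realm, proto);
+//   BaseShapeSet::Ptr p = baseShapes.lookup(lookup);
+//   if (p) {
+//     return *p;  // existing BaseShape with this class/realm/proto
+//   }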
+
+// Hash policy for the per-zone initialPropMaps set, mapping property key + info
+// to a shared property map.
+struct InitialPropMapHasher {
+ struct Lookup {
+ PropertyKey key;
+ PropertyInfo prop;
+
+ Lookup(PropertyKey key, PropertyInfo prop) : key(key), prop(prop) {}
+ };
+ static HashNumber hash(const Lookup& lookup) {
+ HashNumber hash = HashPropertyKey(lookup.key);
+ return mozilla::AddToHash(hash, lookup.prop.toRaw());
+ }
+ static bool match(const WeakHeapPtr<SharedPropMap*>& key,
+ const Lookup& lookup) {
+ const SharedPropMap* map = key.unbarrieredGet();
+ return map->matchProperty(0, lookup.key, lookup.prop);
+ }
+};
+using InitialPropMapSet =
+ JS::WeakCache<JS::GCHashSet<WeakHeapPtr<SharedPropMap*>,
+ InitialPropMapHasher, SystemAllocPolicy>>;
+
+// Helper class to hash information relevant for all shapes.
+struct ShapeBaseHasher {
+ struct Lookup {
+ const JSClass* clasp;
+ JS::Realm* realm;
+ TaggedProto proto;
+ ObjectFlags objectFlags;
+
+ Lookup(const JSClass* clasp, JS::Realm* realm, const TaggedProto& proto,
+ ObjectFlags objectFlags)
+ : clasp(clasp), realm(realm), proto(proto), objectFlags(objectFlags) {}
+ };
+
+ static HashNumber hash(const Lookup& lookup) {
+ HashNumber hash = StableCellHasher<TaggedProto>::hash(lookup.proto);
+ return mozilla::AddToHash(hash, lookup.clasp, lookup.realm,
+ lookup.objectFlags.toRaw());
+ }
+ static bool match(const Shape* shape, const Lookup& lookup) {
+ return lookup.clasp == shape->getObjectClass() &&
+ lookup.realm == shape->realm() && lookup.proto == shape->proto() &&
+ lookup.objectFlags == shape->objectFlags();
+ }
+};
+
+// Hash policy for the per-zone initialShapes set storing initial shapes for
+// objects in the zone.
+//
+// These are empty shapes, except for certain classes (e.g. String, RegExp)
+// which may add baked-in properties. See insertInitialShape.
+struct InitialShapeHasher {
+ struct Lookup : public ShapeBaseHasher::Lookup {
+ uint32_t nfixed;
+
+ Lookup(const JSClass* clasp, JS::Realm* realm, const TaggedProto& proto,
+ uint32_t nfixed, ObjectFlags objectFlags)
+ : ShapeBaseHasher::Lookup(clasp, realm, proto, objectFlags),
+ nfixed(nfixed) {}
+ };
+
+ static HashNumber hash(const Lookup& lookup) {
+ HashNumber hash = ShapeBaseHasher::hash(lookup);
+ return mozilla::AddToHash(hash, lookup.nfixed);
+ }
+ static bool match(const WeakHeapPtr<SharedShape*>& key,
+ const Lookup& lookup) {
+ const SharedShape* shape = key.unbarrieredGet();
+ return ShapeBaseHasher::match(shape, lookup) &&
+ lookup.nfixed == shape->numFixedSlots();
+ }
+};
+using InitialShapeSet =
+ JS::WeakCache<JS::GCHashSet<WeakHeapPtr<SharedShape*>, InitialShapeHasher,
+ SystemAllocPolicy>>;
+
+// Hash policy for the per-zone propMapShapes set storing shared shapes with
+// shared property maps.
+struct PropMapShapeHasher {
+ struct Lookup {
+ BaseShape* base;
+ SharedPropMap* map;
+ uint32_t mapLength;
+ uint32_t nfixed;
+ ObjectFlags objectFlags;
+
+ Lookup(BaseShape* base, uint32_t nfixed, SharedPropMap* map,
+ uint32_t mapLength, ObjectFlags objectFlags)
+ : base(base),
+ map(map),
+ mapLength(mapLength),
+ nfixed(nfixed),
+ objectFlags(objectFlags) {}
+ };
+
+ static HashNumber hash(const Lookup& lookup) {
+ return mozilla::HashGeneric(lookup.base, lookup.map, lookup.mapLength,
+ lookup.nfixed, lookup.objectFlags.toRaw());
+ }
+ static bool match(const WeakHeapPtr<SharedShape*>& key,
+ const Lookup& lookup) {
+ const SharedShape* shape = key.unbarrieredGet();
+ return lookup.base == shape->base() &&
+ lookup.nfixed == shape->numFixedSlots() &&
+ lookup.map == shape->propMap() &&
+ lookup.mapLength == shape->propMapLength() &&
+ lookup.objectFlags == shape->objectFlags();
+ }
+ static void rekey(WeakHeapPtr<SharedShape*>& k,
+ const WeakHeapPtr<SharedShape*>& newKey) {
+ k = newKey;
+ }
+};
+using PropMapShapeSet =
+ JS::WeakCache<JS::GCHashSet<WeakHeapPtr<SharedShape*>, PropMapShapeHasher,
+ SystemAllocPolicy>>;
+
+// Hash policy for the per-zone proxyShapes set storing shapes for proxy objects
+// in the zone.
+struct ProxyShapeHasher : public ShapeBaseHasher {
+ static bool match(const WeakHeapPtr<ProxyShape*>& key, const Lookup& lookup) {
+ const ProxyShape* shape = key.unbarrieredGet();
+ return ShapeBaseHasher::match(shape, lookup);
+ }
+};
+using ProxyShapeSet =
+ JS::WeakCache<JS::GCHashSet<WeakHeapPtr<ProxyShape*>, ProxyShapeHasher,
+ SystemAllocPolicy>>;
+
+// Hash policy for the per-zone wasmGCShapes set storing shapes for Wasm GC
+// objects in the zone.
+struct WasmGCShapeHasher : public ShapeBaseHasher {
+ struct Lookup : public ShapeBaseHasher::Lookup {
+ const wasm::RecGroup* recGroup;
+
+ Lookup(const JSClass* clasp, JS::Realm* realm, const TaggedProto& proto,
+ const wasm::RecGroup* recGroup, ObjectFlags objectFlags)
+ : ShapeBaseHasher::Lookup(clasp, realm, proto, objectFlags),
+ recGroup(recGroup) {}
+ };
+
+ static HashNumber hash(const Lookup& lookup) {
+ HashNumber hash = ShapeBaseHasher::hash(lookup);
+ hash = mozilla::AddToHash(hash, lookup.recGroup);
+ return hash;
+ }
+
+ static bool match(const WeakHeapPtr<WasmGCShape*>& key,
+ const Lookup& lookup) {
+ const WasmGCShape* shape = key.unbarrieredGet();
+ return ShapeBaseHasher::match(shape, lookup) &&
+ shape->recGroup() == lookup.recGroup;
+ }
+};
+using WasmGCShapeSet =
+ JS::WeakCache<JS::GCHashSet<WeakHeapPtr<WasmGCShape*>, WasmGCShapeHasher,
+ SystemAllocPolicy>>;
+
+struct ShapeZone {
+ // Set of all base shapes in the Zone.
+ BaseShapeSet baseShapes;
+
+ // Set used to look up a shared property map based on the first property's
+ // PropertyKey and PropertyInfo.
+ InitialPropMapSet initialPropMaps;
+
+ // Set of initial shapes in the Zone.
+ InitialShapeSet initialShapes;
+
+ // Set of SharedPropMapShapes in the Zone.
+ PropMapShapeSet propMapShapes;
+
+ // Set of ProxyShapes in the Zone.
+ ProxyShapeSet proxyShapes;
+
+ // Set of WasmGCShapes in the Zone.
+ WasmGCShapeSet wasmGCShapes;
+
+ using ShapeWithCacheVector = js::Vector<js::Shape*, 0, js::SystemAllocPolicy>;
+ ShapeWithCacheVector shapesWithCache;
+
+ explicit ShapeZone(Zone* zone);
+
+ void purgeShapeCaches(JS::GCContext* gcx);
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* initialPropMapTable, size_t* shapeTables);
+
+ void fixupPropMapShapeTableAfterMovingGC();
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkTablesAfterMovingGC();
+#endif
+};
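+
+// Illustrative flow (a hedged sketch, not code in this patch): when a new
+// object needs its initial shape, the engine first consults initialShapes
+// via an InitialShapeHasher::Lookup; only on a miss is a fresh SharedShape
+// created and inserted, so objects with the same class, realm, proto, flags,
+// and fixed-slot count share a single Shape within the zone.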
+
+} // namespace js
+
+#endif /* vm_ShapeZone_h */
diff --git a/js/src/vm/SharedArrayObject.cpp b/js/src/vm/SharedArrayObject.cpp
new file mode 100644
index 0000000000..52d1bdde69
--- /dev/null
+++ b/js/src/vm/SharedArrayObject.cpp
@@ -0,0 +1,588 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/SharedArrayObject.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/TaggedAnonymousMemory.h"
+
+#include "gc/GCContext.h"
+#include "gc/Memory.h"
+#include "jit/AtomicOperations.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/PropertySpec.h"
+#include "js/SharedArrayBuffer.h"
+#include "util/Memory.h"
+#include "util/WindowsWrapper.h"
+#include "vm/SharedMem.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmMemory.h"
+
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using js::wasm::Pages;
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+using namespace js;
+using namespace js::jit;
+
+static size_t WasmSharedArrayAccessibleSize(size_t length) {
+ return AlignBytes(length, gc::SystemPageSize());
+}
+
+static size_t NonWasmSharedArrayAllocSize(size_t length) {
+ MOZ_ASSERT(length <= ArrayBufferObject::MaxByteLength);
+ return sizeof(SharedArrayRawBuffer) + length;
+}
+
+// The mapped size for a plain shared array buffer, used only for tracking
+// memory usage. This is incorrect for some Wasm cases, and for hypothetical
+// callers of js::SharedArrayBufferObject::createFromNewRawBuffer that do not
+// currently exist, but it's fine as a signal of GC pressure.
+static size_t SharedArrayMappedSize(bool isWasm, size_t length) {
+ // Wasm buffers use MapBufferMemory and allocate a full page for the header.
+ // Non-Wasm buffers use malloc.
+ if (isWasm) {
+ return WasmSharedArrayAccessibleSize(length) + gc::SystemPageSize();
+ }
+ return NonWasmSharedArrayAllocSize(length);
+}
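+
+// Worked example (illustrative; assumes a 4 KiB system page): a non-Wasm
+// buffer of 1000 bytes is accounted as sizeof(SharedArrayRawBuffer) + 1000
+// bytes, while a Wasm buffer of 1000 bytes is accounted as
+// AlignBytes(1000, 4096) + 4096 = 8192 bytes.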
+
+SharedArrayRawBuffer* SharedArrayRawBuffer::Allocate(size_t length) {
+ MOZ_RELEASE_ASSERT(length <= ArrayBufferObject::MaxByteLength);
+
+ size_t allocSize = NonWasmSharedArrayAllocSize(length);
+ uint8_t* p = js_pod_calloc<uint8_t>(allocSize);
+ if (!p) {
+ return nullptr;
+ }
+
+ uint8_t* buffer = p + sizeof(SharedArrayRawBuffer);
+ auto* rawbuf =
+ new (p) SharedArrayRawBuffer(/* isWasm = */ false, buffer, length);
+ MOZ_ASSERT(rawbuf->length_ == length); // Deallocation needs this
+ return rawbuf;
+}
+
+WasmSharedArrayRawBuffer* WasmSharedArrayRawBuffer::AllocateWasm(
+ wasm::IndexType indexType, Pages initialPages, wasm::Pages clampedMaxPages,
+ const mozilla::Maybe<wasm::Pages>& sourceMaxPages,
+ const mozilla::Maybe<size_t>& mappedSize) {
+  // Prior code has asserted that the initial page count is within our
+  // implementation limits (wasm::MaxMemoryPages()), so we can assume its byte
+  // length is a valid size_t.
+ MOZ_ASSERT(initialPages.hasByteLength());
+ size_t length = initialPages.byteLength();
+
+ MOZ_RELEASE_ASSERT(length <= ArrayBufferObject::MaxByteLength);
+
+ size_t accessibleSize = WasmSharedArrayAccessibleSize(length);
+ if (accessibleSize < length) {
+ return nullptr;
+ }
+
+ size_t computedMappedSize = mappedSize.isSome()
+ ? *mappedSize
+ : wasm::ComputeMappedSize(clampedMaxPages);
+ MOZ_ASSERT(accessibleSize <= computedMappedSize);
+
+ uint64_t mappedSizeWithHeader = computedMappedSize + gc::SystemPageSize();
+ uint64_t accessibleSizeWithHeader = accessibleSize + gc::SystemPageSize();
+
+ void* p = MapBufferMemory(indexType, mappedSizeWithHeader,
+ accessibleSizeWithHeader);
+ if (!p) {
+ return nullptr;
+ }
+
+ uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize();
+ uint8_t* base = buffer - sizeof(WasmSharedArrayRawBuffer);
+ auto* rawbuf = new (base) WasmSharedArrayRawBuffer(
+ buffer, length, indexType, clampedMaxPages,
+ sourceMaxPages.valueOr(Pages(0)), computedMappedSize);
+ MOZ_ASSERT(rawbuf->length_ == length); // Deallocation needs this
+ return rawbuf;
+}
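+
+// Layout note for the allocation above: the mapping begins with one system
+// page of bookkeeping; the WasmSharedArrayRawBuffer header is placed at the
+// end of that page (base = buffer - sizeof(WasmSharedArrayRawBuffer)), so
+// the data array starts exactly on the next page boundary.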
+
+void WasmSharedArrayRawBuffer::tryGrowMaxPagesInPlace(Pages deltaMaxPages) {
+ Pages newMaxPages = clampedMaxPages_;
+ DebugOnly<bool> valid = newMaxPages.checkedIncrement(deltaMaxPages);
+ // Caller must ensure increment does not overflow or increase over the
+ // specified maximum pages.
+ MOZ_ASSERT(valid);
+ MOZ_ASSERT(newMaxPages <= sourceMaxPages_);
+
+ size_t newMappedSize = wasm::ComputeMappedSize(newMaxPages);
+ MOZ_ASSERT(mappedSize_ <= newMappedSize);
+ if (mappedSize_ == newMappedSize) {
+ return;
+ }
+
+ if (!ExtendBufferMapping(basePointer(), mappedSize_, newMappedSize)) {
+ return;
+ }
+
+ mappedSize_ = newMappedSize;
+ clampedMaxPages_ = newMaxPages;
+}
+
+bool WasmSharedArrayRawBuffer::wasmGrowToPagesInPlace(const Lock&,
+ wasm::IndexType t,
+ wasm::Pages newPages) {
+  // Check that the new page count is within our allowable range. This
+  // simultaneously checks against the maximum specified in the source and
+  // against our implementation limits.
+ if (newPages > clampedMaxPages_) {
+ return false;
+ }
+ MOZ_ASSERT(newPages <= wasm::MaxMemoryPages(t) &&
+ newPages.byteLength() <= ArrayBufferObject::MaxByteLength);
+
+ // We have checked against the clamped maximum and so we know we can convert
+ // to byte lengths now.
+ size_t newLength = newPages.byteLength();
+
+ MOZ_ASSERT(newLength >= length_);
+
+ if (newLength == length_) {
+ return true;
+ }
+
+ size_t delta = newLength - length_;
+ MOZ_ASSERT(delta % wasm::PageSize == 0);
+
+ uint8_t* dataEnd = dataPointerShared().unwrap(/* for resize */) + length_;
+ MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0);
+
+ if (!CommitBufferMemory(dataEnd, delta)) {
+ return false;
+ }
+
+  // We rely on CommitBufferMemory (and therefore mmap/VirtualAlloc) to only
+  // return once it has committed memory for all threads. We only update with
+  // a new length once this has occurred.
+ length_ = newLength;
+
+ return true;
+}
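+
+// A minimal growth sketch for the function above (hedged; the caller and the
+// variable names are hypothetical):
+//
+//   WasmSharedArrayRawBuffer::Lock lock(rawBuf);
+//   if (!rawBuf->wasmGrowToPagesInPlace(lock, indexType, newPages)) {
+//     // refused: newPages exceeds clampedMaxPages_, or committing failed
+//   }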
+
+void WasmSharedArrayRawBuffer::discard(size_t byteOffset, size_t byteLen) {
+ SharedMem<uint8_t*> memBase = dataPointerShared();
+
+ // The caller is responsible for ensuring these conditions are met; see this
+ // function's comment in SharedArrayObject.h.
+ MOZ_ASSERT(byteOffset % wasm::PageSize == 0);
+ MOZ_ASSERT(byteLen % wasm::PageSize == 0);
+ MOZ_ASSERT(wasm::MemoryBoundsCheck(uint64_t(byteOffset), uint64_t(byteLen),
+ volatileByteLength()));
+
+ // Discarding zero bytes "succeeds" with no effect.
+ if (byteLen == 0) {
+ return;
+ }
+
+ SharedMem<uint8_t*> addr = memBase + uintptr_t(byteOffset);
+
+ // On POSIX-ish platforms, we discard memory by overwriting previously-mapped
+ // pages with freshly-mapped pages (which are all zeroed). The operating
+ // system recognizes this and decreases the process RSS, and eventually
+ // collects the abandoned physical pages.
+ //
+ // On Windows, committing over previously-committed pages has no effect. We
+ // could decommit and recommit, but this doesn't work for shared memories
+ // since other threads could access decommitted memory - causing a trap.
+ // Instead, we simply zero memory (memset 0), and then VirtualUnlock(), which
+ // for Historical Reasons immediately removes the pages from the working set.
+ // And then, because the pages were zeroed, Windows will actually reclaim the
+ // memory entirely instead of paging it out to disk. Naturally this behavior
+ // is not officially documented, but a Raymond Chen blog post is basically as
+ // good as MSDN, right?
+ //
+ // https://devblogs.microsoft.com/oldnewthing/20170113-00/?p=95185
+
+#ifdef XP_WIN
+  // Discarding the entire region at once causes us to page the entire region
+  // into the working set, only to throw it out again. This can actually be
+  // disastrous when discarding already-discarded memory. To mitigate this, we
+  // discard a chunk of memory at a time - this comes at a small performance
+  // cost from syscalls and potentially less-optimal memsets.
+ size_t numPages = byteLen / wasm::PageSize;
+ for (size_t i = 0; i < numPages; i++) {
+ AtomicOperations::memsetSafeWhenRacy(addr + (i * wasm::PageSize), 0,
+ wasm::PageSize);
+ DebugOnly<bool> result =
+ VirtualUnlock(addr.unwrap() + (i * wasm::PageSize), wasm::PageSize);
+ MOZ_ASSERT(!result); // this always "fails" when unlocking unlocked
+ // memory...which is the only case we care about
+ }
+#elif defined(__wasi__)
+ AtomicOperations::memsetSafeWhenRacy(addr, 0, byteLen);
+#else // !XP_WIN
+ void* data = MozTaggedAnonymousMmap(
+ addr.unwrap(), byteLen, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0, "wasm-reserved");
+ if (data == MAP_FAILED) {
+ MOZ_CRASH("failed to discard wasm memory; memory mappings may be broken");
+ }
+#endif
+}
+
+bool SharedArrayRawBuffer::addReference() {
+ MOZ_RELEASE_ASSERT(refcount_ > 0);
+
+ // Be careful never to overflow the refcount field.
+ for (;;) {
+ uint32_t old_refcount = refcount_;
+ uint32_t new_refcount = old_refcount + 1;
+ if (new_refcount == 0) {
+ return false;
+ }
+ if (refcount_.compareExchange(old_refcount, new_refcount)) {
+ return true;
+ }
+ }
+}
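+
+// Note on the loop above: if compareExchange fails, another thread changed
+// refcount_ concurrently, so the loop reloads and retries; the
+// new_refcount == 0 check refuses the increment that would wrap a refcount
+// of UINT32_MAX.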
+
+void SharedArrayRawBuffer::dropReference() {
+ // Normally if the refcount is zero then the memory will have been unmapped
+ // and this test may just crash, but if the memory has been retained for any
+ // reason we will catch the underflow here.
+ MOZ_RELEASE_ASSERT(refcount_ > 0);
+
+ // Drop the reference to the buffer.
+ uint32_t new_refcount = --refcount_; // Atomic.
+ if (new_refcount) {
+ return;
+ }
+
+ // This was the final reference, so release the buffer.
+ if (isWasm()) {
+ WasmSharedArrayRawBuffer* wasmBuf = toWasmBuffer();
+ wasm::IndexType indexType = wasmBuf->wasmIndexType();
+ uint8_t* basePointer = wasmBuf->basePointer();
+ size_t mappedSizeWithHeader = wasmBuf->mappedSize() + gc::SystemPageSize();
+ // Call the destructor to destroy the growLock_ Mutex.
+ wasmBuf->~WasmSharedArrayRawBuffer();
+ UnmapBufferMemory(indexType, basePointer, mappedSizeWithHeader);
+ } else {
+ js_delete(this);
+ }
+}
+
+static bool IsSharedArrayBuffer(HandleValue v) {
+ return v.isObject() && v.toObject().is<SharedArrayBufferObject>();
+}
+
+MOZ_ALWAYS_INLINE bool SharedArrayBufferObject::byteLengthGetterImpl(
+ JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(IsSharedArrayBuffer(args.thisv()));
+ auto* buffer = &args.thisv().toObject().as<SharedArrayBufferObject>();
+ args.rval().setNumber(buffer->byteLength());
+ return true;
+}
+
+bool SharedArrayBufferObject::byteLengthGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsSharedArrayBuffer, byteLengthGetterImpl>(cx,
+ args);
+}
+
+// ES2017 draft rev 6390c2f1b34b309895d31d8c0512eac8660a0210
+// 24.2.2.1 SharedArrayBuffer( length )
+bool SharedArrayBufferObject::class_constructor(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 1.
+ if (!ThrowIfNotConstructing(cx, args, "SharedArrayBuffer")) {
+ return false;
+ }
+
+ // Step 2.
+ uint64_t byteLength;
+ if (!ToIndex(cx, args.get(0), &byteLength)) {
+ return false;
+ }
+
+ // Step 3 (Inlined 24.2.1.1 AllocateSharedArrayBuffer).
+ // 24.2.1.1, step 1 (Inlined 9.1.14 OrdinaryCreateFromConstructor).
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_SharedArrayBuffer,
+ &proto)) {
+ return false;
+ }
+
+ // 24.2.1.1, step 3 (Inlined 6.2.7.2 CreateSharedByteDataBlock, step 2).
+ // Refuse to allocate too large buffers.
+ if (byteLength > ArrayBufferObject::MaxByteLength) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SHARED_ARRAY_BAD_LENGTH);
+ return false;
+ }
+
+ // 24.2.1.1, steps 1 and 4-6.
+ JSObject* bufobj = New(cx, byteLength, proto);
+ if (!bufobj) {
+ return false;
+ }
+ args.rval().setObject(*bufobj);
+ return true;
+}
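+
+// JS-level behavior sketch for the constructor above (illustrative):
+//
+//   const sab = new SharedArrayBuffer(16);  // 16-byte shared buffer
+//   SharedArrayBuffer(16);                  // TypeError: requires 'new'
+//   new SharedArrayBuffer(2 ** 53);         // RangeError from ToIndex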
+
+SharedArrayBufferObject* SharedArrayBufferObject::New(JSContext* cx,
+ size_t length,
+ HandleObject proto) {
+ SharedArrayRawBuffer* buffer = SharedArrayRawBuffer::Allocate(length);
+ if (!buffer) {
+ js::ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ SharedArrayBufferObject* obj = New(cx, buffer, length, proto);
+ if (!obj) {
+ buffer->dropReference();
+ return nullptr;
+ }
+
+ return obj;
+}
+
+SharedArrayBufferObject* SharedArrayBufferObject::New(
+ JSContext* cx, SharedArrayRawBuffer* buffer, size_t length,
+ HandleObject proto) {
+ MOZ_ASSERT(cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled());
+
+ AutoSetNewObjectMetadata metadata(cx);
+ Rooted<SharedArrayBufferObject*> obj(
+ cx, NewObjectWithClassProto<SharedArrayBufferObject>(cx, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->getClass() == &class_);
+
+ cx->runtime()->incSABCount();
+
+ if (!obj->acceptRawBuffer(buffer, length)) {
+ js::ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return obj;
+}
+
+bool SharedArrayBufferObject::acceptRawBuffer(SharedArrayRawBuffer* buffer,
+ size_t length) {
+ if (!zone()->addSharedMemory(buffer,
+ SharedArrayMappedSize(buffer->isWasm(), length),
+ MemoryUse::SharedArrayRawBuffer)) {
+ return false;
+ }
+
+ setFixedSlot(RAWBUF_SLOT, PrivateValue(buffer));
+ setFixedSlot(LENGTH_SLOT, PrivateValue(length));
+ return true;
+}
+
+void SharedArrayBufferObject::dropRawBuffer() {
+ size_t size = SharedArrayMappedSize(isWasm(), byteLength());
+ zoneFromAnyThread()->removeSharedMemory(rawBufferObject(), size,
+ MemoryUse::SharedArrayRawBuffer);
+ rawBufferObject()->dropReference();
+ setFixedSlot(RAWBUF_SLOT, UndefinedValue());
+}
+
+SharedArrayRawBuffer* SharedArrayBufferObject::rawBufferObject() const {
+ Value v = getFixedSlot(RAWBUF_SLOT);
+ MOZ_ASSERT(!v.isUndefined());
+ return reinterpret_cast<SharedArrayRawBuffer*>(v.toPrivate());
+}
+
+void SharedArrayBufferObject::Finalize(JS::GCContext* gcx, JSObject* obj) {
+ // Must be foreground finalizable so that we can account for the object.
+ MOZ_ASSERT(gcx->onMainThread());
+ gcx->runtime()->decSABCount();
+
+ SharedArrayBufferObject& buf = obj->as<SharedArrayBufferObject>();
+
+ // Detect the case of failure during SharedArrayBufferObject creation,
+ // which causes a SharedArrayRawBuffer to never be attached.
+ Value v = buf.getFixedSlot(RAWBUF_SLOT);
+ if (!v.isUndefined()) {
+ buf.dropRawBuffer();
+ }
+}
+
+/* static */
+void SharedArrayBufferObject::addSizeOfExcludingThis(
+ JSObject* obj, mozilla::MallocSizeOf mallocSizeOf, JS::ClassInfo* info,
+ JS::RuntimeSizes* runtimeSizes) {
+  // Divide the buffer size by the refcount to get the fraction of the buffer
+  // owned by this thread. It's conceivable that the refcount might change in
+  // the middle of memory reporting, in which case the amount reported for
+  // some threads might be too high (if the refcount goes up) or too low (if
+  // the refcount goes down). But that's unlikely and hard to avoid, so we
+  // just live with the risk.
+ const SharedArrayBufferObject& buf = obj->as<SharedArrayBufferObject>();
+ size_t owned = buf.byteLength() / buf.rawBufferObject()->refcount();
+ if (buf.isWasm()) {
+ info->objectsNonHeapElementsWasmShared += owned;
+ if (runtimeSizes) {
+ size_t ownedGuardPages = (buf.wasmMappedSize() - buf.byteLength()) /
+ buf.rawBufferObject()->refcount();
+ runtimeSizes->wasmGuardPages += ownedGuardPages;
+ }
+ } else {
+ info->objectsNonHeapElementsShared += owned;
+ }
+}
+
+/* static */
+void SharedArrayBufferObject::copyData(
+ Handle<ArrayBufferObjectMaybeShared*> toBuffer, size_t toIndex,
+ Handle<ArrayBufferObjectMaybeShared*> fromBuffer, size_t fromIndex,
+ size_t count) {
+ MOZ_ASSERT(toBuffer->byteLength() >= count);
+ MOZ_ASSERT(toBuffer->byteLength() >= toIndex + count);
+ MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex);
+ MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex + count);
+
+ jit::AtomicOperations::memcpySafeWhenRacy(
+ toBuffer->dataPointerEither() + toIndex,
+ fromBuffer->dataPointerEither() + fromIndex, count);
+}
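+
+// Usage sketch (hedged; |to| and |from| are hypothetical rooted buffers).
+// Either side may be shared, so the copy goes through memcpySafeWhenRacy:
+//
+//   SharedArrayBufferObject::copyData(to, /* toIndex = */ 0,
+//                                     from, /* fromIndex = */ 8,
+//                                     /* count = */ 32);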
+
+SharedArrayBufferObject* SharedArrayBufferObject::createFromNewRawBuffer(
+ JSContext* cx, WasmSharedArrayRawBuffer* buffer, size_t initialSize) {
+ MOZ_ASSERT(cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled());
+
+ AutoSetNewObjectMetadata metadata(cx);
+ SharedArrayBufferObject* obj =
+ NewBuiltinClassInstance<SharedArrayBufferObject>(cx);
+ if (!obj) {
+ buffer->dropReference();
+ return nullptr;
+ }
+
+ cx->runtime()->incSABCount();
+
+ if (!obj->acceptRawBuffer(buffer, initialSize)) {
+ buffer->dropReference();
+ return nullptr;
+ }
+
+ return obj;
+}
+
+/* static */
+void SharedArrayBufferObject::wasmDiscard(HandleSharedArrayBufferObject buf,
+ uint64_t byteOffset,
+ uint64_t byteLen) {
+ MOZ_ASSERT(buf->isWasm());
+ buf->rawWasmBufferObject()->discard(byteOffset, byteLen);
+}
+
+static const JSClassOps SharedArrayBufferObjectClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ SharedArrayBufferObject::Finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+static const JSFunctionSpec sharedarray_functions[] = {JS_FS_END};
+
+static const JSPropertySpec sharedarray_properties[] = {
+    JS_SELF_HOSTED_SYM_GET(species, "$SharedArrayBufferSpecies", 0), JS_PS_END};
+
+static const JSFunctionSpec sharedarray_proto_functions[] = {
+ JS_SELF_HOSTED_FN("slice", "SharedArrayBufferSlice", 2, 0), JS_FS_END};
+
+static const JSPropertySpec sharedarray_proto_properties[] = {
+ JS_PSG("byteLength", SharedArrayBufferObject::byteLengthGetter, 0),
+ JS_STRING_SYM_PS(toStringTag, "SharedArrayBuffer", JSPROP_READONLY),
+ JS_PS_END};
+
+static const ClassSpec SharedArrayBufferObjectClassSpec = {
+ GenericCreateConstructor<SharedArrayBufferObject::class_constructor, 1,
+ gc::AllocKind::FUNCTION>,
+ GenericCreatePrototype<SharedArrayBufferObject>,
+    sharedarray_functions,
+    sharedarray_properties,
+ sharedarray_proto_functions,
+ sharedarray_proto_properties};
+
+const JSClass SharedArrayBufferObject::class_ = {
+ "SharedArrayBuffer",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(SharedArrayBufferObject::RESERVED_SLOTS) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_SharedArrayBuffer) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &SharedArrayBufferObjectClassOps, &SharedArrayBufferObjectClassSpec,
+ JS_NULL_CLASS_EXT};
+
+const JSClass SharedArrayBufferObject::protoClass_ = {
+ "SharedArrayBuffer.prototype",
+ JSCLASS_HAS_CACHED_PROTO(JSProto_SharedArrayBuffer), JS_NULL_CLASS_OPS,
+ &SharedArrayBufferObjectClassSpec};
+
+JS_PUBLIC_API size_t JS::GetSharedArrayBufferByteLength(JSObject* obj) {
+ auto* aobj = obj->maybeUnwrapAs<SharedArrayBufferObject>();
+ return aobj ? aobj->byteLength() : 0;
+}
+
+JS_PUBLIC_API void JS::GetSharedArrayBufferLengthAndData(JSObject* obj,
+ size_t* length,
+ bool* isSharedMemory,
+ uint8_t** data) {
+ MOZ_ASSERT(obj->is<SharedArrayBufferObject>());
+ *length = obj->as<SharedArrayBufferObject>().byteLength();
+ *data = obj->as<SharedArrayBufferObject>().dataPointerShared().unwrap(
+ /*safe - caller knows*/);
+ *isSharedMemory = true;
+}
+
+JS_PUBLIC_API JSObject* JS::NewSharedArrayBuffer(JSContext* cx, size_t nbytes) {
+ MOZ_ASSERT(cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled());
+
+ if (nbytes > ArrayBufferObject::MaxByteLength) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SHARED_ARRAY_BAD_LENGTH);
+ return nullptr;
+ }
+
+ return SharedArrayBufferObject::New(cx, nbytes,
+ /* proto = */ nullptr);
+}
+
+JS_PUBLIC_API bool JS::IsSharedArrayBufferObject(JSObject* obj) {
+ return obj->canUnwrapAs<SharedArrayBufferObject>();
+}
+
+JS_PUBLIC_API uint8_t* JS::GetSharedArrayBufferData(
+ JSObject* obj, bool* isSharedMemory, const JS::AutoRequireNoGC&) {
+ auto* aobj = obj->maybeUnwrapAs<SharedArrayBufferObject>();
+ if (!aobj) {
+ return nullptr;
+ }
+ *isSharedMemory = true;
+ return aobj->dataPointerShared().unwrap(/*safe - caller knows*/);
+}
+
+JS_PUBLIC_API bool JS::ContainsSharedArrayBuffer(JSContext* cx) {
+ return cx->runtime()->hasLiveSABs();
+}
diff --git a/js/src/vm/SharedArrayObject.h b/js/src/vm/SharedArrayObject.h
new file mode 100644
index 0000000000..9a3dddaf0f
--- /dev/null
+++ b/js/src/vm/SharedArrayObject.h
@@ -0,0 +1,327 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SharedArrayObject_h
+#define vm_SharedArrayObject_h
+
+#include "mozilla/Atomics.h"
+
+#include "jstypes.h"
+
+#include "gc/Memory.h"
+#include "vm/ArrayBufferObject.h"
+#include "wasm/WasmMemory.h"
+
+namespace js {
+
+class FutexWaiter;
+class WasmSharedArrayRawBuffer;
+
+/*
+ * SharedArrayRawBuffer
+ *
+ * A bookkeeping object always stored before the raw buffer. The buffer itself
+ * is refcounted. SharedArrayBufferObjects and structured clone objects may hold
+ * references.
+ *
+ * WasmSharedArrayRawBuffer is a derived class that's used for Wasm buffers.
+ *
+ * - Non-Wasm buffers are allocated with a single calloc allocation, like this:
+ *
+ * |<------ sizeof ------>|<- length ->|
+ * | SharedArrayRawBuffer | data array |
+ *
+ * - Wasm buffers are allocated with MapBufferMemory (mmap), like this:
+ *
+ *          |<-------- sizeof -------->|<- length ->|
+ *  | waste | WasmSharedArrayRawBuffer | data array | waste |
+ *
+ * Observe that if we want to map the data array at a specific address, such
+ * as absolute zero (bug 1056027), then the {Wasm}SharedArrayRawBuffer cannot
+ * be prefixed to the data array; it has to be a separate object, also in
+ * shared memory. (That would get rid of ~4KB of waste, as well.) Very little
+ * else would have to change throughout the engine; the SARB would point to
+ * the data array using a constant pointer, instead of computing its address.
+ *
+ * For Wasm buffers, length_ can change following initialization; it may grow
+ * toward sourceMaxPages_. See extensive comments above WasmArrayRawBuffer in
+ * ArrayBufferObject.cpp. length_ only grows when the lock is held.
+ */
+class SharedArrayRawBuffer {
+ protected:
+ // Whether this is a WasmSharedArrayRawBuffer.
+ bool isWasm_;
+
+ mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> refcount_;
+ mozilla::Atomic<size_t, mozilla::SequentiallyConsistent> length_;
+
+ // A list of structures representing tasks waiting on some
+ // location within this buffer.
+ FutexWaiter* waiters_ = nullptr;
+
+ protected:
+ SharedArrayRawBuffer(bool isWasm, uint8_t* buffer, size_t length)
+ : isWasm_(isWasm), refcount_(1), length_(length) {
+ MOZ_ASSERT(buffer == dataPointerShared());
+ }
+
+ public:
+ static SharedArrayRawBuffer* Allocate(size_t length);
+
+ inline WasmSharedArrayRawBuffer* toWasmBuffer();
+
+ // This may be called from multiple threads. The caller must take
+ // care of mutual exclusion.
+ FutexWaiter* waiters() const { return waiters_; }
+
+ // This may be called from multiple threads. The caller must take
+ // care of mutual exclusion.
+ void setWaiters(FutexWaiter* waiters) { waiters_ = waiters; }
+
+ inline SharedMem<uint8_t*> dataPointerShared() const;
+
+ size_t volatileByteLength() const { return length_; }
+
+ bool isWasm() const { return isWasm_; }
+
+ uint32_t refcount() const { return refcount_; }
+
+ [[nodiscard]] bool addReference();
+ void dropReference();
+
+ static int32_t liveBuffers();
+};
+
+class WasmSharedArrayRawBuffer : public SharedArrayRawBuffer {
+ private:
+ Mutex growLock_ MOZ_UNANNOTATED;
+ // The index type of this buffer.
+ wasm::IndexType indexType_;
+ // The maximum size of this buffer in wasm pages.
+ wasm::Pages clampedMaxPages_;
+ wasm::Pages sourceMaxPages_;
+ size_t mappedSize_; // Does not include the page for the header.
+
+ uint8_t* basePointer() {
+ SharedMem<uint8_t*> p = dataPointerShared() - gc::SystemPageSize();
+ MOZ_ASSERT(p.asValue() % gc::SystemPageSize() == 0);
+ return p.unwrap(/* we trust you won't abuse it */);
+ }
+
+ protected:
+ WasmSharedArrayRawBuffer(uint8_t* buffer, size_t length,
+ wasm::IndexType indexType,
+ wasm::Pages clampedMaxPages,
+ wasm::Pages sourceMaxPages, size_t mappedSize)
+ : SharedArrayRawBuffer(/* isWasm = */ true, buffer, length),
+ growLock_(mutexid::SharedArrayGrow),
+ indexType_(indexType),
+ clampedMaxPages_(clampedMaxPages),
+ sourceMaxPages_(sourceMaxPages),
+ mappedSize_(mappedSize) {}
+
+ public:
+ friend class SharedArrayRawBuffer;
+
+ class Lock;
+ friend class Lock;
+
+ class MOZ_RAII Lock {
+ WasmSharedArrayRawBuffer* buf;
+
+ public:
+ explicit Lock(WasmSharedArrayRawBuffer* buf) : buf(buf) {
+ buf->growLock_.lock();
+ }
+ ~Lock() { buf->growLock_.unlock(); }
+ };
+
+ static WasmSharedArrayRawBuffer* AllocateWasm(
+ wasm::IndexType indexType, wasm::Pages initialPages,
+ wasm::Pages clampedMaxPages,
+ const mozilla::Maybe<wasm::Pages>& sourceMaxPages,
+ const mozilla::Maybe<size_t>& mappedSize);
+
+ static const WasmSharedArrayRawBuffer* fromDataPtr(const uint8_t* dataPtr) {
+ return reinterpret_cast<const WasmSharedArrayRawBuffer*>(
+ dataPtr - sizeof(WasmSharedArrayRawBuffer));
+ }
+
+ static WasmSharedArrayRawBuffer* fromDataPtr(uint8_t* dataPtr) {
+ return reinterpret_cast<WasmSharedArrayRawBuffer*>(
+ dataPtr - sizeof(WasmSharedArrayRawBuffer));
+ }
+
+ wasm::IndexType wasmIndexType() const { return indexType_; }
+
+ wasm::Pages volatileWasmPages() const {
+ return wasm::Pages::fromByteLengthExact(length_);
+ }
+
+ wasm::Pages wasmClampedMaxPages() const { return clampedMaxPages_; }
+ wasm::Pages wasmSourceMaxPages() const { return sourceMaxPages_; }
+
+ size_t mappedSize() const { return mappedSize_; }
+
+ void tryGrowMaxPagesInPlace(wasm::Pages deltaMaxPages);
+
+ bool wasmGrowToPagesInPlace(const Lock&, wasm::IndexType t,
+ wasm::Pages newPages);
+
+ // Discard a region of memory, zeroing the pages and releasing physical memory
+ // back to the operating system. byteOffset and byteLen must be wasm page
+ // aligned and in bounds. A discard of zero bytes will have no effect.
+ void discard(size_t byteOffset, size_t byteLen);
+};
+
+inline WasmSharedArrayRawBuffer* SharedArrayRawBuffer::toWasmBuffer() {
+ MOZ_ASSERT(isWasm());
+ return static_cast<WasmSharedArrayRawBuffer*>(this);
+}
+
+inline SharedMem<uint8_t*> SharedArrayRawBuffer::dataPointerShared() const {
+ uint8_t* ptr =
+ reinterpret_cast<uint8_t*>(const_cast<SharedArrayRawBuffer*>(this));
+ ptr += isWasm() ? sizeof(WasmSharedArrayRawBuffer)
+ : sizeof(SharedArrayRawBuffer);
+ return SharedMem<uint8_t*>::shared(ptr);
+}
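+
+// The pointer arithmetic above mirrors the layout diagrams earlier in this
+// file: each header object sits immediately before its data array, so the
+// data pointer is simply |this| advanced past the (possibly derived) header.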
+
+/*
+ * SharedArrayBufferObject
+ *
+ * When transferred to a WebWorker, the buffer is not detached on the
+ * parent side, and both child and parent reference the same buffer.
+ *
+ * The underlying memory is memory-mapped and reference counted
+ * (across workers and/or processes). The SharedArrayBuffer object
+ * has a finalizer that decrements the refcount, the last one to leave
+ * (globally) unmaps the memory. The sender ups the refcount before
+ * transmitting the memory to another worker.
+ *
+ * SharedArrayBufferObject (or really the underlying memory) /is
+ * racy/: more than one worker can access the memory at the same time.
+ *
+ * A TypedArrayObject (a view) references a SharedArrayBuffer
+ * and keeps it alive. The SharedArrayBuffer does /not/ reference its
+ * views.
+ */
+class SharedArrayBufferObject : public ArrayBufferObjectMaybeShared {
+ static bool byteLengthGetterImpl(JSContext* cx, const CallArgs& args);
+
+ public:
+ // RAWBUF_SLOT holds a pointer (as "private" data) to the
+ // SharedArrayRawBuffer object, which is manually managed storage.
+ static const uint8_t RAWBUF_SLOT = 0;
+
+ // LENGTH_SLOT holds the length of the underlying buffer as it was when this
+ // object was created. For JS use cases this is the same length as the
+ // buffer, but for Wasm the buffer can grow, and the buffer's length may be
+ // greater than the object's length.
+ static const uint8_t LENGTH_SLOT = 1;
+
+ static_assert(LENGTH_SLOT == ArrayBufferObject::BYTE_LENGTH_SLOT,
+ "JIT code assumes the same slot is used for the length");
+
+ static const uint8_t RESERVED_SLOTS = 2;
+
+ static const JSClass class_;
+ static const JSClass protoClass_;
+
+ static bool byteLengthGetter(JSContext* cx, unsigned argc, Value* vp);
+
+ static bool class_constructor(JSContext* cx, unsigned argc, Value* vp);
+
+ static bool isOriginalByteLengthGetter(Native native) {
+ return native == byteLengthGetter;
+ }
+
+ // Create a SharedArrayBufferObject with a new SharedArrayRawBuffer.
+ static SharedArrayBufferObject* New(JSContext* cx, size_t length,
+ HandleObject proto = nullptr);
+
+ // Create a SharedArrayBufferObject using an existing SharedArrayRawBuffer,
+ // recording the given length in the SharedArrayBufferObject.
+ static SharedArrayBufferObject* New(JSContext* cx,
+ SharedArrayRawBuffer* buffer,
+ size_t length,
+ HandleObject proto = nullptr);
+
+ static void Finalize(JS::GCContext* gcx, JSObject* obj);
+
+ static void addSizeOfExcludingThis(JSObject* obj,
+ mozilla::MallocSizeOf mallocSizeOf,
+ JS::ClassInfo* info,
+ JS::RuntimeSizes* runtimeSizes);
+
+ static void copyData(Handle<ArrayBufferObjectMaybeShared*> toBuffer,
+ size_t toIndex,
+ Handle<ArrayBufferObjectMaybeShared*> fromBuffer,
+ size_t fromIndex, size_t count);
+
+ SharedArrayRawBuffer* rawBufferObject() const;
+
+ WasmSharedArrayRawBuffer* rawWasmBufferObject() const {
+ return rawBufferObject()->toWasmBuffer();
+ }
+
+ // Invariant: This method does not cause GC and can be called
+ // without anchoring the object it is called on.
+ uintptr_t globalID() const {
+ // The buffer address is good enough as an ID provided the memory is not
+ // shared between processes or, if it is, it is mapped to the same address
+ // in every process. (At the moment, shared memory cannot be shared between
+ // processes.)
+ return dataPointerShared().asValue();
+ }
+
+ size_t byteLength() const {
+ return size_t(getFixedSlot(LENGTH_SLOT).toPrivate());
+ }
+
+ bool isWasm() const { return rawBufferObject()->isWasm(); }
+ SharedMem<uint8_t*> dataPointerShared() const {
+ return rawBufferObject()->dataPointerShared();
+ }
+
+ // WebAssembly support:
+
+ // Create a SharedArrayBufferObject using the provided buffer and size.
+ // Assumes ownership of a reference to |buffer| even in case of failure,
+ // i.e. on failure |buffer->dropReference()| is performed.
+ static SharedArrayBufferObject* createFromNewRawBuffer(
+ JSContext* cx, WasmSharedArrayRawBuffer* buffer, size_t initialSize);
+
+ wasm::Pages volatileWasmPages() const {
+ return rawWasmBufferObject()->volatileWasmPages();
+ }
+ wasm::Pages wasmClampedMaxPages() const {
+ return rawWasmBufferObject()->wasmClampedMaxPages();
+ }
+ wasm::Pages wasmSourceMaxPages() const {
+ return rawWasmBufferObject()->wasmSourceMaxPages();
+ }
+
+ size_t wasmMappedSize() const { return rawWasmBufferObject()->mappedSize(); }
+
+ static void wasmDiscard(Handle<SharedArrayBufferObject*> buf,
+ uint64_t byteOffset, uint64_t byteLength);
+
+ private:
+ [[nodiscard]] bool acceptRawBuffer(SharedArrayRawBuffer* buffer,
+ size_t length);
+ void dropRawBuffer();
+};
+
+using RootedSharedArrayBufferObject = Rooted<SharedArrayBufferObject*>;
+using HandleSharedArrayBufferObject = Handle<SharedArrayBufferObject*>;
+using MutableHandleSharedArrayBufferObject =
+ MutableHandle<SharedArrayBufferObject*>;
+
+} // namespace js
+
+#endif // vm_SharedArrayObject_h
diff --git a/js/src/vm/SharedImmutableStringsCache-inl.h b/js/src/vm/SharedImmutableStringsCache-inl.h
new file mode 100644
index 0000000000..34690b202d
--- /dev/null
+++ b/js/src/vm/SharedImmutableStringsCache-inl.h
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SharedImmutableStringsCache_inl_h
+#define vm_SharedImmutableStringsCache_inl_h
+
+#include "vm/SharedImmutableStringsCache.h"
+
+namespace js {
+
+template <typename IntoOwnedChars>
+[[nodiscard]] SharedImmutableString SharedImmutableStringsCache::getOrCreate(
+ const char* chars, size_t length, IntoOwnedChars intoOwnedChars) {
+ MOZ_ASSERT(inner_);
+ MOZ_ASSERT(chars);
+ Hasher::Lookup lookup(Hasher::hashLongString(chars, length), chars, length);
+
+ auto locked = inner_->lock();
+ auto entry = locked->set.lookupForAdd(lookup);
+ if (!entry) {
+ OwnedChars ownedChars(intoOwnedChars());
+ if (!ownedChars) {
+ return SharedImmutableString();
+ }
+ MOZ_ASSERT(ownedChars.get() == chars ||
+ memcmp(ownedChars.get(), chars, length) == 0);
+ auto box = StringBox::Create(std::move(ownedChars), length, inner_);
+ if (!box || !locked->set.add(entry, std::move(box))) {
+ return SharedImmutableString();
+ }
+ }
+
+ MOZ_ASSERT(entry && *entry);
+ return SharedImmutableString(entry->get());
+}
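+
+// Usage sketch for the template above (hedged; |cache| is a hypothetical
+// reference to a SharedImmutableStringsCache). The lambda hands ownership to
+// the cache only on a miss:
+//
+//   JS::UniqueChars str = DuplicateString("racecar", 7);
+//   auto shared =
+//       cache.getOrCreate(str.get(), 7, [&]() { return std::move(str); });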
+
+template <typename IntoOwnedTwoByteChars>
+[[nodiscard]] SharedImmutableTwoByteString
+SharedImmutableStringsCache::getOrCreate(
+ const char16_t* chars, size_t length,
+ IntoOwnedTwoByteChars intoOwnedTwoByteChars) {
+ MOZ_ASSERT(inner_);
+ MOZ_ASSERT(chars);
+ auto hash = Hasher::hashLongString(reinterpret_cast<const char*>(chars),
+ length * sizeof(char16_t));
+ Hasher::Lookup lookup(hash, chars, length);
+
+ auto locked = inner_->lock();
+ auto entry = locked->set.lookupForAdd(lookup);
+ if (!entry) {
+ OwnedTwoByteChars ownedTwoByteChars(intoOwnedTwoByteChars());
+ if (!ownedTwoByteChars) {
+ return SharedImmutableTwoByteString();
+ }
+ MOZ_ASSERT(
+ ownedTwoByteChars.get() == chars ||
+ memcmp(ownedTwoByteChars.get(), chars, length * sizeof(char16_t)) == 0);
+ OwnedChars ownedChars(reinterpret_cast<char*>(ownedTwoByteChars.release()));
+ auto box = StringBox::Create(std::move(ownedChars),
+ length * sizeof(char16_t), inner_);
+ if (!box || !locked->set.add(entry, std::move(box))) {
+ return SharedImmutableTwoByteString();
+ }
+ }
+
+ MOZ_ASSERT(entry && *entry);
+ return SharedImmutableTwoByteString(entry->get());
+}
+
+} // namespace js
+
+#endif // vm_SharedImmutableStringsCache_inl_h
diff --git a/js/src/vm/SharedImmutableStringsCache.cpp b/js/src/vm/SharedImmutableStringsCache.cpp
new file mode 100644
index 0000000000..4428121400
--- /dev/null
+++ b/js/src/vm/SharedImmutableStringsCache.cpp
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/SharedImmutableStringsCache-inl.h"
+
+#include "util/Text.h"
+#include "vm/MutexIDs.h" // js::mutexid
+#include "vm/Runtime.h" // JSRuntime
+
+namespace js {
+
+/* static */
+SharedImmutableStringsCache SharedImmutableStringsCache::singleton_;
+
+SharedImmutableString::SharedImmutableString(
+ SharedImmutableStringsCache::StringBox* box)
+ : box_(box) {
+ MOZ_ASSERT(box);
+ box->refcount++;
+}
+
+SharedImmutableString::SharedImmutableString(SharedImmutableString&& rhs)
+ : box_(rhs.box_) {
+ MOZ_ASSERT(this != &rhs, "self move not allowed");
+
+ MOZ_ASSERT_IF(rhs.box_, rhs.box_->refcount > 0);
+
+ rhs.box_ = nullptr;
+}
+
+SharedImmutableString& SharedImmutableString::operator=(
+ SharedImmutableString&& rhs) {
+ this->~SharedImmutableString();
+ new (this) SharedImmutableString(std::move(rhs));
+ return *this;
+}
+
+SharedImmutableTwoByteString::SharedImmutableTwoByteString(
+ SharedImmutableString&& string)
+ : string_(std::move(string)) {}
+
+SharedImmutableTwoByteString::SharedImmutableTwoByteString(
+ SharedImmutableStringsCache::StringBox* box)
+ : string_(box) {
+ MOZ_ASSERT(box->length() % sizeof(char16_t) == 0);
+}
+
+SharedImmutableTwoByteString::SharedImmutableTwoByteString(
+ SharedImmutableTwoByteString&& rhs)
+ : string_(std::move(rhs.string_)) {
+ MOZ_ASSERT(this != &rhs, "self move not allowed");
+}
+
+SharedImmutableTwoByteString& SharedImmutableTwoByteString::operator=(
+ SharedImmutableTwoByteString&& rhs) {
+ this->~SharedImmutableTwoByteString();
+ new (this) SharedImmutableTwoByteString(std::move(rhs));
+ return *this;
+}
+
+SharedImmutableString::~SharedImmutableString() {
+ if (!box_) {
+ return;
+ }
+
+ auto locked = box_->cache_->lock();
+
+ MOZ_ASSERT(box_->refcount > 0);
+
+ box_->refcount--;
+ if (box_->refcount == 0) {
+ box_->chars_.reset(nullptr);
+ }
+}
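+
+// Note: the destructor above frees the character data eagerly once the
+// refcount reaches zero, but leaves the now-empty StringBox in the table;
+// SharedImmutableStringsCache::purge() removes such boxes later.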
+
+SharedImmutableString SharedImmutableString::clone() const {
+  MOZ_ASSERT(box_);  // check before dereferencing box_ to take the lock
+  auto locked = box_->cache_->lock();
+  MOZ_ASSERT(box_->refcount > 0);
+  return SharedImmutableString(box_);
+}
+
+SharedImmutableTwoByteString SharedImmutableTwoByteString::clone() const {
+ return SharedImmutableTwoByteString(string_.clone());
+}
+
+[[nodiscard]] SharedImmutableString SharedImmutableStringsCache::getOrCreate(
+ OwnedChars&& chars, size_t length) {
+ OwnedChars owned(std::move(chars));
+ MOZ_ASSERT(owned);
+ return getOrCreate(owned.get(), length, [&]() { return std::move(owned); });
+}
+
+[[nodiscard]] SharedImmutableString SharedImmutableStringsCache::getOrCreate(
+ const char* chars, size_t length) {
+ return getOrCreate(chars, length,
+ [&]() { return DuplicateString(chars, length); });
+}
+
+bool SharedImmutableStringsCache::init() {
+ MOZ_ASSERT(!inner_);
+
+ auto* inner =
+ js_new<ExclusiveData<Inner>>(mutexid::SharedImmutableStringsCache);
+ if (!inner) {
+ return false;
+ }
+
+ auto locked = inner->lock();
+ inner_ = locked.parent();
+
+ return true;
+}
+
+void SharedImmutableStringsCache::free() {
+ if (inner_) {
+ js_delete(inner_);
+ inner_ = nullptr;
+ }
+}
+
+bool SharedImmutableStringsCache::initSingleton() { return singleton_.init(); }
+
+void SharedImmutableStringsCache::freeSingleton() {
+ if (!JSRuntime::hasLiveRuntimes()) {
+ singleton_.free();
+ }
+}
+
+[[nodiscard]] SharedImmutableTwoByteString
+SharedImmutableStringsCache::getOrCreate(OwnedTwoByteChars&& chars,
+ size_t length) {
+ OwnedTwoByteChars owned(std::move(chars));
+ MOZ_ASSERT(owned);
+ return getOrCreate(owned.get(), length, [&]() { return std::move(owned); });
+}
+
+[[nodiscard]] SharedImmutableTwoByteString
+SharedImmutableStringsCache::getOrCreate(const char16_t* chars, size_t length) {
+ return getOrCreate(chars, length,
+ [&]() { return DuplicateString(chars, length); });
+}
+
+} // namespace js
diff --git a/js/src/vm/SharedImmutableStringsCache.h b/js/src/vm/SharedImmutableStringsCache.h
new file mode 100644
index 0000000000..206bce697f
--- /dev/null
+++ b/js/src/vm/SharedImmutableStringsCache.h
@@ -0,0 +1,425 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SharedImmutableStringsCache_h
+#define vm_SharedImmutableStringsCache_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/UniquePtr.h"
+
+#include <cstring>
+#include <new> // for placement new
+#include <utility> // std::move
+
+#include "js/AllocPolicy.h"
+#include "js/HashTable.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+
+#include "threading/ExclusiveData.h"
+
+namespace js {
+
+class SharedImmutableString;
+class SharedImmutableTwoByteString;
+
+/**
+ * The `SharedImmutableStringsCache` allows safely sharing and deduplicating
+ * immutable strings (either `const char*` [any encoding, not restricted to
+ * only Latin-1 or only UTF-8] or `const char16_t*`) between threads.
+ *
+ * The locking mechanism is dead-simple and coarse-grained: a single lock
+ * guards the internal table itself, the table's entries, and the entries'
+ * reference counts. It is only safe to perform any mutation on the cache or
+ * any data stored within the cache when this lock is acquired.
+ */
+class SharedImmutableStringsCache {
+ static SharedImmutableStringsCache singleton_;
+
+ friend class SharedImmutableString;
+ friend class SharedImmutableTwoByteString;
+ struct Hasher;
+
+ public:
+ using OwnedChars = JS::UniqueChars;
+ using OwnedTwoByteChars = JS::UniqueTwoByteChars;
+
+ /**
+ * Get the canonical, shared, and de-duplicated version of the given `const
+ * char*` string. If such a string does not exist, call `intoOwnedChars` and
+ * add the string it returns to the cache.
+ *
+ * `intoOwnedChars` must create an owned version of the given string, and
+ * must have one of the following types:
+ *
+ * JS::UniqueChars intoOwnedChars();
+ * JS::UniqueChars&& intoOwnedChars();
+ *
+ * It can be used by callers to elide a copy of the string when it is safe
+ * to give up ownership of the lookup string to the cache. It must return a
+ * `nullptr` on failure.
+ *
+   * On success, a valid (truthy) SharedImmutableString is returned; on OOM
+   * failure, a null (falsy) one is returned.
+ */
+ template <typename IntoOwnedChars>
+ [[nodiscard]] SharedImmutableString getOrCreate(
+ const char* chars, size_t length, IntoOwnedChars intoOwnedChars);
+
+ /**
+ * Take ownership of the given `chars` and return the canonical, shared and
+ * de-duplicated version.
+ *
+   * On success, a valid (truthy) SharedImmutableString is returned; on OOM
+   * failure, a null (falsy) one is returned.
+ */
+ [[nodiscard]] SharedImmutableString getOrCreate(OwnedChars&& chars,
+ size_t length);
+
+ /**
+ * Do not take ownership of the given `chars`. Return the canonical, shared
+ * and de-duplicated version. If there is no extant shared version of
+ * `chars`, make a copy and insert it into the cache.
+ *
+   * On success, a valid (truthy) SharedImmutableString is returned; on OOM
+   * failure, a null (falsy) one is returned.
+ */
+ [[nodiscard]] SharedImmutableString getOrCreate(const char* chars,
+ size_t length);
+
+ /**
+ * Get the canonical, shared, and de-duplicated version of the given `const
+ * char16_t*` string. If such a string does not exist, call `intoOwnedChars`
+ * and add the string it returns to the cache.
+ *
+ * `intoOwnedTwoByteChars` must create an owned version of the given string,
+ * and must have one of the following types:
+ *
+ * JS::UniqueTwoByteChars intoOwnedTwoByteChars();
+ * JS::UniqueTwoByteChars&& intoOwnedTwoByteChars();
+ *
+ * It can be used by callers to elide a copy of the string when it is safe
+ * to give up ownership of the lookup string to the cache. It must return a
+ * `nullptr` on failure.
+ *
+   * On success, a valid (truthy) SharedImmutableTwoByteString is returned;
+   * on OOM failure, a null (falsy) one is returned.
+ */
+ template <typename IntoOwnedTwoByteChars>
+ [[nodiscard]] SharedImmutableTwoByteString getOrCreate(
+ const char16_t* chars, size_t length,
+ IntoOwnedTwoByteChars intoOwnedTwoByteChars);
+
+ /**
+ * Take ownership of the given `chars` and return the canonical, shared and
+ * de-duplicated version.
+ *
+   * On success, a valid (truthy) SharedImmutableTwoByteString is returned;
+   * on OOM failure, a null (falsy) one is returned.
+ */
+ [[nodiscard]] SharedImmutableTwoByteString getOrCreate(
+ OwnedTwoByteChars&& chars, size_t length);
+
+ /**
+ * Do not take ownership of the given `chars`. Return the canonical, shared
+ * and de-duplicated version. If there is no extant shared version of
+ * `chars`, then make a copy and insert it into the cache.
+ *
+   * On success, a valid (truthy) SharedImmutableTwoByteString is returned;
+   * on OOM failure, a null (falsy) one is returned.
+ */
+ [[nodiscard]] SharedImmutableTwoByteString getOrCreate(const char16_t* chars,
+ size_t length);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ MOZ_ASSERT(inner_);
+ size_t n = mallocSizeOf(inner_);
+
+ auto locked = inner_->lock();
+
+ // Size of the table.
+ n += locked->set.shallowSizeOfExcludingThis(mallocSizeOf);
+
+ // Sizes of the strings and their boxes.
+ for (auto r = locked->set.all(); !r.empty(); r.popFront()) {
+ n += mallocSizeOf(r.front().get());
+ if (const char* chars = r.front()->chars()) {
+ n += mallocSizeOf(chars);
+ }
+ }
+
+ return n;
+ }
+
+ private:
+ bool init();
+ void free();
+
+ public:
+ static bool initSingleton();
+ static void freeSingleton();
+
+ static SharedImmutableStringsCache& getSingleton() {
+ MOZ_ASSERT(singleton_.inner_);
+ return singleton_;
+ }
+
+ private:
+ SharedImmutableStringsCache() = default;
+ ~SharedImmutableStringsCache() = default;
+
+ public:
+ SharedImmutableStringsCache(const SharedImmutableStringsCache& rhs) = delete;
+ SharedImmutableStringsCache(SharedImmutableStringsCache&& rhs) = delete;
+
+ SharedImmutableStringsCache& operator=(SharedImmutableStringsCache&& rhs) =
+ delete;
+
+ SharedImmutableStringsCache& operator=(const SharedImmutableStringsCache&) =
+ delete;
+
+ /**
+ * Purge the cache of all refcount == 0 entries.
+ */
+ void purge() {
+ auto locked = inner_->lock();
+
+ for (Inner::Set::Enum e(locked->set); !e.empty(); e.popFront()) {
+ if (e.front()->refcount == 0) {
+ // The chars should be eagerly freed when refcount reaches zero.
+ MOZ_ASSERT(!e.front()->chars());
+ e.removeFront();
+ } else {
+ // The chars should exist as long as the refcount is non-zero.
+ MOZ_ASSERT(e.front()->chars());
+ }
+ }
+ }
+
+ private:
+ struct Inner;
+ class StringBox {
+ friend class SharedImmutableString;
+
+ OwnedChars chars_;
+ size_t length_;
+ const ExclusiveData<Inner>* cache_;
+
+ public:
+ mutable size_t refcount;
+
+ using Ptr = js::UniquePtr<StringBox>;
+
+ StringBox(OwnedChars&& chars, size_t length,
+ const ExclusiveData<Inner>* cache)
+ : chars_(std::move(chars)),
+ length_(length),
+ cache_(cache),
+ refcount(0) {
+ MOZ_ASSERT(chars_);
+ }
+
+ static Ptr Create(OwnedChars&& chars, size_t length,
+ const ExclusiveData<Inner>* cache) {
+ return js::MakeUnique<StringBox>(std::move(chars), length, cache);
+ }
+
+ StringBox(const StringBox&) = delete;
+ StringBox& operator=(const StringBox&) = delete;
+
+ ~StringBox() {
+ MOZ_RELEASE_ASSERT(
+ refcount == 0,
+ "There are `SharedImmutable[TwoByte]String`s outliving their "
+ "associated cache! This always leads to use-after-free in the "
+ "`~SharedImmutableString` destructor!");
+ }
+
+ const char* chars() const { return chars_.get(); }
+ size_t length() const { return length_; }
+ };
+
+ struct Hasher {
+ /**
+ * A structure used when querying for a `const char*` string in the cache.
+ */
+ class Lookup {
+ friend struct Hasher;
+
+ HashNumber hash_;
+ const char* chars_;
+ size_t length_;
+
+ public:
+ Lookup(HashNumber hash, const char* chars, size_t length)
+ : hash_(hash), chars_(chars), length_(length) {
+ MOZ_ASSERT(chars_);
+ MOZ_ASSERT(hash == Hasher::hashLongString(chars, length));
+ }
+
+ Lookup(HashNumber hash, const char16_t* chars, size_t length)
+ : Lookup(hash, reinterpret_cast<const char*>(chars),
+ length * sizeof(char16_t)) {}
+ };
+
+ static const size_t SHORT_STRING_MAX_LENGTH = 8192;
+ static const size_t HASH_CHUNK_LENGTH = SHORT_STRING_MAX_LENGTH / 2;
+
+    // For strings longer than SHORT_STRING_MAX_LENGTH, we only hash the
+    // first HASH_CHUNK_LENGTH and last HASH_CHUNK_LENGTH characters in the
+    // string. This increases the risk of collisions, but in practice
+    // collisions should be rare, and it yields a large speedup for hashing
+    // long strings.
+ static HashNumber hashLongString(const char* chars, size_t length) {
+ MOZ_ASSERT(chars);
+ return length <= SHORT_STRING_MAX_LENGTH
+ ? mozilla::HashString(chars, length)
+ : mozilla::AddToHash(
+ mozilla::HashString(chars, HASH_CHUNK_LENGTH),
+ mozilla::HashString(chars + length - HASH_CHUNK_LENGTH,
+ HASH_CHUNK_LENGTH));
+ }
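+
+    // Illustrative: for a 100,000-char string only the first 4096 and the
+    // last 4096 chars feed the hash, so two long strings that differ only in
+    // the middle collide here and are then separated by Hasher::match below.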
+
+ static HashNumber hash(const Lookup& lookup) { return lookup.hash_; }
+
+ static bool match(const StringBox::Ptr& key, const Lookup& lookup) {
+ MOZ_ASSERT(lookup.chars_);
+
+ if (!key->chars() || key->length() != lookup.length_) {
+ return false;
+ }
+
+ if (key->chars() == lookup.chars_) {
+ return true;
+ }
+
+ return memcmp(key->chars(), lookup.chars_, key->length()) == 0;
+ }
+ };
+
+  // The `Inner` struct contains the actual cached contents and is shared
+  // between the `SharedImmutableStringsCache` singleton and all
+  // `SharedImmutable[TwoByte]String` holders.
+ struct Inner {
+ using Set = HashSet<StringBox::Ptr, Hasher, SystemAllocPolicy>;
+
+ Set set;
+
+ Inner() : set() {}
+
+ Inner(const Inner&) = delete;
+ Inner& operator=(const Inner&) = delete;
+ };
+
+ const ExclusiveData<Inner>* inner_ = nullptr;
+};
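+
+// A minimal end-to-end sketch (hedged; assumes initSingleton() has already
+// run):
+//
+//   auto& cache = SharedImmutableStringsCache::getSingleton();
+//   SharedImmutableString s = cache.getOrCreate("hello", 5);
+//   if (s) {
+//     MOZ_ASSERT(memcmp(s.chars(), "hello", s.length()) == 0);
+//     SharedImmutableString t = s.clone();  // second ref to the same bytes
+//   }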
+
+/**
+ * The `SharedImmutableString` class holds a reference to a `const char*` string
+ * from the `SharedImmutableStringsCache` and releases the reference upon
+ * destruction.
+ */
+class SharedImmutableString {
+ friend class SharedImmutableStringsCache;
+ friend class SharedImmutableTwoByteString;
+
+ mutable SharedImmutableStringsCache::StringBox* box_;
+
+ explicit SharedImmutableString(SharedImmutableStringsCache::StringBox* box);
+
+ public:
+ /**
+ * `SharedImmutableString`s are move-able. It is an error to use a
+ * `SharedImmutableString` after it has been moved.
+ */
+ SharedImmutableString(SharedImmutableString&& rhs);
+ SharedImmutableString& operator=(SharedImmutableString&& rhs);
+ SharedImmutableString() { box_ = nullptr; }
+
+ /**
+ * Create another shared reference to the underlying string.
+ */
+ SharedImmutableString clone() const;
+
+ // If you want a copy, take one explicitly with `clone`!
+ SharedImmutableString& operator=(const SharedImmutableString&) = delete;
+ explicit operator bool() const { return box_ != nullptr; }
+
+ ~SharedImmutableString();
+
+ /**
+ * Get a raw pointer to the underlying string. It is only safe to use the
+ * resulting pointer while this `SharedImmutableString` exists.
+ */
+ const char* chars() const {
+ MOZ_ASSERT(box_);
+ MOZ_ASSERT(box_->refcount > 0);
+ MOZ_ASSERT(box_->chars());
+ return box_->chars();
+ }
+
+ /**
+ * Get the length of the underlying string.
+ */
+ size_t length() const {
+ MOZ_ASSERT(box_);
+ MOZ_ASSERT(box_->refcount > 0);
+ MOZ_ASSERT(box_->chars());
+ return box_->length();
+ }
+};
+
+/**
+ * The `SharedImmutableTwoByteString` class holds a reference to a `const
+ * char16_t*` string from the `SharedImmutableStringsCache` and releases the
+ * reference upon destruction.
+ */
+class SharedImmutableTwoByteString {
+ friend class SharedImmutableStringsCache;
+
+ // If a `char*` string and `char16_t*` string happen to have the same bytes,
+ // the bytes will be shared but handed out as different types.
+ SharedImmutableString string_;
+
+ explicit SharedImmutableTwoByteString(SharedImmutableString&& string);
+ explicit SharedImmutableTwoByteString(
+ SharedImmutableStringsCache::StringBox* box);
+
+ public:
+ /**
+ * `SharedImmutableTwoByteString`s are move-able. It is an error to use a
+ * `SharedImmutableTwoByteString` after it has been moved.
+ */
+ SharedImmutableTwoByteString(SharedImmutableTwoByteString&& rhs);
+ SharedImmutableTwoByteString& operator=(SharedImmutableTwoByteString&& rhs);
+ SharedImmutableTwoByteString() { string_.box_ = nullptr; }
+
+ /**
+ * Create another shared reference to the underlying string.
+ */
+ SharedImmutableTwoByteString clone() const;
+
+ // If you want a copy, take one explicitly with `clone`!
+ SharedImmutableTwoByteString& operator=(const SharedImmutableTwoByteString&) =
+ delete;
+ explicit operator bool() const { return string_.box_ != nullptr; }
+ /**
+ * Get a raw pointer to the underlying string. It is only safe to use the
+ * resulting pointer while this `SharedImmutableTwoByteString` exists.
+ */
+ const char16_t* chars() const {
+ return reinterpret_cast<const char16_t*>(string_.chars());
+ }
+
+ /**
+ * Get the length of the underlying string.
+ */
+ size_t length() const { return string_.length() / sizeof(char16_t); }
+};
+
+} // namespace js
+
+#endif // vm_SharedImmutableStringsCache_h
diff --git a/js/src/vm/SharedMem.h b/js/src/vm/SharedMem.h
new file mode 100644
index 0000000000..04a03bafd0
--- /dev/null
+++ b/js/src/vm/SharedMem.h
@@ -0,0 +1,208 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SharedMem_h
+#define vm_SharedMem_h
+
+#include "mozilla/Assertions.h"
+
+#include <type_traits>
+
+template <typename T>
+class SharedMem {
+ static_assert(std::is_pointer_v<T>, "SharedMem encapsulates pointer types");
+
+ enum Sharedness { IsUnshared, IsShared };
+
+ T ptr_;
+#ifdef DEBUG
+ Sharedness sharedness_;
+#endif
+
+ SharedMem(T ptr, Sharedness sharedness)
+ : ptr_(ptr)
+#ifdef DEBUG
+ ,
+ sharedness_(sharedness)
+#endif
+ {
+ }
+
+ public:
+ // Create a SharedMem<T> that is an unshared nullptr.
+ SharedMem()
+ : ptr_(nullptr)
+#ifdef DEBUG
+ ,
+ sharedness_(IsUnshared)
+#endif
+ {
+ }
+
+ // Create a SharedMem<T> that's shared/unshared in the same way as
+ // "forSharedness".
+ SharedMem(T ptr, const SharedMem& forSharedness)
+ : ptr_(ptr)
+#ifdef DEBUG
+ ,
+ sharedness_(forSharedness.sharedness_)
+#endif
+ {
+ }
+
+ // Create a SharedMem<T> that's marked as shared.
+ static SharedMem shared(void* p) {
+ return SharedMem(static_cast<T>(p), IsShared);
+ }
+
+ // Create a SharedMem<T> that's marked as unshared.
+ static SharedMem unshared(void* p) {
+ return SharedMem(static_cast<T>(p), IsUnshared);
+ }
+
+ SharedMem& operator=(const SharedMem& that) {
+ ptr_ = that.ptr_;
+#ifdef DEBUG
+ sharedness_ = that.sharedness_;
+#endif
+ return *this;
+ }
+
+ // Reinterpret-cast the pointer to type U, preserving sharedness.
+  // E.g., "obj->dataPointerEither().cast<uint8_t*>()" yields a
+ // SharedMem<uint8_t*>.
+ template <typename U>
+ inline SharedMem<U> cast() const {
+#ifdef DEBUG
+ MOZ_ASSERT(
+ asValue() %
+ sizeof(std::conditional_t<std::is_void_v<std::remove_pointer_t<U>>,
+ char, std::remove_pointer_t<U>>) ==
+ 0);
+ if (sharedness_ == IsUnshared) {
+ return SharedMem<U>::unshared(unwrap());
+ }
+#endif
+ return SharedMem<U>::shared(unwrap());
+ }
+
+ explicit operator bool() const { return ptr_ != nullptr; }
+
+ SharedMem operator+(size_t offset) const {
+ return SharedMem(ptr_ + offset, *this);
+ }
+
+ SharedMem operator-(size_t offset) const {
+ return SharedMem(ptr_ - offset, *this);
+ }
+
+ SharedMem operator++() {
+ ptr_++;
+ return *this;
+ }
+
+ SharedMem operator++(int) {
+ SharedMem<T> result(*this);
+ ptr_++;
+ return result;
+ }
+
+ SharedMem operator--() {
+ ptr_--;
+ return *this;
+ }
+
+ SharedMem operator--(int) {
+ SharedMem<T> result(*this);
+ ptr_--;
+ return result;
+ }
+
+ uintptr_t asValue() const { return reinterpret_cast<uintptr_t>(ptr_); }
+
+ // Cast to char*, add nbytes, and cast back to T. Simplifies code in a few
+ // places.
+ SharedMem addBytes(size_t nbytes) {
+ MOZ_ASSERT(
+ nbytes %
+ sizeof(std::conditional_t<std::is_void_v<std::remove_pointer_t<T>>,
+ char, std::remove_pointer_t<T>>) ==
+ 0);
+ return SharedMem(
+ reinterpret_cast<T>(reinterpret_cast<char*>(ptr_) + nbytes), *this);
+ }
+
+ T unwrap() const { return ptr_; }
+
+ T unwrapUnshared() const {
+ MOZ_ASSERT(sharedness_ == IsUnshared);
+ return ptr_;
+ }
+
+ uintptr_t unwrapValue() const { return reinterpret_cast<uintptr_t>(ptr_); }
+};
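+
+// Illustrative sketch (not from the original source): tagging a raw buffer
+// as shared memory and reinterpreting it at a wider type. Assumes `raw`
+// points to suitably aligned storage, since cast() asserts alignment in
+// debug builds.
+//
+//   uint8_t* raw = ...;  // some suitably aligned buffer
+//   SharedMem<uint8_t*> p = SharedMem<uint8_t*>::shared(raw);
+//   SharedMem<uint32_t*> q = p.cast<uint32_t*>();  // same address, new type
+//   MOZ_ASSERT(q.asValue() == p.asValue());
+//   uint32_t* racy = q.unwrap();  // caller is responsible for safe access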
+
+template <typename T>
+inline bool operator>=(const SharedMem<T>& a, const SharedMem<T>& b) {
+ return a.unwrap() >= b.unwrap();
+}
+
+template <typename T>
+inline bool operator>=(const void* a, const SharedMem<T>& b) {
+ return a >= b.unwrap();
+}
+
+template <typename T>
+inline bool operator==(const void* a, const SharedMem<T>& b) {
+ return a == b.unwrap();
+}
+
+template <typename T>
+inline bool operator==(const SharedMem<T>& a, decltype(nullptr) b) {
+ return a.unwrap() == b;
+}
+
+template <typename T>
+inline bool operator==(const SharedMem<T>& a, const SharedMem<T>& b) {
+ return a.unwrap() == b.unwrap();
+}
+
+template <typename T>
+inline bool operator!=(const SharedMem<T>& a, decltype(nullptr) b) {
+ return a.unwrap() != b;
+}
+
+template <typename T>
+inline bool operator!=(const SharedMem<T>& a, const SharedMem<T>& b) {
+ return a.unwrap() != b.unwrap();
+}
+
+template <typename T>
+inline bool operator>(const SharedMem<T>& a, const SharedMem<T>& b) {
+ return a.unwrap() > b.unwrap();
+}
+
+template <typename T>
+inline bool operator>(const void* a, const SharedMem<T>& b) {
+ return a > b.unwrap();
+}
+
+template <typename T>
+inline bool operator<=(const SharedMem<T>& a, const SharedMem<T>& b) {
+ return a.unwrap() <= b.unwrap();
+}
+
+template <typename T>
+inline bool operator<=(const void* a, const SharedMem<T>& b) {
+ return a <= b.unwrap();
+}
+
+template <typename T>
+inline bool operator<(const void* a, const SharedMem<T>& b) {
+ return a < b.unwrap();
+}
+
+#endif // vm_SharedMem_h
diff --git a/js/src/vm/SharedScriptDataTableHolder.cpp b/js/src/vm/SharedScriptDataTableHolder.cpp
new file mode 100644
index 0000000000..8575b6695d
--- /dev/null
+++ b/js/src/vm/SharedScriptDataTableHolder.cpp
@@ -0,0 +1,19 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/SharedScriptDataTableHolder.h"
+
+#include "vm/MutexIDs.h" // mutexid
+
+using namespace js;
+
+js::Mutex AutoLockGlobalScriptData::mutex_(mutexid::SharedImmutableScriptData);
+
+AutoLockGlobalScriptData::AutoLockGlobalScriptData() { mutex_.lock(); }
+
+AutoLockGlobalScriptData::~AutoLockGlobalScriptData() { mutex_.unlock(); }
+
+SharedScriptDataTableHolder js::globalSharedScriptDataTableHolder;
diff --git a/js/src/vm/SharedScriptDataTableHolder.h b/js/src/vm/SharedScriptDataTableHolder.h
new file mode 100644
index 0000000000..96e51dcaa6
--- /dev/null
+++ b/js/src/vm/SharedScriptDataTableHolder.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SharedScriptDataTableHolder_h
+#define vm_SharedScriptDataTableHolder_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Maybe.h" // mozilla::Maybe
+
+#include "threading/Mutex.h" // js::Mutex
+#include "vm/SharedStencil.h" // js::SharedImmutableScriptDataTable
+
+namespace js {
+
+class AutoLockGlobalScriptData {
+ static js::Mutex mutex_;
+
+ public:
+ AutoLockGlobalScriptData();
+ ~AutoLockGlobalScriptData();
+};
+
+// A class that provides access to a SharedImmutableScriptDataTable, with or
+// without a mutex lock.
+//
+// The js::globalSharedScriptDataTableHolder singleton can be used by any
+// thread, and it needs a mutex lock.
+//
+//   AutoLockGlobalScriptData lock;
+//   auto& table = js::globalSharedScriptDataTableHolder.get(lock);
+//
+// A private SharedScriptDataTableHolder instance can be created for
+// thread-local storage, and it can be configured not to require a mutex lock.
+//
+//   SharedScriptDataTableHolder holder(
+//       SharedScriptDataTableHolder::NeedsLock::No);
+//   ...
+//   auto& table = holder.getWithoutLock();
+//
+// The getMaybeLocked method can be used with both types of instances.
+//
+//   Maybe<AutoLockGlobalScriptData> lock;
+//   auto& table = holder.getMaybeLocked(lock);
+//
+// A private instance is supposed to be held by each JSRuntime, including both
+// the main-thread runtime and worker-thread runtimes, and used for
+// non-helper-thread compilation.
+//
+// The js::globalSharedScriptDataTableHolder singleton is supposed to be used
+// by all helper-thread compilation.
+class SharedScriptDataTableHolder {
+ bool needsLock_ = true;
+ js::SharedImmutableScriptDataTable scriptDataTable_;
+
+ public:
+ enum class NeedsLock { No, Yes };
+
+ explicit SharedScriptDataTableHolder(NeedsLock needsLock = NeedsLock::Yes)
+ : needsLock_(needsLock == NeedsLock::Yes) {}
+
+ js::SharedImmutableScriptDataTable& get(
+ const js::AutoLockGlobalScriptData& lock) {
+ MOZ_ASSERT(needsLock_);
+ return scriptDataTable_;
+ }
+
+ js::SharedImmutableScriptDataTable& getWithoutLock() {
+ MOZ_ASSERT(!needsLock_);
+ return scriptDataTable_;
+ }
+
+ js::SharedImmutableScriptDataTable& getMaybeLocked(
+ mozilla::Maybe<js::AutoLockGlobalScriptData>& lock) {
+ if (needsLock_) {
+ lock.emplace();
+ }
+ return scriptDataTable_;
+ }
+};
+
+extern SharedScriptDataTableHolder globalSharedScriptDataTableHolder;
+
+} /* namespace js */
+
+#endif /* vm_SharedScriptDataTableHolder_h */
diff --git a/js/src/vm/SharedStencil.h b/js/src/vm/SharedStencil.h
new file mode 100644
index 0000000000..f4787c1451
--- /dev/null
+++ b/js/src/vm/SharedStencil.h
@@ -0,0 +1,849 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SharedStencil_h
+#define vm_SharedStencil_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_CRASH
+#include "mozilla/Atomics.h" // mozilla::{Atomic, SequentiallyConsistent}
+#include "mozilla/CheckedInt.h" // mozilla::CheckedInt
+#include "mozilla/HashFunctions.h" // mozilla::HahNumber, mozilla::HashBytes
+#include "mozilla/HashTable.h" // mozilla::HashSet
+#include "mozilla/MemoryReporting.h" // mozilla::MallocSizeOf
+#include "mozilla/RefPtr.h" // RefPtr
+#include "mozilla/Span.h" // mozilla::Span
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint16_t, uint32_t
+
+#include "frontend/SourceNotes.h" // js::SrcNote
+#include "frontend/TypedIndex.h" // js::frontend::TypedIndex
+
+#include "js/AllocPolicy.h" // js::SystemAllocPolicy
+#include "js/TypeDecls.h" // JSContext,jsbytecode
+#include "js/UniquePtr.h" // js::UniquePtr
+#include "js/Vector.h" // js::Vector
+#include "util/EnumFlags.h" // js::EnumFlags
+#include "util/TrailingArray.h" // js::TrailingArray
+#include "vm/GeneratorAndAsyncKind.h" // GeneratorKind, FunctionAsyncKind
+#include "vm/StencilEnums.h" // js::{TryNoteKind,ImmutableScriptFlagsEnum,MutableScriptFlagsEnum}
+
+//
+// Data structures shared between Stencil and the VM.
+//
+
+namespace js {
+
+class FrontendContext;
+
+namespace frontend {
+class StencilXDR;
+} // namespace frontend
+
+// Index into gcthings array.
+class GCThingIndexType;
+class GCThingIndex : public frontend::TypedIndex<GCThingIndexType> {
+  // Delegate constructors.
+ using Base = frontend::TypedIndex<GCThingIndexType>;
+ using Base::Base;
+
+ public:
+ static constexpr GCThingIndex outermostScopeIndex() {
+ return GCThingIndex(0);
+ }
+
+ static constexpr GCThingIndex invalid() { return GCThingIndex(UINT32_MAX); }
+
+ GCThingIndex next() const { return GCThingIndex(index + 1); }
+};
+
+/*
+ * Exception handling record.
+ */
+struct TryNote {
+ uint32_t kind_; /* one of TryNoteKind */
+ uint32_t stackDepth; /* stack depth upon exception handler entry */
+ uint32_t start; /* start of the try statement or loop relative
+ to script->code() */
+ uint32_t length; /* length of the try statement or loop */
+
+ TryNote(uint32_t kind, uint32_t stackDepth, uint32_t start, uint32_t length)
+ : kind_(kind), stackDepth(stackDepth), start(start), length(length) {}
+
+ TryNote() = default;
+
+ TryNoteKind kind() const { return TryNoteKind(kind_); }
+
+ bool isLoop() const {
+ switch (kind()) {
+ case TryNoteKind::Loop:
+ case TryNoteKind::ForIn:
+ case TryNoteKind::ForOf:
+ return true;
+ case TryNoteKind::Catch:
+ case TryNoteKind::Finally:
+ case TryNoteKind::ForOfIterClose:
+ case TryNoteKind::Destructuring:
+ return false;
+ }
+ MOZ_CRASH("Unexpected try note kind");
+ }
+};
+
+// A block scope has a range in bytecode: it is entered at some offset, and
+// left at some later offset. Scopes can be nested. Given an offset, the
+// ScopeNote containing that offset with the highest start value indicates
+// the innermost block scope. The block scope list is sorted by increasing
+// start value.
+//
+// It is possible to leave a scope nonlocally, for example via a "break"
+// statement, so there may be short bytecode ranges in a block scope in which we
+// are popping the block chain in preparation for a goto. These exits are also
+// nested with respect to outer scopes. The scopes in these exits are indicated
+// by the "index" field, just like any other block. If a nonlocal exit pops the
+// last block scope, the index will be NoScopeIndex.
+//
+struct ScopeNote {
+ // Sentinel index for no Scope.
+ static constexpr GCThingIndex NoScopeIndex = GCThingIndex::invalid();
+
+ // Sentinel index for no ScopeNote.
+ static const uint32_t NoScopeNoteIndex = UINT32_MAX;
+
+ // Index of the js::Scope in the script's gcthings array, or NoScopeIndex if
+ // there is no block scope in this range.
+ GCThingIndex index;
+
+ // Bytecode offset at which this scope starts relative to script->code().
+ uint32_t start = 0;
+
+ // Length of bytecode span this scope covers.
+ uint32_t length = 0;
+
+ // Index of parent block scope in notes, or NoScopeNoteIndex.
+ uint32_t parent = 0;
+};
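+
+// Illustrative sketch (not from the original source): a hypothetical helper
+// showing how the innermost block scope for a bytecode offset could be
+// found. Because the notes are sorted by increasing start value, the last
+// note whose range contains the offset is the innermost one.
+//
+//   GCThingIndex InnermostScopeAt(mozilla::Span<const ScopeNote> notes,
+//                                 uint32_t offset) {
+//     GCThingIndex found = ScopeNote::NoScopeIndex;
+//     for (const ScopeNote& note : notes) {
+//       if (note.start <= offset && offset - note.start < note.length) {
+//         found = note.index;  // later matches are nested more deeply
+//       }
+//     }
+//     return found;
+//   }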
+
+// Range of characters in scriptSource which contains a script's source,
+// that is, the range used by the Parser to produce a script.
+//
+// For most functions the fields point to the following locations.
+//
+// function * foo(a, b) { return a + b; }
+// ^ ^ ^
+// | | |
+// | sourceStart sourceEnd
+// | |
+// toStringStart toStringEnd
+//
+// For the special case of class constructors, the spec requires us to use an
+// alternate definition of toStringStart / toStringEnd.
+//
+// class C { constructor() { this.field = 42; } }
+// ^ ^ ^ ^
+// | | | |
+// | sourceStart sourceEnd |
+// | |
+// toStringStart toStringEnd
+//
+// Implicit class constructors use the following definitions.
+//
+// class C { someMethod() { } }
+// ^ ^
+// | |
+// sourceStart sourceEnd
+// | |
+// toStringStart toStringEnd
+//
+// Field initializer lambdas are internal details of the engine, but we still
+// provide a sensible definition of these values.
+//
+// class C { static field = 1 }
+// class C { field = 1 }
+// class C { somefield }
+// ^ ^
+// | |
+// sourceStart sourceEnd
+//
+// The non-static private class methods (including getters and setters) ALSO
+// create a hidden initializer lambda in addition to the method itself. These
+// lambdas are not exposed directly to script.
+//
+// class C { #field() { } }
+// class C { get #field() { } }
+// class C { async #field() { } }
+// class C { * #field() { } }
+// ^ ^
+// | |
+// sourceStart sourceEnd
+//
+// NOTE: These are counted in Code Units from the start of the script source.
+//
+// Also included in the SourceExtent is the line and column numbers of the
+// sourceStart position. Compilation options may specify the initial line and
+// column number.
+//
+// NOTE: Column numbers may saturate and must not be used as unique
+// identifiers.
+struct SourceExtent {
+ SourceExtent() = default;
+
+ SourceExtent(uint32_t sourceStart, uint32_t sourceEnd, uint32_t toStringStart,
+ uint32_t toStringEnd, uint32_t lineno, uint32_t column)
+ : sourceStart(sourceStart),
+ sourceEnd(sourceEnd),
+ toStringStart(toStringStart),
+ toStringEnd(toStringEnd),
+ lineno(lineno),
+ column(column) {}
+
+ static SourceExtent makeGlobalExtent(uint32_t len) {
+ return SourceExtent(0, len, 0, len, 1, 0);
+ }
+
+ static SourceExtent makeGlobalExtent(uint32_t len, uint32_t lineno,
+ uint32_t column) {
+ return SourceExtent(0, len, 0, len, lineno, column);
+ }
+
+ // FunctionKey is an encoded position of a function within the source text
+ // that is unique and reproducible.
+ using FunctionKey = uint32_t;
+ static constexpr FunctionKey NullFunctionKey = 0;
+
+ uint32_t sourceStart = 0;
+ uint32_t sourceEnd = 0;
+ uint32_t toStringStart = 0;
+ uint32_t toStringEnd = 0;
+
+ // Line and column of |sourceStart_| position.
+ uint32_t lineno = 1; // 1-indexed.
+ uint32_t column = 0; // Count of Code Points
+
+ FunctionKey toFunctionKey() const {
+ // In eval("x=>1"), the arrow function will have a sourceStart of 0 which
+ // conflicts with the NullFunctionKey, so shift all keys by 1 instead.
+ auto result = sourceStart + 1;
+ MOZ_ASSERT(result != NullFunctionKey);
+ return result;
+ }
+};
+
+class ImmutableScriptFlags : public EnumFlags<ImmutableScriptFlagsEnum> {
+ public:
+ ImmutableScriptFlags() = default;
+
+ explicit ImmutableScriptFlags(FieldType rawFlags) : EnumFlags(rawFlags) {}
+
+ operator FieldType() const { return flags_; }
+};
+
+class MutableScriptFlags : public EnumFlags<MutableScriptFlagsEnum> {
+ public:
+ MutableScriptFlags() = default;
+
+ MutableScriptFlags& operator&=(const FieldType rhs) {
+ flags_ &= rhs;
+ return *this;
+ }
+
+ MutableScriptFlags& operator|=(const FieldType rhs) {
+ flags_ |= rhs;
+ return *this;
+ }
+
+ operator FieldType() const { return flags_; }
+};
+
+#define GENERIC_FLAGS_READ_ONLY(Field, Enum) \
+ [[nodiscard]] bool hasFlag(Enum flag) const { return Field.hasFlag(flag); }
+
+#define GENERIC_FLAGS_READ_WRITE(Field, Enum) \
+ [[nodiscard]] bool hasFlag(Enum flag) const { return Field.hasFlag(flag); } \
+ void setFlag(Enum flag, bool b = true) { Field.setFlag(flag, b); } \
+ void clearFlag(Enum flag) { Field.clearFlag(flag); }
+
+#define GENERIC_FLAG_GETTER(enumName, lowerName, name) \
+ bool lowerName() const { return hasFlag(enumName::name); }
+
+#define GENERIC_FLAG_GETTER_SETTER(enumName, lowerName, name) \
+ GENERIC_FLAG_GETTER(enumName, lowerName, name) \
+ void set##name() { setFlag(enumName::name); } \
+ void set##name(bool b) { setFlag(enumName::name, b); } \
+ void clear##name() { clearFlag(enumName::name); }
+
+#define IMMUTABLE_SCRIPT_FLAGS_WITH_ACCESSORS(_) \
+ _(ImmutableFlags, isForEval, IsForEval) \
+ _(ImmutableFlags, isModule, IsModule) \
+ _(ImmutableFlags, isFunction, IsFunction) \
+ _(ImmutableFlags, selfHosted, SelfHosted) \
+ _(ImmutableFlags, forceStrict, ForceStrict) \
+ _(ImmutableFlags, hasNonSyntacticScope, HasNonSyntacticScope) \
+ _(ImmutableFlags, noScriptRval, NoScriptRval) \
+ _(ImmutableFlags, treatAsRunOnce, TreatAsRunOnce) \
+ _(ImmutableFlags, strict, Strict) \
+ _(ImmutableFlags, hasModuleGoal, HasModuleGoal) \
+ _(ImmutableFlags, hasInnerFunctions, HasInnerFunctions) \
+ _(ImmutableFlags, hasDirectEval, HasDirectEval) \
+ _(ImmutableFlags, bindingsAccessedDynamically, BindingsAccessedDynamically) \
+ _(ImmutableFlags, hasCallSiteObj, HasCallSiteObj) \
+ _(ImmutableFlags, isAsync, IsAsync) \
+ _(ImmutableFlags, isGenerator, IsGenerator) \
+ _(ImmutableFlags, funHasExtensibleScope, FunHasExtensibleScope) \
+ _(ImmutableFlags, functionHasThisBinding, FunctionHasThisBinding) \
+ _(ImmutableFlags, needsHomeObject, NeedsHomeObject) \
+ _(ImmutableFlags, isDerivedClassConstructor, IsDerivedClassConstructor) \
+ _(ImmutableFlags, isSyntheticFunction, IsSyntheticFunction) \
+ _(ImmutableFlags, useMemberInitializers, UseMemberInitializers) \
+ _(ImmutableFlags, hasRest, HasRest) \
+ _(ImmutableFlags, needsFunctionEnvironmentObjects, \
+ NeedsFunctionEnvironmentObjects) \
+ _(ImmutableFlags, functionHasExtraBodyVarScope, \
+ FunctionHasExtraBodyVarScope) \
+ _(ImmutableFlags, shouldDeclareArguments, ShouldDeclareArguments) \
+ _(ImmutableFlags, needsArgsObj, NeedsArgsObj) \
+ _(ImmutableFlags, hasMappedArgsObj, HasMappedArgsObj) \
+ _(ImmutableFlags, isInlinableLargeFunction, IsInlinableLargeFunction) \
+ _(ImmutableFlags, functionHasNewTargetBinding, FunctionHasNewTargetBinding) \
+ _(ImmutableFlags, usesArgumentsIntrinsics, UsesArgumentsIntrinsics) \
+ \
+ GeneratorKind generatorKind() const { \
+ return isGenerator() ? GeneratorKind::Generator \
+ : GeneratorKind::NotGenerator; \
+ } \
+ \
+ FunctionAsyncKind asyncKind() const { \
+ return isAsync() ? FunctionAsyncKind::AsyncFunction \
+ : FunctionAsyncKind::SyncFunction; \
+ } \
+ \
+ bool isRelazifiable() const { \
+ /* \
+ ** A script may not be relazifiable if parts of it can be entrained in \
+ ** interesting ways: \
+ ** - Scripts with inner-functions or direct-eval (which can add \
+ ** inner-functions) should not be relazified as their Scopes may be \
+ ** part of another scope-chain. \
+ ** - Generators and async functions may be re-entered in complex ways so \
+ ** don't discard bytecode. The JIT resume code assumes this. \
+ ** - Functions with template literals must always return the same object \
+ ** instance so must not discard it by relazifying. \
+ */ \
+ return !hasInnerFunctions() && !hasDirectEval() && !isGenerator() && \
+ !isAsync() && !hasCallSiteObj(); \
+ }
+
+#define RO_IMMUTABLE_SCRIPT_FLAGS(Field) \
+ using ImmutableFlags = ImmutableScriptFlagsEnum; \
+ \
+ GENERIC_FLAGS_READ_ONLY(Field, ImmutableFlags) \
+ IMMUTABLE_SCRIPT_FLAGS_WITH_ACCESSORS(GENERIC_FLAG_GETTER)
+
+#define MUTABLE_SCRIPT_FLAGS_WITH_ACCESSORS(_) \
+ _(MutableFlags, hasRunOnce, HasRunOnce) \
+ _(MutableFlags, hasScriptCounts, HasScriptCounts) \
+ _(MutableFlags, hasDebugScript, HasDebugScript) \
+ _(MutableFlags, allowRelazify, AllowRelazify) \
+ _(MutableFlags, spewEnabled, SpewEnabled) \
+ _(MutableFlags, needsFinalWarmUpCount, NeedsFinalWarmUpCount) \
+ _(MutableFlags, failedBoundsCheck, FailedBoundsCheck) \
+ _(MutableFlags, hadLICMInvalidation, HadLICMInvalidation) \
+ _(MutableFlags, hadReorderingBailout, HadReorderingBailout) \
+ _(MutableFlags, hadEagerTruncationBailout, HadEagerTruncationBailout) \
+ _(MutableFlags, hadUnboxFoldingBailout, HadUnboxFoldingBailout) \
+ _(MutableFlags, baselineDisabled, BaselineDisabled) \
+ _(MutableFlags, ionDisabled, IonDisabled) \
+ _(MutableFlags, uninlineable, Uninlineable) \
+ _(MutableFlags, noEagerBaselineHint, NoEagerBaselineHint) \
+ _(MutableFlags, failedLexicalCheck, FailedLexicalCheck) \
+ _(MutableFlags, hadSpeculativePhiBailout, HadSpeculativePhiBailout)
+
+#define RW_MUTABLE_SCRIPT_FLAGS(Field) \
+ using MutableFlags = MutableScriptFlagsEnum; \
+ \
+ GENERIC_FLAGS_READ_WRITE(Field, MutableFlags) \
+ MUTABLE_SCRIPT_FLAGS_WITH_ACCESSORS(GENERIC_FLAG_GETTER_SETTER)
+
+// [SMDOC] JSScript data layout (immutable)
+//
+// ImmutableScriptData stores variable-length script data that may be shared
+// between scripts with the same bytecode, even across different GC Zones.
+// Abstractly this structure consists of multiple (optional) arrays that are
+// exposed as mozilla::Span<T>. These arrays exist in a single heap allocation.
+//
+// Under the hood, ImmutableScriptData is a fixed-size header class followed
+// by the various array bodies, interleaved with metadata that compactly
+// encodes the bounds. These arrays have varying requirements for alignment,
+// performance, and jit-friendliness, which leads to the complex indexing
+// system below.
+//
+// Note: The '----' separators are for readability only.
+//
+// ----
+// <ImmutableScriptData itself>
+// ----
+// (REQUIRED) Flags structure
+// (REQUIRED) Array of jsbytecode constituting code()
+// (REQUIRED) Array of SrcNote constituting notes()
+// ----
+// (OPTIONAL) Array of uint32_t optional-offsets
+// optArrayOffset:
+// ----
+// L0:
+// (OPTIONAL) Array of uint32_t constituting resumeOffsets()
+// L1:
+// (OPTIONAL) Array of ScopeNote constituting scopeNotes()
+// L2:
+// (OPTIONAL) Array of TryNote constituting tryNotes()
+// L3:
+// ----
+//
+// NOTE: The notes() array must have been null-padded such that
+// flags/code/notes together have uint32_t alignment.
+//
+// The labels shown are recorded as byte-offsets relative to 'this'. This is to
+// reduce memory as well as make ImmutableScriptData easier to share across
+// processes.
+//
+// The L0/L1/L2/L3 labels indicate the start and end of the optional arrays.
+// Some of these labels may refer to the same location if the array between
+// them is empty. Each unique label position has an offset stored in the
+// optional-offsets table. Note that we also avoid entries for labels that
+// match 'optArrayOffset'. This saves memory when arrays are empty.
+//
+// The flags() data indicates (for each optional array) which entry from the
+// optional-offsets table marks the *end* of the array. The array starts
+// where the previous array ends (with the first array beginning at
+// 'optArrayOffset').
+// The optional-offset table is addressed at negative indices from
+// 'optArrayOffset'.
+//
+// In general, the length of each array is computed by subtracting the start
+// offset of the array from the start offset of the subsequent array. The
+// notable exception is that bytecode length is stored explicitly.
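+//
+// Worked example (illustrative, not from the original source): if a script
+// has no resume offsets but does have scope notes and try notes, then
+// L0 == L1 == optArrayOffset, so resumeOffsetsEndIndex is 0 (the implicit
+// entry) and costs no table slot, while scopeNotesEndIndex and
+// tryNotesEndIndex name explicit table entries recording L2 and L3. The
+// scopeNotes() span is then [optArrayOffset, L2) and tryNotes() is [L2, L3).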
+class alignas(uint32_t) ImmutableScriptData final : public TrailingArray {
+ private:
+ Offset optArrayOffset_ = 0;
+
+ // Length of bytecode
+ uint32_t codeLength_ = 0;
+
+ public:
+ // Offset of main entry point from code, after predef'ing prologue.
+ uint32_t mainOffset = 0;
+
+ // Fixed frame slots.
+ uint32_t nfixed = 0;
+
+ // Slots plus maximum stack depth.
+ uint32_t nslots = 0;
+
+ // Index into the gcthings array of the body scope.
+ GCThingIndex bodyScopeIndex;
+
+ // Number of IC entries to allocate in JitScript for Baseline ICs.
+ uint32_t numICEntries = 0;
+
+ // ES6 function length.
+ uint16_t funLength = 0;
+
+ // Property Count estimate
+ uint16_t propertyCountEstimate = 0;
+
+  // NOTE: The raw bytes of this structure are used for hashing, so use
+  // explicit padding values as needed for predictable results across
+  // compilers.
+
+ private:
+ struct Flags {
+ uint8_t resumeOffsetsEndIndex : 2;
+ uint8_t scopeNotesEndIndex : 2;
+ uint8_t tryNotesEndIndex : 2;
+ uint8_t _unused : 2;
+ };
+ static_assert(sizeof(Flags) == sizeof(uint8_t),
+ "Structure packing is broken");
+
+ // Offsets (in bytes) from 'this' to each component array. The delta between
+ // each offset and the next offset is the size of each array and is defined
+ // even if an array is empty.
+ Offset flagOffset() const { return offsetOfCode() - sizeof(Flags); }
+ Offset codeOffset() const { return offsetOfCode(); }
+ Offset noteOffset() const { return offsetOfCode() + codeLength_; }
+ Offset optionalOffsetsOffset() const {
+    // Determine the location of the beginning of the optional-offsets array
+    // by looking at the index for try-notes.
+ //
+ // optionalOffsetsOffset():
+ // (OPTIONAL) tryNotesEndOffset
+ // (OPTIONAL) scopeNotesEndOffset
+ // (OPTIONAL) resumeOffsetsEndOffset
+ // optArrayOffset_:
+ // ....
+ unsigned numOffsets = flags().tryNotesEndIndex;
+ MOZ_ASSERT(numOffsets >= flags().scopeNotesEndIndex);
+ MOZ_ASSERT(numOffsets >= flags().resumeOffsetsEndIndex);
+
+ return optArrayOffset_ - (numOffsets * sizeof(Offset));
+ }
+ Offset resumeOffsetsOffset() const { return optArrayOffset_; }
+ Offset scopeNotesOffset() const {
+ return getOptionalOffset(flags().resumeOffsetsEndIndex);
+ }
+ Offset tryNotesOffset() const {
+ return getOptionalOffset(flags().scopeNotesEndIndex);
+ }
+ Offset endOffset() const {
+ return getOptionalOffset(flags().tryNotesEndIndex);
+ }
+
+ void initOptionalArrays(Offset* cursor, uint32_t numResumeOffsets,
+ uint32_t numScopeNotes, uint32_t numTryNotes);
+
+ // Initialize to GC-safe state
+ ImmutableScriptData(uint32_t codeLength, uint32_t noteLength,
+ uint32_t numResumeOffsets, uint32_t numScopeNotes,
+ uint32_t numTryNotes);
+
+ void setOptionalOffset(int index, Offset offset) {
+ MOZ_ASSERT(index > 0);
+ MOZ_ASSERT(offset != optArrayOffset_, "Do not store implicit offset");
+ offsetToPointer<Offset>(optArrayOffset_)[-index] = offset;
+ }
+ Offset getOptionalOffset(int index) const {
+ // The index 0 represents (implicitly) the offset 'optArrayOffset_'.
+ if (index == 0) {
+ return optArrayOffset_;
+ }
+
+ ImmutableScriptData* this_ = const_cast<ImmutableScriptData*>(this);
+ return this_->offsetToPointer<Offset>(optArrayOffset_)[-index];
+ }
+
+ public:
+ static js::UniquePtr<ImmutableScriptData> new_(
+ FrontendContext* fc, uint32_t mainOffset, uint32_t nfixed,
+ uint32_t nslots, GCThingIndex bodyScopeIndex, uint32_t numICEntries,
+ bool isFunction, uint16_t funLength, uint16_t propertyCountEstimate,
+ mozilla::Span<const jsbytecode> code, mozilla::Span<const SrcNote> notes,
+ mozilla::Span<const uint32_t> resumeOffsets,
+ mozilla::Span<const ScopeNote> scopeNotes,
+ mozilla::Span<const TryNote> tryNotes);
+
+ static js::UniquePtr<ImmutableScriptData> new_(
+ FrontendContext* fc, uint32_t codeLength, uint32_t noteLength,
+ uint32_t numResumeOffsets, uint32_t numScopeNotes, uint32_t numTryNotes);
+
+ static js::UniquePtr<ImmutableScriptData> new_(FrontendContext* fc,
+ uint32_t totalSize);
+
+  // Validate that the internal offsets of the data structure seem reasonable.
+  // This is for diagnostic purposes only, to detect severe corruption. It is
+  // not a security boundary!
+ bool validateLayout(uint32_t expectedSize);
+
+ private:
+ static mozilla::CheckedInt<uint32_t> sizeFor(uint32_t codeLength,
+ uint32_t noteLength,
+ uint32_t numResumeOffsets,
+ uint32_t numScopeNotes,
+ uint32_t numTryNotes);
+
+ public:
+  // The code() and note() arrays together maintain a target alignment by
+  // padding the source notes with nulls. This allows arrays with stricter
+ // alignment requirements to follow them.
+ static constexpr size_t CodeNoteAlign = sizeof(uint32_t);
+
+ // Compute number of null notes to pad out source notes with.
+ static uint32_t ComputeNotePadding(uint32_t codeLength, uint32_t noteLength) {
+ uint32_t flagLength = sizeof(Flags);
+ uint32_t nullLength =
+ CodeNoteAlign - (flagLength + codeLength + noteLength) % CodeNoteAlign;
+
+ // The source notes must have at least one null-terminator.
+ MOZ_ASSERT(nullLength >= 1);
+
+ return nullLength;
+ }
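+
+  // Worked example (illustrative): with codeLength = 10 and noteLength = 5,
+  // flags + code + notes occupy 1 + 10 + 5 = 16 bytes, which is already
+  // uint32_t-aligned, so a full CodeNoteAlign (4) null notes are appended,
+  // preserving the guarantee of at least one null terminator.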
+
+ // Span over all raw bytes in this struct and its trailing arrays.
+ mozilla::Span<const uint8_t> immutableData() const {
+ size_t allocSize = endOffset();
+ return mozilla::Span{reinterpret_cast<const uint8_t*>(this), allocSize};
+ }
+
+ private:
+ Flags& flagsRef() { return *offsetToPointer<Flags>(flagOffset()); }
+ const Flags& flags() const {
+ return const_cast<ImmutableScriptData*>(this)->flagsRef();
+ }
+
+ public:
+ uint32_t codeLength() const { return codeLength_; }
+ jsbytecode* code() { return offsetToPointer<jsbytecode>(codeOffset()); }
+ mozilla::Span<jsbytecode> codeSpan() { return {code(), codeLength()}; }
+
+ uint32_t noteLength() const {
+ return numElements<SrcNote>(noteOffset(), optionalOffsetsOffset());
+ }
+ SrcNote* notes() { return offsetToPointer<SrcNote>(noteOffset()); }
+ mozilla::Span<SrcNote> notesSpan() { return {notes(), noteLength()}; }
+
+ mozilla::Span<uint32_t> resumeOffsets() {
+ return mozilla::Span{offsetToPointer<uint32_t>(resumeOffsetsOffset()),
+ offsetToPointer<uint32_t>(scopeNotesOffset())};
+ }
+ mozilla::Span<ScopeNote> scopeNotes() {
+ return mozilla::Span{offsetToPointer<ScopeNote>(scopeNotesOffset()),
+ offsetToPointer<ScopeNote>(tryNotesOffset())};
+ }
+ mozilla::Span<TryNote> tryNotes() {
+ return mozilla::Span{offsetToPointer<TryNote>(tryNotesOffset()),
+ offsetToPointer<TryNote>(endOffset())};
+ }
+
+ // Expose offsets to the JITs.
+ static constexpr size_t offsetOfCode() {
+ return sizeof(ImmutableScriptData) + sizeof(Flags);
+ }
+ static constexpr size_t offsetOfResumeOffsetsOffset() {
+ // Resume-offsets are the first optional array if they exist. Locate the
+ // array with the 'optArrayOffset_' field.
+    static_assert(sizeof(Offset) == sizeof(uint32_t),
+                  "JITs expect Offset to be uint32_t");
+ return offsetof(ImmutableScriptData, optArrayOffset_);
+ }
+ static constexpr size_t offsetOfNfixed() {
+ return offsetof(ImmutableScriptData, nfixed);
+ }
+ static constexpr size_t offsetOfNslots() {
+ return offsetof(ImmutableScriptData, nslots);
+ }
+ static constexpr size_t offsetOfFunLength() {
+ return offsetof(ImmutableScriptData, funLength);
+ }
+
+ // ImmutableScriptData has trailing data so isn't copyable or movable.
+ ImmutableScriptData(const ImmutableScriptData&) = delete;
+ ImmutableScriptData& operator=(const ImmutableScriptData&) = delete;
+};
+
+// Wrapper type for ImmutableScriptData to allow sharing across a JSRuntime.
+//
+// Note: This is distinct from ImmutableScriptData because it contains a mutable
+// ref-count while the ImmutableScriptData may live in read-only memory.
+//
+// Note: This is *not* directly inlined into the SharedImmutableScriptDataTable
+//       because scripts point directly to the object, and table resizing moves
+//       entries. This allows for fast finalization by decrementing the
+//       ref-count directly without doing a hash-table lookup.
+class SharedImmutableScriptData {
+ static constexpr uint32_t IsExternalFlag = 0x80000000;
+ static constexpr uint32_t RefCountBits = 0x7FFFFFFF;
+
+ // This class is reference counted as follows: each pointer from a JSScript
+ // counts as one reference plus there may be one reference from the shared
+ // script data table.
+ mozilla::Atomic<uint32_t, mozilla::SequentiallyConsistent>
+ refCountAndExternalFlags_ = {};
+
+ mozilla::HashNumber hash_;
+ ImmutableScriptData* isd_ = nullptr;
+
+ // End of fields.
+
+ friend class ::JSScript;
+ friend class js::frontend::StencilXDR;
+
+ public:
+ SharedImmutableScriptData() = default;
+
+ ~SharedImmutableScriptData() { reset(); }
+
+ private:
+ bool isExternal() const { return refCountAndExternalFlags_ & IsExternalFlag; }
+ void setIsExternal() { refCountAndExternalFlags_ |= IsExternalFlag; }
+ void unsetIsExternal() { refCountAndExternalFlags_ &= RefCountBits; }
+
+ void reset() {
+ if (isd_ && !isExternal()) {
+ js_delete(isd_);
+ }
+ isd_ = nullptr;
+ }
+
+ mozilla::HashNumber calculateHash() const {
+ mozilla::Span<const uint8_t> immutableData = isd_->immutableData();
+ return mozilla::HashBytes(immutableData.data(), immutableData.size());
+ }
+
+ public:
+ // Hash over the contents of SharedImmutableScriptData and its
+ // ImmutableScriptData.
+ struct Hasher;
+
+ uint32_t refCount() const { return refCountAndExternalFlags_ & RefCountBits; }
+ void AddRef() { refCountAndExternalFlags_++; }
+
+ private:
+ uint32_t decrementRef() {
+ MOZ_ASSERT(refCount() != 0);
+ return --refCountAndExternalFlags_ & RefCountBits;
+ }
+
+ public:
+ void Release() {
+ uint32_t remain = decrementRef();
+ if (remain == 0) {
+ reset();
+ js_free(this);
+ }
+ }
+
+ static constexpr size_t offsetOfISD() {
+ return offsetof(SharedImmutableScriptData, isd_);
+ }
+
+ private:
+ static SharedImmutableScriptData* create(FrontendContext* fc);
+
+ public:
+ static SharedImmutableScriptData* createWith(
+ FrontendContext* fc, js::UniquePtr<ImmutableScriptData>&& isd);
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ size_t isdSize = isExternal() ? 0 : mallocSizeOf(isd_);
+ return mallocSizeOf(this) + isdSize;
+ }
+
+  // SharedImmutableScriptData is reference counted, so it isn't copyable or
+  // movable.
+ SharedImmutableScriptData(const SharedImmutableScriptData&) = delete;
+ SharedImmutableScriptData& operator=(const SharedImmutableScriptData&) =
+ delete;
+
+ static bool shareScriptData(FrontendContext* fc,
+ RefPtr<SharedImmutableScriptData>& sisd);
+
+ size_t immutableDataLength() const { return isd_->immutableData().Length(); }
+ uint32_t nfixed() const { return isd_->nfixed; }
+
+ ImmutableScriptData* get() { return isd_; }
+ mozilla::HashNumber hash() const { return hash_; }
+
+ void setOwn(js::UniquePtr<ImmutableScriptData>&& isd) {
+ MOZ_ASSERT(!isd_);
+ isd_ = isd.release();
+ unsetIsExternal();
+
+ hash_ = calculateHash();
+ }
+
+ void setOwn(js::UniquePtr<ImmutableScriptData>&& isd,
+ mozilla::HashNumber hash) {
+ MOZ_ASSERT(!isd_);
+ isd_ = isd.release();
+ unsetIsExternal();
+
+ MOZ_ASSERT(hash == calculateHash());
+ hash_ = hash;
+ }
+
+ void setExternal(ImmutableScriptData* isd) {
+ MOZ_ASSERT(!isd_);
+ isd_ = isd;
+ setIsExternal();
+
+ hash_ = calculateHash();
+ }
+
+ void setExternal(ImmutableScriptData* isd, mozilla::HashNumber hash) {
+ MOZ_ASSERT(!isd_);
+ isd_ = isd;
+ setIsExternal();
+
+ MOZ_ASSERT(hash == calculateHash());
+ hash_ = hash;
+ }
+};
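+
+// Illustrative sketch (not from the original source): consumers are expected
+// to hold these objects via RefPtr, which pairs AddRef/Release automatically.
+// Here `fc` and `isd` stand for a FrontendContext* and a
+// UniquePtr<ImmutableScriptData> already in hand.
+//
+//   RefPtr<SharedImmutableScriptData> sisd =
+//       SharedImmutableScriptData::createWith(fc, std::move(isd));
+//   // `sisd` releases its reference when it goes out of scope.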
+
+// Matches SharedImmutableScriptData objects that have the same atoms and
+// contain the same bytes in their ImmutableScriptData.
+struct SharedImmutableScriptData::Hasher {
+ using Lookup = RefPtr<SharedImmutableScriptData>;
+
+ static mozilla::HashNumber hash(const Lookup& l) { return l->hash(); }
+
+ static bool match(SharedImmutableScriptData* entry, const Lookup& lookup) {
+ return (entry->isd_->immutableData() == lookup->isd_->immutableData());
+ }
+};
+
+using SharedImmutableScriptDataTable =
+ mozilla::HashSet<SharedImmutableScriptData*,
+ SharedImmutableScriptData::Hasher, SystemAllocPolicy>;
+
+struct MemberInitializers {
+ static constexpr size_t NumBits = 31;
+ static constexpr uint32_t MaxInitializers = BitMask(NumBits);
+
+#ifdef DEBUG
+ bool valid = false;
+#endif
+
+ bool hasPrivateBrand : 1;
+
+ // This struct will eventually have a vector of constant values for optimizing
+ // field initializers.
+ uint32_t numMemberInitializers : NumBits;
+
+ MemberInitializers(bool hasPrivateBrand, uint32_t numMemberInitializers)
+ :
+#ifdef DEBUG
+ valid(true),
+#endif
+ hasPrivateBrand(hasPrivateBrand),
+ numMemberInitializers(numMemberInitializers) {
+ MOZ_ASSERT(
+ this->numMemberInitializers == numMemberInitializers,
+ "numMemberInitializers should easily fit in the 31-bit bitfield");
+ }
+
+ static MemberInitializers Invalid() { return MemberInitializers(); }
+
+ // Singleton to use for class constructors that do not have to initialize any
+ // fields. This is used when we elide the trivial data but still need a valid
+ // set to stop scope walking.
+ static const MemberInitializers& Empty() {
+ static const MemberInitializers zeroInitializers(false, 0);
+ return zeroInitializers;
+ }
+
+ uint32_t serialize() const {
+ return (hasPrivateBrand << NumBits) | numMemberInitializers;
+ }
+
+ static MemberInitializers deserialize(uint32_t bits) {
+ return MemberInitializers((bits & Bit(NumBits)) != 0,
+ bits & BitMask(NumBits));
+ }
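+
+  // Worked example (illustrative): hasPrivateBrand = true with three member
+  // initializers serializes to (1 << 31) | 3 == 0x80000003, and
+  // deserialize(0x80000003) reconstructs the same pair.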
+
+ private:
+ MemberInitializers()
+ :
+#ifdef DEBUG
+ valid(false),
+#endif
+ hasPrivateBrand(false),
+ numMemberInitializers(0) {
+ }
+};
+
+// See JSOp::Lambda for the interpretation of this index.
+using FunctionDeclaration = GCThingIndex;
+// Defined here to avoid #include cycle with Stencil.h.
+using FunctionDeclarationVector =
+ Vector<FunctionDeclaration, 0, js::SystemAllocPolicy>;
+
+} // namespace js
+
+#endif /* vm_SharedStencil_h */
diff --git a/js/src/vm/SourceHook.cpp b/js/src/vm/SourceHook.cpp
new file mode 100644
index 0000000000..59d0b74c71
--- /dev/null
+++ b/js/src/vm/SourceHook.cpp
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/experimental/SourceHook.h"
+
+#include "mozilla/UniquePtr.h" // mozilla::UniquePtr
+
+#include <utility> // std::move
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "vm/JSContext.h" // JSContext
+#include "vm/Runtime.h" // JSRuntime
+
+JS_PUBLIC_API void js::SetSourceHook(JSContext* cx,
+ mozilla::UniquePtr<SourceHook> hook) {
+ cx->runtime()->sourceHook.ref() = std::move(hook);
+}
+
+JS_PUBLIC_API mozilla::UniquePtr<js::SourceHook> js::ForgetSourceHook(
+ JSContext* cx) {
+ return std::move(cx->runtime()->sourceHook.ref());
+}
diff --git a/js/src/vm/Stack-inl.h b/js/src/vm/Stack-inl.h
new file mode 100644
index 0000000000..e94c4f6be0
--- /dev/null
+++ b/js/src/vm/Stack-inl.h
@@ -0,0 +1,859 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Stack_inl_h
+#define vm_Stack_inl_h
+
+#include "vm/Stack.h"
+
+#include "mozilla/PodOperations.h"
+
+#include "jit/BaselineFrame.h"
+#include "jit/RematerializedFrame.h"
+#include "js/friend/StackLimits.h" // js::ReportOverRecursed
+#include "vm/EnvironmentObject.h"
+#include "vm/Interpreter.h"
+#include "vm/JSContext.h"
+#include "vm/JSScript.h"
+
+#include "jit/BaselineFrame-inl.h"
+#include "jit/RematerializedFrame-inl.h" // js::jit::RematerializedFrame::unsetIsDebuggee
+#include "vm/JSScript-inl.h"
+#include "vm/NativeObject-inl.h"
+
+namespace js {
+
+inline HandleObject InterpreterFrame::environmentChain() const {
+ return HandleObject::fromMarkedLocation(&envChain_);
+}
+
+inline GlobalObject& InterpreterFrame::global() const {
+ return script()->global();
+}
+
+inline ExtensibleLexicalEnvironmentObject&
+InterpreterFrame::extensibleLexicalEnvironment() const {
+ return NearestEnclosingExtensibleLexicalEnvironment(environmentChain());
+}
+
+inline void InterpreterFrame::initCallFrame(InterpreterFrame* prev,
+ jsbytecode* prevpc, Value* prevsp,
+ JSFunction& callee,
+ JSScript* script, Value* argv,
+ uint32_t nactual,
+ MaybeConstruct constructing) {
+ MOZ_ASSERT(callee.baseScript() == script);
+
+ /* Initialize stack frame members. */
+ flags_ = 0;
+ if (constructing) {
+ flags_ |= CONSTRUCTING;
+ }
+ argv_ = argv;
+ script_ = script;
+ nactual_ = nactual;
+ envChain_ = callee.environment();
+ prev_ = prev;
+ prevpc_ = prevpc;
+ prevsp_ = prevsp;
+
+ if (script->isDebuggee()) {
+ setIsDebuggee();
+ }
+
+ initLocals();
+}
+
+inline void InterpreterFrame::initLocals() {
+ SetValueRangeToUndefined(slots(), script()->nfixed());
+}
+
+inline Value& InterpreterFrame::unaliasedLocal(uint32_t i) {
+ MOZ_ASSERT(i < script()->nfixed());
+ return slots()[i];
+}
+
+inline Value& InterpreterFrame::unaliasedFormal(
+ unsigned i, MaybeCheckAliasing checkAliasing) {
+ MOZ_ASSERT(i < numFormalArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals());
+ MOZ_ASSERT_IF(checkAliasing, !script()->formalIsAliased(i));
+ return argv()[i];
+}
+
+inline Value& InterpreterFrame::unaliasedActual(
+ unsigned i, MaybeCheckAliasing checkAliasing) {
+ MOZ_ASSERT(i < numActualArgs());
+ MOZ_ASSERT_IF(checkAliasing, !script()->argsObjAliasesFormals());
+ MOZ_ASSERT_IF(checkAliasing && i < numFormalArgs(),
+ !script()->formalIsAliased(i));
+ return argv()[i];
+}
+
+template <class Op>
+inline void InterpreterFrame::unaliasedForEachActual(Op op) {
+ // Don't assert !script()->funHasAnyAliasedFormal() since this function is
+ // called from ArgumentsObject::createUnexpected() which can access aliased
+ // slots.
+
+ const Value* argsEnd = argv() + numActualArgs();
+ for (const Value* p = argv(); p < argsEnd; ++p) {
+ op(*p);
+ }
+}
+
+inline ArgumentsObject& InterpreterFrame::argsObj() const {
+ MOZ_ASSERT(script()->needsArgsObj());
+ MOZ_ASSERT(flags_ & HAS_ARGS_OBJ);
+ return *argsObj_;
+}
+
+inline void InterpreterFrame::initArgsObj(ArgumentsObject& argsobj) {
+ MOZ_ASSERT(script()->needsArgsObj());
+ flags_ |= HAS_ARGS_OBJ;
+ argsObj_ = &argsobj;
+}
+
+inline EnvironmentObject& InterpreterFrame::aliasedEnvironment(
+ EnvironmentCoordinate ec) const {
+ JSObject* env = &environmentChain()->as<EnvironmentObject>();
+ for (unsigned i = ec.hops(); i; i--) {
+ env = &env->as<EnvironmentObject>().enclosingEnvironment();
+ }
+ return env->as<EnvironmentObject>();
+}
+
+inline EnvironmentObject& InterpreterFrame::aliasedEnvironmentMaybeDebug(
+ EnvironmentCoordinate ec) const {
+ JSObject* env = environmentChain();
+ for (unsigned i = ec.hops(); i; i--) {
+ if (env->is<EnvironmentObject>()) {
+ env = &env->as<EnvironmentObject>().enclosingEnvironment();
+ } else {
+ MOZ_ASSERT(env->is<DebugEnvironmentProxy>());
+ env = &env->as<DebugEnvironmentProxy>().enclosingEnvironment();
+ }
+ }
+ return env->is<EnvironmentObject>()
+ ? env->as<EnvironmentObject>()
+ : env->as<DebugEnvironmentProxy>().environment();
+}
+
+template <typename SpecificEnvironment>
+inline void InterpreterFrame::pushOnEnvironmentChain(SpecificEnvironment& env) {
+ MOZ_ASSERT(*environmentChain() == env.enclosingEnvironment());
+ envChain_ = &env;
+ if (IsFrameInitialEnvironment(this, env)) {
+ flags_ |= HAS_INITIAL_ENV;
+ }
+}
+
+template <typename SpecificEnvironment>
+inline void InterpreterFrame::popOffEnvironmentChain() {
+ MOZ_ASSERT(envChain_->is<SpecificEnvironment>());
+ envChain_ = &envChain_->as<SpecificEnvironment>().enclosingEnvironment();
+}
+
+inline void InterpreterFrame::replaceInnermostEnvironment(
+ BlockLexicalEnvironmentObject& env) {
+ MOZ_ASSERT(
+ env.enclosingEnvironment() ==
+ envChain_->as<BlockLexicalEnvironmentObject>().enclosingEnvironment());
+ envChain_ = &env;
+}
+
+bool InterpreterFrame::hasInitialEnvironment() const {
+ MOZ_ASSERT(script()->initialEnvironmentShape());
+ return flags_ & HAS_INITIAL_ENV;
+}
+
+inline CallObject& InterpreterFrame::callObj() const {
+ MOZ_ASSERT(callee().needsCallObject());
+
+ JSObject* pobj = environmentChain();
+ while (MOZ_UNLIKELY(!pobj->is<CallObject>())) {
+ pobj = pobj->enclosingEnvironment();
+ }
+ return pobj->as<CallObject>();
+}
+
+inline void InterpreterFrame::unsetIsDebuggee() {
+ MOZ_ASSERT(!script()->isDebuggee());
+ flags_ &= ~DEBUGGEE;
+}
+
+inline bool InterpreterFrame::saveGeneratorSlots(JSContext* cx, unsigned nslots,
+ ArrayObject* dest) const {
+ return dest->initDenseElementsFromRange(cx, slots(), slots() + nslots);
+}
+
+inline void InterpreterFrame::restoreGeneratorSlots(ArrayObject* src) {
+ MOZ_ASSERT(script()->nfixed() <= src->length());
+ MOZ_ASSERT(src->length() <= script()->nslots());
+ MOZ_ASSERT(src->getDenseInitializedLength() == src->length());
+ const Value* srcElements = src->getDenseElements();
+ mozilla::PodCopy(slots(), srcElements, src->length());
+}
+
+/*****************************************************************************/
+
+inline void InterpreterStack::purge(JSRuntime* rt) {
+ rt->gc.queueUnusedLifoBlocksForFree(&allocator_);
+}
+
+uint8_t* InterpreterStack::allocateFrame(JSContext* cx, size_t size) {
+ size_t maxFrames;
+ if (cx->realm()->principals() == cx->runtime()->trustedPrincipals()) {
+ maxFrames = MAX_FRAMES_TRUSTED;
+ } else {
+ maxFrames = MAX_FRAMES;
+ }
+
+ if (MOZ_UNLIKELY(frameCount_ >= maxFrames)) {
+ ReportOverRecursed(cx);
+ return nullptr;
+ }
+
+ uint8_t* buffer = reinterpret_cast<uint8_t*>(allocator_.alloc(size));
+ if (!buffer) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ frameCount_++;
+ return buffer;
+}
+
+MOZ_ALWAYS_INLINE InterpreterFrame* InterpreterStack::getCallFrame(
+ JSContext* cx, const CallArgs& args, HandleScript script,
+ MaybeConstruct constructing, Value** pargv) {
+ JSFunction* fun = &args.callee().as<JSFunction>();
+
+ MOZ_ASSERT(fun->baseScript() == script);
+ unsigned nformal = fun->nargs();
+ unsigned nvals = script->nslots();
+
+ if (args.length() >= nformal) {
+ *pargv = args.array();
+ uint8_t* buffer =
+ allocateFrame(cx, sizeof(InterpreterFrame) + nvals * sizeof(Value));
+ return reinterpret_cast<InterpreterFrame*>(buffer);
+ }
+
+ // Pad any missing arguments with |undefined|.
+ MOZ_ASSERT(args.length() < nformal);
+
+  unsigned nfunctionState = 2 + constructing; // callee, |this|, maybe |new.target|
+
+ nvals += nformal + nfunctionState;
+ uint8_t* buffer =
+ allocateFrame(cx, sizeof(InterpreterFrame) + nvals * sizeof(Value));
+ if (!buffer) {
+ return nullptr;
+ }
+
+ Value* argv = reinterpret_cast<Value*>(buffer);
+ unsigned nmissing = nformal - args.length();
+
+ mozilla::PodCopy(argv, args.base(), 2 + args.length());
+ SetValueRangeToUndefined(argv + 2 + args.length(), nmissing);
+
+ if (constructing) {
+ argv[2 + nformal] = args.newTarget();
+ }
+
+ *pargv = argv + 2;
+ return reinterpret_cast<InterpreterFrame*>(argv + nfunctionState + nformal);
+}
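+
+// Illustrative layout (not from the original source) of the buffer built
+// above when a constructing call under-supplies arguments:
+//
+//   [callee][this][actual args...][undefined padding...][new.target]
+//   [InterpreterFrame][script slots...]
+//
+// *pargv points at the first argument; the frame begins right after the
+// nfunctionState values and the nformal argument slots.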
+
+MOZ_ALWAYS_INLINE bool InterpreterStack::pushInlineFrame(
+ JSContext* cx, InterpreterRegs& regs, const CallArgs& args,
+ HandleScript script, MaybeConstruct constructing) {
+ RootedFunction callee(cx, &args.callee().as<JSFunction>());
+ MOZ_ASSERT(regs.sp == args.end());
+ MOZ_ASSERT(callee->baseScript() == script);
+
+ InterpreterFrame* prev = regs.fp();
+ jsbytecode* prevpc = regs.pc;
+ Value* prevsp = regs.sp;
+ MOZ_ASSERT(prev);
+
+ LifoAlloc::Mark mark = allocator_.mark();
+
+ Value* argv;
+ InterpreterFrame* fp = getCallFrame(cx, args, script, constructing, &argv);
+ if (!fp) {
+ return false;
+ }
+
+ fp->mark_ = mark;
+
+ /* Initialize frame, locals, regs. */
+ fp->initCallFrame(prev, prevpc, prevsp, *callee, script, argv, args.length(),
+ constructing);
+
+ regs.prepareToRun(*fp, script);
+ return true;
+}
+
+MOZ_ALWAYS_INLINE bool InterpreterStack::resumeGeneratorCallFrame(
+ JSContext* cx, InterpreterRegs& regs, HandleFunction callee,
+ HandleObject envChain) {
+ MOZ_ASSERT(callee->isGenerator() || callee->isAsync());
+ RootedScript script(cx, callee->nonLazyScript());
+ InterpreterFrame* prev = regs.fp();
+ jsbytecode* prevpc = regs.pc;
+ Value* prevsp = regs.sp;
+ MOZ_ASSERT(prev);
+
+ LifoAlloc::Mark mark = allocator_.mark();
+
+ // (Async) generators and async functions are not constructors.
+ MOZ_ASSERT(!callee->isConstructor());
+
+ // Include callee, |this|, and maybe |new.target|
+ unsigned nformal = callee->nargs();
+ unsigned nvals = 2 + nformal + script->nslots();
+
+ uint8_t* buffer =
+ allocateFrame(cx, sizeof(InterpreterFrame) + nvals * sizeof(Value));
+ if (!buffer) {
+ return false;
+ }
+
+ Value* argv = reinterpret_cast<Value*>(buffer) + 2;
+ argv[-2] = ObjectValue(*callee);
+ argv[-1] = UndefinedValue();
+ SetValueRangeToUndefined(argv, nformal);
+
+ InterpreterFrame* fp = reinterpret_cast<InterpreterFrame*>(argv + nformal);
+ fp->mark_ = mark;
+ fp->initCallFrame(prev, prevpc, prevsp, *callee, script, argv, 0,
+ NO_CONSTRUCT);
+ fp->resumeGeneratorFrame(envChain);
+
+ regs.prepareToRun(*fp, script);
+ return true;
+}
+
+MOZ_ALWAYS_INLINE void InterpreterStack::popInlineFrame(InterpreterRegs& regs) {
+ InterpreterFrame* fp = regs.fp();
+ regs.popInlineFrame();
+ regs.sp[-1] = fp->returnValue();
+ releaseFrame(fp);
+ MOZ_ASSERT(regs.fp());
+}
+
+inline HandleValue AbstractFramePtr::returnValue() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->returnValue();
+ }
+ if (isWasmDebugFrame()) {
+ return asWasmDebugFrame()->returnValue();
+ }
+ return asBaselineFrame()->returnValue();
+}
+
+inline void AbstractFramePtr::setReturnValue(const Value& rval) const {
+ if (isInterpreterFrame()) {
+ asInterpreterFrame()->setReturnValue(rval);
+ return;
+ }
+ if (isBaselineFrame()) {
+ asBaselineFrame()->setReturnValue(rval);
+ return;
+ }
+ if (isWasmDebugFrame()) {
+ // TODO handle wasm function return value
+ // The function is called from Debugger::slowPathOnLeaveFrame --
+ // ignoring value for wasm.
+ return;
+ }
+ asRematerializedFrame()->setReturnValue(rval);
+}
+
+inline JSObject* AbstractFramePtr::environmentChain() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->environmentChain();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->environmentChain();
+ }
+ if (isWasmDebugFrame()) {
+ return &global()->lexicalEnvironment();
+ }
+ return asRematerializedFrame()->environmentChain();
+}
+
+template <typename SpecificEnvironment>
+inline void AbstractFramePtr::pushOnEnvironmentChain(SpecificEnvironment& env) {
+ if (isInterpreterFrame()) {
+ asInterpreterFrame()->pushOnEnvironmentChain(env);
+ return;
+ }
+ if (isBaselineFrame()) {
+ asBaselineFrame()->pushOnEnvironmentChain(env);
+ return;
+ }
+ asRematerializedFrame()->pushOnEnvironmentChain(env);
+}
+
+template <typename SpecificEnvironment>
+inline void AbstractFramePtr::popOffEnvironmentChain() {
+ if (isInterpreterFrame()) {
+ asInterpreterFrame()->popOffEnvironmentChain<SpecificEnvironment>();
+ return;
+ }
+ if (isBaselineFrame()) {
+ asBaselineFrame()->popOffEnvironmentChain<SpecificEnvironment>();
+ return;
+ }
+ asRematerializedFrame()->popOffEnvironmentChain<SpecificEnvironment>();
+}
+
+inline CallObject& AbstractFramePtr::callObj() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->callObj();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->callObj();
+ }
+ return asRematerializedFrame()->callObj();
+}
+
+inline bool AbstractFramePtr::initFunctionEnvironmentObjects(JSContext* cx) {
+ return js::InitFunctionEnvironmentObjects(cx, *this);
+}
+
+inline bool AbstractFramePtr::pushVarEnvironment(JSContext* cx,
+ Handle<Scope*> scope) {
+ return js::PushVarEnvironmentObject(cx, scope, *this);
+}
+
+inline JS::Realm* AbstractFramePtr::realm() const {
+ return environmentChain()->nonCCWRealm();
+}
+
+inline unsigned AbstractFramePtr::numActualArgs() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->numActualArgs();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->numActualArgs();
+ }
+ return asRematerializedFrame()->numActualArgs();
+}
+
+inline unsigned AbstractFramePtr::numFormalArgs() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->numFormalArgs();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->numFormalArgs();
+ }
+ return asRematerializedFrame()->numFormalArgs();
+}
+
+inline Value& AbstractFramePtr::unaliasedLocal(uint32_t i) {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->unaliasedLocal(i);
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->unaliasedLocal(i);
+ }
+ return asRematerializedFrame()->unaliasedLocal(i);
+}
+
+inline Value& AbstractFramePtr::unaliasedFormal(
+ unsigned i, MaybeCheckAliasing checkAliasing) {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->unaliasedFormal(i, checkAliasing);
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->unaliasedFormal(i, checkAliasing);
+ }
+ return asRematerializedFrame()->unaliasedFormal(i, checkAliasing);
+}
+
+inline Value& AbstractFramePtr::unaliasedActual(
+ unsigned i, MaybeCheckAliasing checkAliasing) {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->unaliasedActual(i, checkAliasing);
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->unaliasedActual(i, checkAliasing);
+ }
+ return asRematerializedFrame()->unaliasedActual(i, checkAliasing);
+}
+
+inline bool AbstractFramePtr::hasInitialEnvironment() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->hasInitialEnvironment();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->hasInitialEnvironment();
+ }
+ return asRematerializedFrame()->hasInitialEnvironment();
+}
+
+inline bool AbstractFramePtr::isGlobalFrame() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->isGlobalFrame();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->isGlobalFrame();
+ }
+ if (isWasmDebugFrame()) {
+ return false;
+ }
+ return asRematerializedFrame()->isGlobalFrame();
+}
+
+inline bool AbstractFramePtr::isModuleFrame() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->isModuleFrame();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->isModuleFrame();
+ }
+ if (isWasmDebugFrame()) {
+ return false;
+ }
+ return asRematerializedFrame()->isModuleFrame();
+}
+
+inline bool AbstractFramePtr::isEvalFrame() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->isEvalFrame();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->isEvalFrame();
+ }
+ if (isWasmDebugFrame()) {
+ return false;
+ }
+ MOZ_ASSERT(isRematerializedFrame());
+ return false;
+}
+
+inline bool AbstractFramePtr::isDebuggerEvalFrame() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->isDebuggerEvalFrame();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->isDebuggerEvalFrame();
+ }
+ MOZ_ASSERT(isRematerializedFrame());
+ return false;
+}
+
+inline bool AbstractFramePtr::isDebuggee() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->isDebuggee();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->isDebuggee();
+ }
+ if (isWasmDebugFrame()) {
+ return asWasmDebugFrame()->isDebuggee();
+ }
+ return asRematerializedFrame()->isDebuggee();
+}
+
+inline void AbstractFramePtr::setIsDebuggee() {
+ if (isInterpreterFrame()) {
+ asInterpreterFrame()->setIsDebuggee();
+ } else if (isBaselineFrame()) {
+ asBaselineFrame()->setIsDebuggee();
+ } else if (isWasmDebugFrame()) {
+ asWasmDebugFrame()->setIsDebuggee();
+ } else {
+ asRematerializedFrame()->setIsDebuggee();
+ }
+}
+
+inline void AbstractFramePtr::unsetIsDebuggee() {
+ if (isInterpreterFrame()) {
+ asInterpreterFrame()->unsetIsDebuggee();
+ } else if (isBaselineFrame()) {
+ asBaselineFrame()->unsetIsDebuggee();
+ } else if (isWasmDebugFrame()) {
+ asWasmDebugFrame()->unsetIsDebuggee();
+ } else {
+ asRematerializedFrame()->unsetIsDebuggee();
+ }
+}
+
+inline bool AbstractFramePtr::isConstructing() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->isConstructing();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->isConstructing();
+ }
+ if (isRematerializedFrame()) {
+ return asRematerializedFrame()->isConstructing();
+ }
+ MOZ_CRASH("Unexpected frame");
+}
+
+inline bool AbstractFramePtr::hasCachedSavedFrame() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->hasCachedSavedFrame();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->framePrefix()->hasCachedSavedFrame();
+ }
+ if (isWasmDebugFrame()) {
+ return asWasmDebugFrame()->hasCachedSavedFrame();
+ }
+ return asRematerializedFrame()->hasCachedSavedFrame();
+}
+
+inline bool AbstractFramePtr::hasArgs() const { return isFunctionFrame(); }
+
+inline bool AbstractFramePtr::hasScript() const { return !isWasmDebugFrame(); }
+
+inline JSScript* AbstractFramePtr::script() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->script();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->script();
+ }
+ return asRematerializedFrame()->script();
+}
+
+inline wasm::Instance* AbstractFramePtr::wasmInstance() const {
+ return asWasmDebugFrame()->instance();
+}
+
+inline GlobalObject* AbstractFramePtr::global() const {
+ if (isWasmDebugFrame()) {
+ return asWasmDebugFrame()->global();
+ }
+ return &script()->global();
+}
+
+inline bool AbstractFramePtr::hasGlobal(const GlobalObject* global) const {
+ if (isWasmDebugFrame()) {
+ return asWasmDebugFrame()->hasGlobal(global);
+ }
+ return script()->hasGlobal(global);
+}
+
+inline JSFunction* AbstractFramePtr::callee() const {
+ if (isInterpreterFrame()) {
+ return &asInterpreterFrame()->callee();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->callee();
+ }
+ return asRematerializedFrame()->callee();
+}
+
+inline Value AbstractFramePtr::calleev() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->calleev();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->calleev();
+ }
+ return asRematerializedFrame()->calleev();
+}
+
+inline bool AbstractFramePtr::isFunctionFrame() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->isFunctionFrame();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->isFunctionFrame();
+ }
+ if (isWasmDebugFrame()) {
+ return false;
+ }
+ return asRematerializedFrame()->isFunctionFrame();
+}
+
+inline bool AbstractFramePtr::isGeneratorFrame() const {
+ if (!isFunctionFrame() && !isModuleFrame()) {
+ return false;
+ }
+ JSScript* s = script();
+ return s->isGenerator() || s->isAsync();
+}
+
+inline bool AbstractFramePtr::saveGeneratorSlots(JSContext* cx, unsigned nslots,
+ ArrayObject* dest) const {
+ MOZ_ASSERT(isGeneratorFrame());
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->saveGeneratorSlots(cx, nslots, dest);
+ }
+ MOZ_ASSERT(isBaselineFrame(), "unexpected generator frame in Ion");
+ return asBaselineFrame()->saveGeneratorSlots(cx, nslots, dest);
+}
+
+inline Value* AbstractFramePtr::argv() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->argv();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->argv();
+ }
+ return asRematerializedFrame()->argv();
+}
+
+inline bool AbstractFramePtr::hasArgsObj() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->hasArgsObj();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->hasArgsObj();
+ }
+ return asRematerializedFrame()->hasArgsObj();
+}
+
+inline ArgumentsObject& AbstractFramePtr::argsObj() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->argsObj();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->argsObj();
+ }
+ return asRematerializedFrame()->argsObj();
+}
+
+inline void AbstractFramePtr::initArgsObj(ArgumentsObject& argsobj) const {
+ if (isInterpreterFrame()) {
+ asInterpreterFrame()->initArgsObj(argsobj);
+ return;
+ }
+ asBaselineFrame()->initArgsObj(argsobj);
+}
+
+inline bool AbstractFramePtr::prevUpToDate() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->prevUpToDate();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->prevUpToDate();
+ }
+ if (isWasmDebugFrame()) {
+ return asWasmDebugFrame()->prevUpToDate();
+ }
+ return asRematerializedFrame()->prevUpToDate();
+}
+
+inline void AbstractFramePtr::setPrevUpToDate() const {
+ if (isInterpreterFrame()) {
+ asInterpreterFrame()->setPrevUpToDate();
+ return;
+ }
+ if (isBaselineFrame()) {
+ asBaselineFrame()->setPrevUpToDate();
+ return;
+ }
+ if (isWasmDebugFrame()) {
+ asWasmDebugFrame()->setPrevUpToDate();
+ return;
+ }
+ asRematerializedFrame()->setPrevUpToDate();
+}
+
+inline void AbstractFramePtr::unsetPrevUpToDate() const {
+ if (isInterpreterFrame()) {
+ asInterpreterFrame()->unsetPrevUpToDate();
+ return;
+ }
+ if (isBaselineFrame()) {
+ asBaselineFrame()->unsetPrevUpToDate();
+ return;
+ }
+ if (isWasmDebugFrame()) {
+ asWasmDebugFrame()->unsetPrevUpToDate();
+ return;
+ }
+ asRematerializedFrame()->unsetPrevUpToDate();
+}
+
+inline Value& AbstractFramePtr::thisArgument() const {
+ if (isInterpreterFrame()) {
+ return asInterpreterFrame()->thisArgument();
+ }
+ if (isBaselineFrame()) {
+ return asBaselineFrame()->thisArgument();
+ }
+ return asRematerializedFrame()->thisArgument();
+}
+
+inline bool AbstractFramePtr::debuggerNeedsCheckPrimitiveReturn() const {
+ if (isWasmDebugFrame()) {
+ return false;
+ }
+ return script()->isDerivedClassConstructor();
+}
+
+InterpreterActivation::InterpreterActivation(RunState& state, JSContext* cx,
+ InterpreterFrame* entryFrame)
+ : Activation(cx, Interpreter),
+ entryFrame_(entryFrame),
+ opMask_(0)
+#ifdef DEBUG
+ ,
+ oldFrameCount_(cx->interpreterStack().frameCount_)
+#endif
+{
+ regs_.prepareToRun(*entryFrame, state.script());
+ MOZ_ASSERT(regs_.pc == state.script()->code());
+}
+
+InterpreterActivation::~InterpreterActivation() {
+ // Pop all inline frames.
+ while (regs_.fp() != entryFrame_) {
+ popInlineFrame(regs_.fp());
+ }
+
+ MOZ_ASSERT(oldFrameCount_ == cx_->interpreterStack().frameCount_);
+ MOZ_ASSERT_IF(oldFrameCount_ == 0,
+ cx_->interpreterStack().allocator_.used() == 0);
+
+ if (entryFrame_) {
+ cx_->interpreterStack().releaseFrame(entryFrame_);
+ }
+}
+
+inline bool InterpreterActivation::pushInlineFrame(
+ const CallArgs& args, HandleScript script, MaybeConstruct constructing) {
+ if (!cx_->interpreterStack().pushInlineFrame(cx_, regs_, args, script,
+ constructing)) {
+ return false;
+ }
+ MOZ_ASSERT(regs_.fp()->script()->compartment() == compartment());
+ return true;
+}
+
+inline void InterpreterActivation::popInlineFrame(InterpreterFrame* frame) {
+ (void)frame; // Quell compiler warning.
+ MOZ_ASSERT(regs_.fp() == frame);
+ MOZ_ASSERT(regs_.fp() != entryFrame_);
+
+ cx_->interpreterStack().popInlineFrame(regs_);
+}
+
+inline bool InterpreterActivation::resumeGeneratorFrame(HandleFunction callee,
+ HandleObject envChain) {
+ InterpreterStack& stack = cx_->interpreterStack();
+ if (!stack.resumeGeneratorCallFrame(cx_, regs_, callee, envChain)) {
+ return false;
+ }
+
+ MOZ_ASSERT(regs_.fp()->script()->compartment() == compartment_);
+ return true;
+}
+
+} /* namespace js */
+
+#endif /* vm_Stack_inl_h */
diff --git a/js/src/vm/Stack.cpp b/js/src/vm/Stack.cpp
new file mode 100644
index 0000000000..574d8a9379
--- /dev/null
+++ b/js/src/vm/Stack.cpp
@@ -0,0 +1,766 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Stack-inl.h"
+
+#include "mozilla/Maybe.h" // mozilla::Maybe
+
+#include <algorithm> // std::max
+#include <iterator> // std::size
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+
+#include "gc/Tracer.h" // js::TraceRoot
+#include "jit/JitcodeMap.h"
+#include "jit/JitRuntime.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/Value.h" // JS::Value
+#include "vm/FrameIter.h" // js::FrameIter
+#include "vm/JSContext.h"
+#include "wasm/WasmProcess.h"
+
+#include "jit/JSJitFrameIter-inl.h"
+#include "vm/Probes-inl.h"
+
+using namespace js;
+
+using mozilla::Maybe;
+
+using JS::Value;
+
+/*****************************************************************************/
+
+void InterpreterFrame::initExecuteFrame(JSContext* cx, HandleScript script,
+ AbstractFramePtr evalInFramePrev,
+ HandleObject envChain) {
+ flags_ = 0;
+ script_ = script;
+
+ envChain_ = envChain.get();
+ prev_ = nullptr;
+ prevpc_ = nullptr;
+ prevsp_ = nullptr;
+
+ evalInFramePrev_ = evalInFramePrev;
+ MOZ_ASSERT_IF(evalInFramePrev, isDebuggerEvalFrame());
+
+ if (script->isDebuggee()) {
+ setIsDebuggee();
+ }
+
+#ifdef DEBUG
+ Debug_SetValueRangeToCrashOnTouch(&rval_, 1);
+#endif
+}
+
+ArrayObject* InterpreterFrame::createRestParameter(JSContext* cx) {
+ MOZ_ASSERT(script()->hasRest());
+ unsigned nformal = callee().nargs() - 1, nactual = numActualArgs();
+ unsigned nrest = (nactual > nformal) ? nactual - nformal : 0;
+ Value* restvp = argv() + nformal;
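+  // e.g. (editorial note): for |function f(a, ...rest)| called as f(1, 2, 3),
+  // nargs() counts the rest param, so nformal == 1, nactual == 3, and the
+  // result is built from argv()[1..2], i.e. [2, 3].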
+ return NewDenseCopiedArray(cx, nrest, restvp);
+}
+
+static inline void AssertScopeMatchesEnvironment(Scope* scope,
+ JSObject* originalEnv) {
+#ifdef DEBUG
+ JSObject* env = originalEnv;
+ for (ScopeIter si(scope); si; si++) {
+ if (si.kind() == ScopeKind::NonSyntactic) {
+ while (env->is<WithEnvironmentObject>() ||
+ env->is<NonSyntacticVariablesObject>() ||
+ (env->is<LexicalEnvironmentObject>() &&
+ !env->as<LexicalEnvironmentObject>().isSyntactic())) {
+ MOZ_ASSERT(!IsSyntacticEnvironment(env));
+ env = &env->as<EnvironmentObject>().enclosingEnvironment();
+ }
+ } else if (si.hasSyntacticEnvironment()) {
+ switch (si.kind()) {
+ case ScopeKind::Function:
+ MOZ_ASSERT(env->as<CallObject>()
+ .callee()
+ .maybeCanonicalFunction()
+ ->nonLazyScript() ==
+ si.scope()->as<FunctionScope>().script());
+ env = &env->as<CallObject>().enclosingEnvironment();
+ break;
+
+ case ScopeKind::FunctionBodyVar:
+ MOZ_ASSERT(&env->as<VarEnvironmentObject>().scope() == si.scope());
+ env = &env->as<VarEnvironmentObject>().enclosingEnvironment();
+ break;
+
+ case ScopeKind::Lexical:
+ case ScopeKind::SimpleCatch:
+ case ScopeKind::Catch:
+ case ScopeKind::NamedLambda:
+ case ScopeKind::StrictNamedLambda:
+ case ScopeKind::FunctionLexical:
+ case ScopeKind::ClassBody:
+ MOZ_ASSERT(&env->as<ScopedLexicalEnvironmentObject>().scope() ==
+ si.scope());
+ env =
+ &env->as<ScopedLexicalEnvironmentObject>().enclosingEnvironment();
+ break;
+
+ case ScopeKind::With:
+ MOZ_ASSERT(&env->as<WithEnvironmentObject>().scope() == si.scope());
+ env = &env->as<WithEnvironmentObject>().enclosingEnvironment();
+ break;
+
+ case ScopeKind::Eval:
+ case ScopeKind::StrictEval:
+ env = &env->as<VarEnvironmentObject>().enclosingEnvironment();
+ break;
+
+ case ScopeKind::Global:
+ env =
+ &env->as<GlobalLexicalEnvironmentObject>().enclosingEnvironment();
+ MOZ_ASSERT(env->is<GlobalObject>());
+ break;
+
+ case ScopeKind::NonSyntactic:
+ MOZ_CRASH("NonSyntactic should not have a syntactic environment");
+ break;
+
+ case ScopeKind::Module:
+ MOZ_ASSERT(&env->as<ModuleEnvironmentObject>().module() ==
+ si.scope()->as<ModuleScope>().module());
+ env = &env->as<ModuleEnvironmentObject>().enclosingEnvironment();
+ break;
+
+ case ScopeKind::WasmInstance:
+ env =
+ &env->as<WasmInstanceEnvironmentObject>().enclosingEnvironment();
+ break;
+
+ case ScopeKind::WasmFunction:
+ env = &env->as<WasmFunctionCallObject>().enclosingEnvironment();
+ break;
+ }
+ }
+ }
+
+ // In the case of a non-syntactic env chain, the immediate parent of the
+ // outermost non-syntactic env may be the global lexical env, or, if
+ // called from Debugger, a DebugEnvironmentProxy.
+ //
+ // In the case of a syntactic env chain, the outermost env is always a
+ // GlobalObject.
+ MOZ_ASSERT(env->is<GlobalObject>() || IsGlobalLexicalEnvironment(env) ||
+ env->is<DebugEnvironmentProxy>());
+#endif
+}
+
+static inline void AssertScopeMatchesEnvironment(InterpreterFrame* fp,
+ jsbytecode* pc) {
+#ifdef DEBUG
+ // If we OOMed before fully initializing the environment chain, the scope
+ // and environment will definitely mismatch.
+ if (fp->script()->initialEnvironmentShape() && fp->hasInitialEnvironment()) {
+ AssertScopeMatchesEnvironment(fp->script()->innermostScope(pc),
+ fp->environmentChain());
+ }
+#endif
+}
+
+bool InterpreterFrame::initFunctionEnvironmentObjects(JSContext* cx) {
+ return js::InitFunctionEnvironmentObjects(cx, this);
+}
+
+bool InterpreterFrame::prologue(JSContext* cx) {
+ RootedScript script(cx, this->script());
+
+ MOZ_ASSERT(cx->interpreterRegs().pc == script->code());
+ MOZ_ASSERT(cx->realm() == script->realm());
+
+ if (!isFunctionFrame()) {
+ return probes::EnterScript(cx, script, nullptr, this);
+ }
+
+ // At this point, we've yet to push any environments. Check that they
+ // match the enclosing scope.
+ AssertScopeMatchesEnvironment(script->enclosingScope(), environmentChain());
+
+ if (callee().needsFunctionEnvironmentObjects() &&
+ !initFunctionEnvironmentObjects(cx)) {
+ return false;
+ }
+
+ MOZ_ASSERT_IF(isConstructing(),
+ thisArgument().isObject() ||
+ thisArgument().isMagic(JS_UNINITIALIZED_LEXICAL));
+
+ return probes::EnterScript(cx, script, script->function(), this);
+}
+
+void InterpreterFrame::epilogue(JSContext* cx, jsbytecode* pc) {
+ RootedScript script(cx, this->script());
+ MOZ_ASSERT(cx->realm() == script->realm());
+ probes::ExitScript(cx, script, script->function(),
+ hasPushedGeckoProfilerFrame());
+
+ // Check that the scope matches the environment at the point of leaving
+ // the frame.
+ AssertScopeMatchesEnvironment(this, pc);
+
+ EnvironmentIter ei(cx, this, pc);
+ UnwindAllEnvironmentsInFrame(cx, ei);
+
+ if (isFunctionFrame()) {
+ if (!callee().isGenerator() && !callee().isAsync() && isConstructing() &&
+ thisArgument().isObject() && returnValue().isPrimitive()) {
+ setReturnValue(thisArgument());
+ }
+
+ return;
+ }
+
+ MOZ_ASSERT(isEvalFrame() || isGlobalFrame() || isModuleFrame());
+}
+
+bool InterpreterFrame::checkReturn(JSContext* cx, HandleValue thisv,
+ MutableHandleValue result) {
+ MOZ_ASSERT(script()->isDerivedClassConstructor());
+ MOZ_ASSERT(isFunctionFrame());
+ MOZ_ASSERT(callee().isClassConstructor());
+
+ HandleValue retVal = returnValue();
+ if (retVal.isObject()) {
+ result.set(retVal);
+ return true;
+ }
+
+ if (!retVal.isUndefined()) {
+ ReportValueError(cx, JSMSG_BAD_DERIVED_RETURN, JSDVG_IGNORE_STACK, retVal,
+ nullptr);
+ return false;
+ }
+
+ if (thisv.isMagic(JS_UNINITIALIZED_LEXICAL)) {
+ return ThrowUninitializedThis(cx);
+ }
+
+ result.set(thisv);
+ return true;
+}
+
+bool InterpreterFrame::pushVarEnvironment(JSContext* cx, Handle<Scope*> scope) {
+ return js::PushVarEnvironmentObject(cx, scope, this);
+}
+
+bool InterpreterFrame::pushLexicalEnvironment(JSContext* cx,
+ Handle<LexicalScope*> scope) {
+ BlockLexicalEnvironmentObject* env =
+ BlockLexicalEnvironmentObject::createForFrame(cx, scope, this);
+ if (!env) {
+ return false;
+ }
+
+ pushOnEnvironmentChain(*env);
+ return true;
+}
+
+bool InterpreterFrame::freshenLexicalEnvironment(JSContext* cx) {
+ Rooted<BlockLexicalEnvironmentObject*> env(
+ cx, &envChain_->as<BlockLexicalEnvironmentObject>());
+ BlockLexicalEnvironmentObject* fresh =
+ BlockLexicalEnvironmentObject::clone(cx, env);
+ if (!fresh) {
+ return false;
+ }
+
+ replaceInnermostEnvironment(*fresh);
+ return true;
+}
+
+bool InterpreterFrame::recreateLexicalEnvironment(JSContext* cx) {
+ Rooted<BlockLexicalEnvironmentObject*> env(
+ cx, &envChain_->as<BlockLexicalEnvironmentObject>());
+ BlockLexicalEnvironmentObject* fresh =
+ BlockLexicalEnvironmentObject::recreate(cx, env);
+ if (!fresh) {
+ return false;
+ }
+
+ replaceInnermostEnvironment(*fresh);
+ return true;
+}
+
+bool InterpreterFrame::pushClassBodyEnvironment(JSContext* cx,
+ Handle<ClassBodyScope*> scope) {
+ ClassBodyLexicalEnvironmentObject* env =
+ ClassBodyLexicalEnvironmentObject::createForFrame(cx, scope, this);
+ if (!env) {
+ return false;
+ }
+
+ pushOnEnvironmentChain(*env);
+ return true;
+}
+
+void InterpreterFrame::trace(JSTracer* trc, Value* sp, jsbytecode* pc) {
+ TraceRoot(trc, &envChain_, "env chain");
+ TraceRoot(trc, &script_, "script");
+
+ if (flags_ & HAS_ARGS_OBJ) {
+ TraceRoot(trc, &argsObj_, "arguments");
+ }
+
+ if (hasReturnValue()) {
+ TraceRoot(trc, &rval_, "rval");
+ }
+
+ MOZ_ASSERT(sp >= slots());
+
+ if (hasArgs()) {
+ // Trace the callee and |this|. When we're doing a moving GC, we
+ // need to fix up the callee pointer before we use it below, under
+ // numFormalArgs() and script().
+ TraceRootRange(trc, 2, argv_ - 2, "fp callee and this");
+
+ // Trace arguments.
+ unsigned argc = std::max(numActualArgs(), numFormalArgs());
+ TraceRootRange(trc, argc + isConstructing(), argv_, "fp argv");
+ }
+
+ JSScript* script = this->script();
+ size_t nfixed = script->nfixed();
+ size_t nlivefixed = script->calculateLiveFixed(pc);
+
+ if (nfixed == nlivefixed) {
+ // All locals are live.
+ traceValues(trc, 0, sp - slots());
+ } else {
+ // Trace operand stack.
+ traceValues(trc, nfixed, sp - slots());
+
+ // Clear dead block-scoped locals.
+ while (nfixed > nlivefixed) {
+ unaliasedLocal(--nfixed).setUndefined();
+ }
+
+ // Trace live locals.
+ traceValues(trc, 0, nlivefixed);
+ }
+
+ if (auto* debugEnvs = script->realm()->debugEnvs()) {
+ debugEnvs->traceLiveFrame(trc, this);
+ }
+}
+
+void InterpreterFrame::traceValues(JSTracer* trc, unsigned start,
+ unsigned end) {
+ if (start < end) {
+ TraceRootRange(trc, end - start, slots() + start, "vm_stack");
+ }
+}
+
+static void TraceInterpreterActivation(JSTracer* trc,
+ InterpreterActivation* act) {
+ for (InterpreterFrameIterator frames(act); !frames.done(); ++frames) {
+ InterpreterFrame* fp = frames.frame();
+ fp->trace(trc, frames.sp(), frames.pc());
+ }
+}
+
+void js::TraceInterpreterActivations(JSContext* cx, JSTracer* trc) {
+ for (ActivationIterator iter(cx); !iter.done(); ++iter) {
+ Activation* act = iter.activation();
+ if (act->isInterpreter()) {
+ TraceInterpreterActivation(trc, act->asInterpreter());
+ }
+ }
+}
+
+/*****************************************************************************/
+
+// Unlike the other methods of this class, this method is defined here so that
+// we don't have to #include jsautooplen.h in vm/Stack.h.
+void InterpreterRegs::setToEndOfScript() { sp = fp()->base(); }
+
+/*****************************************************************************/
+
+InterpreterFrame* InterpreterStack::pushInvokeFrame(
+ JSContext* cx, const CallArgs& args, MaybeConstruct constructing) {
+ LifoAlloc::Mark mark = allocator_.mark();
+
+ RootedFunction fun(cx, &args.callee().as<JSFunction>());
+ RootedScript script(cx, fun->nonLazyScript());
+
+ Value* argv;
+ InterpreterFrame* fp = getCallFrame(cx, args, script, constructing, &argv);
+ if (!fp) {
+ return nullptr;
+ }
+
+ fp->mark_ = mark;
+ fp->initCallFrame(nullptr, nullptr, nullptr, *fun, script, argv,
+ args.length(), constructing);
+ return fp;
+}
+
+InterpreterFrame* InterpreterStack::pushExecuteFrame(
+ JSContext* cx, HandleScript script, HandleObject envChain,
+ AbstractFramePtr evalInFrame) {
+ LifoAlloc::Mark mark = allocator_.mark();
+
+ unsigned nvars = script->nslots();
+ uint8_t* buffer =
+ allocateFrame(cx, sizeof(InterpreterFrame) + nvars * sizeof(Value));
+ if (!buffer) {
+ return nullptr;
+ }
+
+ InterpreterFrame* fp = reinterpret_cast<InterpreterFrame*>(buffer);
+ fp->mark_ = mark;
+ fp->initExecuteFrame(cx, script, evalInFrame, envChain);
+ fp->initLocals();
+
+ return fp;
+}
+
+/*****************************************************************************/
+
+InterpreterFrameIterator& InterpreterFrameIterator::operator++() {
+ MOZ_ASSERT(!done());
+ if (fp_ != activation_->entryFrame_) {
+ pc_ = fp_->prevpc();
+ sp_ = fp_->prevsp();
+ fp_ = fp_->prev();
+ } else {
+ pc_ = nullptr;
+ sp_ = nullptr;
+ fp_ = nullptr;
+ }
+ return *this;
+}
+
+JS::ProfilingFrameIterator::ProfilingFrameIterator(
+ JSContext* cx, const RegisterState& state,
+ const Maybe<uint64_t>& samplePositionInProfilerBuffer)
+ : cx_(cx),
+ samplePositionInProfilerBuffer_(samplePositionInProfilerBuffer),
+ activation_(nullptr) {
+ if (!cx->runtime()->geckoProfiler().enabled()) {
+ MOZ_CRASH(
+ "ProfilingFrameIterator called when geckoProfiler not enabled for "
+ "runtime.");
+ }
+
+ if (!cx->profilingActivation()) {
+ return;
+ }
+
+ // If profiler sampling is not enabled, skip.
+ if (!cx->isProfilerSamplingEnabled()) {
+ return;
+ }
+
+ activation_ = cx->profilingActivation();
+
+ MOZ_ASSERT(activation_->isProfiling());
+
+ static_assert(sizeof(wasm::ProfilingFrameIterator) <= StorageSpace &&
+ sizeof(jit::JSJitProfilingFrameIterator) <= StorageSpace,
+ "ProfilingFrameIterator::storage_ is too small");
+ static_assert(alignof(void*) >= alignof(wasm::ProfilingFrameIterator) &&
+ alignof(void*) >= alignof(jit::JSJitProfilingFrameIterator),
+ "ProfilingFrameIterator::storage_ is too weakly aligned");
+
+ iteratorConstruct(state);
+ settle();
+}
+
+JS::ProfilingFrameIterator::~ProfilingFrameIterator() {
+ if (!done()) {
+ MOZ_ASSERT(activation_->isProfiling());
+ iteratorDestroy();
+ }
+}
+
+void JS::ProfilingFrameIterator::operator++() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(activation_->isJit());
+ if (isWasm()) {
+ ++wasmIter();
+ } else {
+ ++jsJitIter();
+ }
+ settle();
+}
+
+void JS::ProfilingFrameIterator::settleFrames() {
+ // Handle transition frames (see comment in JitFrameIter::operator++).
+ if (isJSJit() && !jsJitIter().done() &&
+ jsJitIter().frameType() == jit::FrameType::WasmToJSJit) {
+ wasm::Frame* fp = (wasm::Frame*)jsJitIter().fp();
+ iteratorDestroy();
+ new (storage()) wasm::ProfilingFrameIterator(fp);
+ kind_ = Kind::Wasm;
+ MOZ_ASSERT(!wasmIter().done());
+ maybeSetEndStackAddress(wasmIter().endStackAddress());
+ return;
+ }
+
+ if (isWasm() && wasmIter().done() && wasmIter().unwoundJitCallerFP()) {
+ uint8_t* fp = wasmIter().unwoundJitCallerFP();
+ iteratorDestroy();
+ // Using this ctor will skip the first jit->wasm frame, which is
+ // needed because the profiling iterator doesn't know how to unwind
+ // when the callee has no script.
+ new (storage())
+ jit::JSJitProfilingFrameIterator((jit::CommonFrameLayout*)fp);
+ kind_ = Kind::JSJit;
+ MOZ_ASSERT(!jsJitIter().done());
+ maybeSetEndStackAddress(jsJitIter().endStackAddress());
+ return;
+ }
+}
+
+void JS::ProfilingFrameIterator::settle() {
+ settleFrames();
+ while (iteratorDone()) {
+ iteratorDestroy();
+ activation_ = activation_->prevProfiling();
+ endStackAddress_ = nullptr;
+ if (!activation_) {
+ return;
+ }
+ iteratorConstruct();
+ settleFrames();
+ }
+}
+
+void JS::ProfilingFrameIterator::iteratorConstruct(const RegisterState& state) {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(activation_->isJit());
+
+ jit::JitActivation* activation = activation_->asJit();
+
+  // We want to know whether we should start with a wasm profiling frame
+  // iterator or not. To determine this, consider three possibilities:
+ // - we've exited to C++ from wasm, in which case the activation
+ // exitFP low bit is tagged and we can test hasWasmExitFP().
+ // - we're in wasm code, so we can do a lookup on PC.
+ // - in all the other cases, we're not in wasm or we haven't exited from
+ // wasm.
+ if (activation->hasWasmExitFP() || wasm::InCompiledCode(state.pc)) {
+ new (storage()) wasm::ProfilingFrameIterator(*activation, state);
+ kind_ = Kind::Wasm;
+ maybeSetEndStackAddress(wasmIter().endStackAddress());
+ return;
+ }
+
+ new (storage()) jit::JSJitProfilingFrameIterator(cx_, state.pc, state.sp);
+ kind_ = Kind::JSJit;
+ maybeSetEndStackAddress(jsJitIter().endStackAddress());
+}
+
+void JS::ProfilingFrameIterator::iteratorConstruct() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(activation_->isJit());
+
+ jit::JitActivation* activation = activation_->asJit();
+
+ // The same reasoning as in the above iteratorConstruct variant applies
+ // here, except that it's even simpler: since this activation is higher up
+ // on the stack, it can only have exited to C++, through wasm or ion.
+ if (activation->hasWasmExitFP()) {
+ new (storage()) wasm::ProfilingFrameIterator(*activation);
+ kind_ = Kind::Wasm;
+ maybeSetEndStackAddress(wasmIter().endStackAddress());
+ return;
+ }
+
+ auto* fp = (jit::ExitFrameLayout*)activation->jsExitFP();
+ new (storage()) jit::JSJitProfilingFrameIterator(fp);
+ kind_ = Kind::JSJit;
+ maybeSetEndStackAddress(jsJitIter().endStackAddress());
+}
+
+void JS::ProfilingFrameIterator::iteratorDestroy() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(activation_->isJit());
+
+ if (isWasm()) {
+ wasmIter().~ProfilingFrameIterator();
+ return;
+ }
+
+ jsJitIter().~JSJitProfilingFrameIterator();
+}
+
+bool JS::ProfilingFrameIterator::iteratorDone() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(activation_->isJit());
+
+ if (isWasm()) {
+ return wasmIter().done();
+ }
+
+ return jsJitIter().done();
+}
+
+void* JS::ProfilingFrameIterator::stackAddress() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(activation_->isJit());
+
+ if (isWasm()) {
+ return wasmIter().stackAddress();
+ }
+
+ return jsJitIter().stackAddress();
+}
+
+Maybe<JS::ProfilingFrameIterator::Frame>
+JS::ProfilingFrameIterator::getPhysicalFrameAndEntry(
+ const jit::JitcodeGlobalEntry** entry) const {
+ *entry = nullptr;
+
+ void* stackAddr = stackAddress();
+
+ MOZ_DIAGNOSTIC_ASSERT(endStackAddress_);
+ MOZ_DIAGNOSTIC_ASSERT(stackAddr >= endStackAddress_);
+
+ if (isWasm()) {
+ Frame frame;
+ frame.kind = Frame_Wasm;
+ frame.stackAddress = stackAddr;
+ frame.returnAddress_ = nullptr;
+ frame.activation = activation_;
+ frame.label = nullptr;
+ frame.endStackAddress = endStackAddress_;
+ frame.interpreterScript = nullptr;
+ // TODO: get the realm ID of wasm frames. Bug 1596235.
+ frame.realmID = 0;
+ return mozilla::Some(frame);
+ }
+
+ MOZ_ASSERT(isJSJit());
+
+ // Look up an entry for the return address.
+ void* returnAddr = jsJitIter().resumePCinCurrentFrame();
+ jit::JitcodeGlobalTable* table =
+ cx_->runtime()->jitRuntime()->getJitcodeGlobalTable();
+
+ // NB:
+ // The following lookups should be infallible, but the ad-hoc stackwalking
+ // code rots easily and corner cases where frames can't be looked up
+ // occur too often (e.g. once every day).
+ //
+ // The calls to `lookup*` below have been changed from infallible ones to
+ // fallible ones. The proper solution to this problem is to fix all
+ // the jitcode to use frame-pointers and reliably walk the stack with those.
+ if (samplePositionInProfilerBuffer_) {
+ *entry = table->lookupForSampler(returnAddr, cx_->runtime(),
+ *samplePositionInProfilerBuffer_);
+ } else {
+ *entry = table->lookup(returnAddr);
+ }
+
+ // Failed to look up a jitcode entry for the given address, ignore.
+ if (!*entry) {
+ return mozilla::Nothing();
+ }
+
+ // Dummy frames produce no stack frames.
+ if ((*entry)->isDummy()) {
+ return mozilla::Nothing();
+ }
+
+ Frame frame;
+ if ((*entry)->isBaselineInterpreter()) {
+ frame.kind = Frame_BaselineInterpreter;
+ } else if ((*entry)->isBaseline()) {
+ frame.kind = Frame_Baseline;
+ } else {
+ MOZ_ASSERT((*entry)->isIon() || (*entry)->isIonIC());
+ frame.kind = Frame_Ion;
+ }
+ frame.stackAddress = stackAddr;
+ if ((*entry)->isBaselineInterpreter()) {
+ frame.label = jsJitIter().baselineInterpreterLabel();
+ jsJitIter().baselineInterpreterScriptPC(
+ &frame.interpreterScript, &frame.interpreterPC_, &frame.realmID);
+ MOZ_ASSERT(frame.interpreterScript);
+ MOZ_ASSERT(frame.interpreterPC_);
+ } else {
+ frame.interpreterScript = nullptr;
+ frame.returnAddress_ = returnAddr;
+ frame.label = nullptr;
+ frame.realmID = 0;
+ }
+ frame.activation = activation_;
+ frame.endStackAddress = endStackAddress_;
+ return mozilla::Some(frame);
+}
+
+uint32_t JS::ProfilingFrameIterator::extractStack(Frame* frames,
+ uint32_t offset,
+ uint32_t end) const {
+ if (offset >= end) {
+ return 0;
+ }
+
+ const jit::JitcodeGlobalEntry* entry;
+ Maybe<Frame> physicalFrame = getPhysicalFrameAndEntry(&entry);
+
+ // Dummy frames produce no stack frames.
+ if (physicalFrame.isNothing()) {
+ return 0;
+ }
+
+ if (isWasm()) {
+ frames[offset] = physicalFrame.value();
+ frames[offset].label = wasmIter().label();
+ return 1;
+ }
+
+ if (physicalFrame->kind == Frame_BaselineInterpreter) {
+ frames[offset] = physicalFrame.value();
+ return 1;
+ }
+
+  // Extract the stack for the entry. Assume maximum inlining depth is < 64.
+ const char* labels[64];
+ uint32_t depth = entry->callStackAtAddr(cx_->runtime(),
+ jsJitIter().resumePCinCurrentFrame(),
+ labels, std::size(labels));
+ MOZ_ASSERT(depth < std::size(labels));
+ for (uint32_t i = 0; i < depth; i++) {
+ if (offset + i >= end) {
+ return i;
+ }
+ frames[offset + i] = physicalFrame.value();
+ frames[offset + i].label = labels[i];
+ }
+
+ return depth;
+}
+
+Maybe<JS::ProfilingFrameIterator::Frame>
+JS::ProfilingFrameIterator::getPhysicalFrameWithoutLabel() const {
+ const jit::JitcodeGlobalEntry* unused;
+ return getPhysicalFrameAndEntry(&unused);
+}
+
+bool JS::ProfilingFrameIterator::isWasm() const {
+ MOZ_ASSERT(!done());
+ return kind_ == Kind::Wasm;
+}
+
+bool JS::ProfilingFrameIterator::isJSJit() const {
+ return kind_ == Kind::JSJit;
+}
+
+mozilla::Maybe<JS::ProfilingFrameIterator::RegisterState>
+JS::ProfilingFrameIterator::getCppEntryRegisters() const {
+ if (!isJSJit()) {
+ return mozilla::Nothing{};
+ }
+ return jit::JitRuntime::getCppEntryRegisters(jsJitIter().framePtr());
+}
diff --git a/js/src/vm/Stack.h b/js/src/vm/Stack.h
new file mode 100644
index 0000000000..d537cb80af
--- /dev/null
+++ b/js/src/vm/Stack.h
@@ -0,0 +1,999 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Stack_h
+#define vm_Stack_h
+
+#include "mozilla/HashFunctions.h"
+#include "mozilla/MemoryReporting.h"
+
+#include <algorithm>
+#include <type_traits>
+
+#include "js/ErrorReport.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/ValueArray.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/JSFunction.h"
+#include "vm/JSScript.h"
+#include "wasm/WasmDebugFrame.h" // js::wasm::DebugFrame
+
+namespace js {
+
+class InterpreterRegs;
+class CallObject;
+class FrameIter;
+class ClassBodyScope;
+class EnvironmentObject;
+class BlockLexicalEnvironmentObject;
+class ExtensibleLexicalEnvironmentObject;
+class GeckoProfilerRuntime;
+class InterpreterFrame;
+class EnvironmentIter;
+class EnvironmentCoordinate;
+
+namespace jit {
+class CommonFrameLayout;
+}
+namespace wasm {
+class Instance;
+} // namespace wasm
+
+// [SMDOC] VM stack layout
+//
+// A JSRuntime's stack consists of a linked list of activations. Every
+// activation contains a number of scripted frames that are either running in
+// the interpreter (InterpreterActivation) or JIT code (JitActivation). The
+// frames inside a single activation are contiguous: whenever C++ calls back
+// into JS, a new activation is pushed.
+//
+// Every activation is tied to a single JSContext and JS::Compartment. This
+// means we can reconstruct a given context's stack by skipping activations
+// belonging to other contexts. This happens whenever an embedding enters the JS
+// engine on cx1 and then, from a native called by the JS engine, reenters the
+// VM on cx2.
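+//
+// An illustrative walk over a context's activations (editorial sketch; the
+// same pattern is used by TraceInterpreterActivations in Stack.cpp):
+//
+//   for (ActivationIterator iter(cx); !iter.done(); ++iter) {
+//     Activation* act = iter.activation();
+//     if (act->isInterpreter()) {
+//       // act->asInterpreter() yields the InterpreterActivation.
+//     }
+//   }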
+
+// Interpreter frames (InterpreterFrame)
+//
+// Each interpreter script activation (global or function code) is given a
+// fixed-size header (js::InterpreterFrame). The frame contains bookkeeping
+// information about the activation and links to the previous frame.
+//
+// The values after an InterpreterFrame in memory are its locals followed by its
+// expression stack. InterpreterFrame::argv_ points to the frame's arguments.
+// Missing formal arguments are padded with |undefined|, so the number of
+// arguments is always >= the number of formals.
+//
+// The top of an activation's current frame's expression stack is pointed to by
+// the activation's "current regs", which contains the stack pointer 'sp'. In
+// the interpreter, sp is adjusted as individual values are pushed and popped
+// from the stack and the InterpreterRegs struct (pointed to by the
+// InterpreterActivation) is a local var of js::Interpret.
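+//
+// Schematically (an editorial sketch of the layout described above), a
+// function frame sees its callee at argv[-2], |this| at argv[-1], and its
+// arguments from argv[0] on; the InterpreterFrame header is followed in
+// memory by the script's fixed slots (locals) and then the expression stack:
+//
+//   [header][ locals: slots()..base() ][ expression stack: base()..sp ]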
+
+enum MaybeCheckAliasing { CHECK_ALIASING = true, DONT_CHECK_ALIASING = false };
+
+} // namespace js
+
+/*****************************************************************************/
+
+namespace js {
+
+namespace jit {
+class BaselineFrame;
+class RematerializedFrame;
+} // namespace jit
+
+/**
+ * Pointer to a live JS or WASM stack frame.
+ */
+class AbstractFramePtr {
+ friend class FrameIter;
+
+ uintptr_t ptr_;
+
+ enum {
+ Tag_InterpreterFrame = 0x1,
+ Tag_BaselineFrame = 0x2,
+ Tag_RematerializedFrame = 0x3,
+ Tag_WasmDebugFrame = 0x4,
+ TagMask = 0x7
+ };
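+
+  // Illustrative round-trip of the tagging scheme (editorial sketch): frames
+  // are at least 8-byte aligned, so the low three bits of the pointer are
+  // free to carry the kind tag.
+  //
+  //   uintptr_t bits = uintptr_t(fp) | Tag_BaselineFrame;
+  //   MOZ_ASSERT((bits & TagMask) == Tag_BaselineFrame);
+  //   auto* back = (jit::BaselineFrame*)(bits & ~uintptr_t(TagMask)); // == fp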
+
+ public:
+ AbstractFramePtr() : ptr_(0) {}
+
+ MOZ_IMPLICIT AbstractFramePtr(InterpreterFrame* fp)
+ : ptr_(fp ? uintptr_t(fp) | Tag_InterpreterFrame : 0) {
+ MOZ_ASSERT_IF(fp, asInterpreterFrame() == fp);
+ }
+
+ MOZ_IMPLICIT AbstractFramePtr(jit::BaselineFrame* fp)
+ : ptr_(fp ? uintptr_t(fp) | Tag_BaselineFrame : 0) {
+ MOZ_ASSERT_IF(fp, asBaselineFrame() == fp);
+ }
+
+ MOZ_IMPLICIT AbstractFramePtr(jit::RematerializedFrame* fp)
+ : ptr_(fp ? uintptr_t(fp) | Tag_RematerializedFrame : 0) {
+ MOZ_ASSERT_IF(fp, asRematerializedFrame() == fp);
+ }
+
+ MOZ_IMPLICIT AbstractFramePtr(wasm::DebugFrame* fp)
+ : ptr_(fp ? uintptr_t(fp) | Tag_WasmDebugFrame : 0) {
+ static_assert(wasm::DebugFrame::Alignment >= TagMask, "aligned");
+ MOZ_ASSERT_IF(fp, asWasmDebugFrame() == fp);
+ }
+
+ bool isInterpreterFrame() const {
+ return (ptr_ & TagMask) == Tag_InterpreterFrame;
+ }
+ InterpreterFrame* asInterpreterFrame() const {
+ MOZ_ASSERT(isInterpreterFrame());
+ InterpreterFrame* res = (InterpreterFrame*)(ptr_ & ~TagMask);
+ MOZ_ASSERT(res);
+ return res;
+ }
+ bool isBaselineFrame() const { return (ptr_ & TagMask) == Tag_BaselineFrame; }
+ jit::BaselineFrame* asBaselineFrame() const {
+ MOZ_ASSERT(isBaselineFrame());
+ jit::BaselineFrame* res = (jit::BaselineFrame*)(ptr_ & ~TagMask);
+ MOZ_ASSERT(res);
+ return res;
+ }
+ bool isRematerializedFrame() const {
+ return (ptr_ & TagMask) == Tag_RematerializedFrame;
+ }
+ jit::RematerializedFrame* asRematerializedFrame() const {
+ MOZ_ASSERT(isRematerializedFrame());
+ jit::RematerializedFrame* res =
+ (jit::RematerializedFrame*)(ptr_ & ~TagMask);
+ MOZ_ASSERT(res);
+ return res;
+ }
+ bool isWasmDebugFrame() const {
+ return (ptr_ & TagMask) == Tag_WasmDebugFrame;
+ }
+ wasm::DebugFrame* asWasmDebugFrame() const {
+ MOZ_ASSERT(isWasmDebugFrame());
+ wasm::DebugFrame* res = (wasm::DebugFrame*)(ptr_ & ~TagMask);
+ MOZ_ASSERT(res);
+ return res;
+ }
+
+ void* raw() const { return reinterpret_cast<void*>(ptr_); }
+
+ bool operator==(const AbstractFramePtr& other) const {
+ return ptr_ == other.ptr_;
+ }
+ bool operator!=(const AbstractFramePtr& other) const {
+ return ptr_ != other.ptr_;
+ }
+
+ explicit operator bool() const { return !!ptr_; }
+
+ inline JSObject* environmentChain() const;
+ inline CallObject& callObj() const;
+ inline bool initFunctionEnvironmentObjects(JSContext* cx);
+ inline bool pushVarEnvironment(JSContext* cx, Handle<Scope*> scope);
+ template <typename SpecificEnvironment>
+ inline void pushOnEnvironmentChain(SpecificEnvironment& env);
+ template <typename SpecificEnvironment>
+ inline void popOffEnvironmentChain();
+
+ inline JS::Realm* realm() const;
+
+ inline bool hasInitialEnvironment() const;
+ inline bool isGlobalFrame() const;
+ inline bool isModuleFrame() const;
+ inline bool isEvalFrame() const;
+ inline bool isDebuggerEvalFrame() const;
+
+ inline bool hasScript() const;
+ inline JSScript* script() const;
+ inline wasm::Instance* wasmInstance() const;
+ inline GlobalObject* global() const;
+ inline bool hasGlobal(const GlobalObject* global) const;
+ inline JSFunction* callee() const;
+ inline Value calleev() const;
+ inline Value& thisArgument() const;
+
+ inline bool isConstructing() const;
+
+ inline bool debuggerNeedsCheckPrimitiveReturn() const;
+
+ inline bool isFunctionFrame() const;
+ inline bool isGeneratorFrame() const;
+
+ inline bool saveGeneratorSlots(JSContext* cx, unsigned nslots,
+ ArrayObject* dest) const;
+
+ inline bool hasCachedSavedFrame() const;
+
+ inline unsigned numActualArgs() const;
+ inline unsigned numFormalArgs() const;
+
+ inline Value* argv() const;
+
+ inline bool hasArgs() const;
+ inline bool hasArgsObj() const;
+ inline ArgumentsObject& argsObj() const;
+ inline void initArgsObj(ArgumentsObject& argsobj) const;
+
+ inline Value& unaliasedLocal(uint32_t i);
+ inline Value& unaliasedFormal(
+ unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING);
+ inline Value& unaliasedActual(
+ unsigned i, MaybeCheckAliasing checkAliasing = CHECK_ALIASING);
+ template <class Op>
+ inline void unaliasedForEachActual(JSContext* cx, Op op);
+
+ inline bool prevUpToDate() const;
+ inline void setPrevUpToDate() const;
+ inline void unsetPrevUpToDate() const;
+
+ inline bool isDebuggee() const;
+ inline void setIsDebuggee();
+ inline void unsetIsDebuggee();
+
+ inline HandleValue returnValue() const;
+ inline void setReturnValue(const Value& rval) const;
+
+ friend void GDBTestInitAbstractFramePtr(AbstractFramePtr&, InterpreterFrame*);
+ friend void GDBTestInitAbstractFramePtr(AbstractFramePtr&,
+ jit::BaselineFrame*);
+ friend void GDBTestInitAbstractFramePtr(AbstractFramePtr&,
+ jit::RematerializedFrame*);
+ friend void GDBTestInitAbstractFramePtr(AbstractFramePtr& frame,
+ wasm::DebugFrame* ptr);
+};
+
+class NullFramePtr : public AbstractFramePtr {
+ public:
+ NullFramePtr() : AbstractFramePtr() {}
+};
+
+enum MaybeConstruct { NO_CONSTRUCT = false, CONSTRUCT = true };
+
+/*****************************************************************************/
+
+class InterpreterFrame {
+ enum Flags : uint32_t {
+ CONSTRUCTING = 0x1, /* frame is for a constructor invocation */
+
+ RESUMED_GENERATOR = 0x2, /* frame is for a resumed generator invocation */
+
+ /* Function prologue state */
+ HAS_INITIAL_ENV =
+ 0x4, /* callobj created for function or var env for eval */
+ HAS_ARGS_OBJ = 0x8, /* ArgumentsObject created for needsArgsObj script */
+
+ /* Lazy frame initialization */
+ HAS_RVAL = 0x10, /* frame has rval_ set */
+
+ /* Debugger state */
+ PREV_UP_TO_DATE = 0x20, /* see DebugScopes::updateLiveScopes */
+
+ /*
+ * See comment above 'isDebuggee' in Realm.h for explanation of
+ * invariants of debuggee compartments, scripts, and frames.
+ */
+ DEBUGGEE = 0x40, /* Execution is being observed by Debugger */
+
+ /* Used in tracking calls and profiling (see vm/GeckoProfiler.cpp) */
+ HAS_PUSHED_PROF_FRAME = 0x80, /* Gecko Profiler was notified of entry */
+
+ /*
+ * If set, we entered one of the JITs and ScriptFrameIter should skip
+ * this frame.
+ */
+ RUNNING_IN_JIT = 0x100,
+
+ /*
+ * If set, this frame has been on the stack when
+ * |js::SavedStacks::saveCurrentStack| was called, and so there is a
+ * |js::SavedFrame| object cached for this frame.
+ */
+ HAS_CACHED_SAVED_FRAME = 0x200,
+ };
+
+ mutable uint32_t flags_; /* bits described by Flags */
+ uint32_t nactual_; /* number of actual arguments, for function frames */
+ JSScript* script_; /* the script we're executing */
+ JSObject* envChain_; /* current environment chain */
+ Value rval_; /* if HAS_RVAL, return value of the frame */
+ ArgumentsObject* argsObj_; /* if HAS_ARGS_OBJ, the call's arguments object */
+
+ /*
+ * Previous frame and its pc and sp. Always nullptr for
+ * InterpreterActivation's entry frame, always non-nullptr for inline
+ * frames.
+ */
+ InterpreterFrame* prev_;
+ jsbytecode* prevpc_;
+ Value* prevsp_;
+
+ /*
+ * For an eval-in-frame DEBUGGER_EVAL frame, the frame in whose scope
+ * we're evaluating code. Iteration treats this as our previous frame.
+ */
+ AbstractFramePtr evalInFramePrev_;
+
+ Value* argv_; /* If hasArgs(), points to frame's arguments. */
+ LifoAlloc::Mark mark_; /* Used to release memory for this frame. */
+
+ static void staticAsserts() {
+ static_assert(offsetof(InterpreterFrame, rval_) % sizeof(Value) == 0);
+ static_assert(sizeof(InterpreterFrame) % sizeof(Value) == 0);
+ }
+
+ /*
+ * The utilities are private since they are not able to assert that only
+ * unaliased vars/formals are accessed. Normal code should prefer the
+ * InterpreterFrame::unaliased* members (or InterpreterRegs::stackDepth for
+ * the usual "depth is at least" assertions).
+ */
+ Value* slots() const { return (Value*)(this + 1); }
+ Value* base() const { return slots() + script()->nfixed(); }
+
+ friend class FrameIter;
+ friend class InterpreterRegs;
+ friend class InterpreterStack;
+ friend class jit::BaselineFrame;
+
+ /*
+ * Frame initialization, called by InterpreterStack operations after acquiring
+ * the raw memory for the frame:
+ */
+
+ /* Used for Invoke and Interpret. */
+ void initCallFrame(InterpreterFrame* prev, jsbytecode* prevpc, Value* prevsp,
+ JSFunction& callee, JSScript* script, Value* argv,
+ uint32_t nactual, MaybeConstruct constructing);
+
+ /* Used for eval, module or global frames. */
+ void initExecuteFrame(JSContext* cx, HandleScript script,
+ AbstractFramePtr prev, HandleObject envChain);
+
+ public:
+ /*
+ * Frame prologue/epilogue
+ *
+ * Every stack frame must have 'prologue' called before executing the
+ * first op and 'epilogue' called after executing the last op and before
+ * popping the frame (whether the exit is exceptional or not).
+ *
+ * For inline JS calls/returns, it is easy to call the prologue/epilogue
+ * exactly once. When calling JS from C++, Invoke/Execute push the stack
+ * frame but do *not* call the prologue/epilogue. That means Interpret
+ * must call the prologue/epilogue for the entry frame. This scheme
+ * simplifies jit compilation.
+ *
+ * An important corner case is what happens when an error occurs (OOM,
+ * over-recursed) after pushing the stack frame but before 'prologue' is
+ * called or completes fully. To simplify usage, 'epilogue' does not assume
+ * 'prologue' has completed and handles all the intermediate state details.
+ */
+
+ bool prologue(JSContext* cx);
+ void epilogue(JSContext* cx, jsbytecode* pc);
+
+ bool checkReturn(JSContext* cx, HandleValue thisv, MutableHandleValue result);
+
+ bool initFunctionEnvironmentObjects(JSContext* cx);
+
+ /*
+ * Initialize locals of newly-pushed frame to undefined.
+ */
+ void initLocals();
+
+ /*
+ * Stack frame type
+ *
+ * A stack frame may have one of four types, which determines which
+ * members of the frame may be accessed and other invariants:
+ *
+ * global frame: execution of global code
+ * function frame: execution of function code
+ * module frame: execution of a module
+ * eval frame: execution of eval code
+ */
+
+ bool isGlobalFrame() const { return script_->isGlobalCode(); }
+
+ bool isModuleFrame() const { return script_->isModule(); }
+
+ bool isEvalFrame() const { return script_->isForEval(); }
+
+ bool isFunctionFrame() const { return script_->isFunction(); }
+
+ /*
+ * Previous frame
+ *
+ * A frame's 'prev' frame is either null or the previous frame pointed to
+ * by cx->regs->fp when this frame was pushed. Often, given two prev-linked
+ * frames, the next-frame is a function or eval that was called by the
+ * prev-frame, but not always: the prev-frame may have called a native that
+ * reentered the VM through JS_CallFunctionValue on the same context
+ * (without calling JS_SaveFrameChain) which pushed the next-frame. Thus,
+ * 'prev' has little semantic meaning and basically just tells the VM what
+ * to set cx->regs->fp to when this frame is popped.
+ */
+
+ InterpreterFrame* prev() const { return prev_; }
+
+ AbstractFramePtr evalInFramePrev() const {
+ MOZ_ASSERT(isEvalFrame());
+ return evalInFramePrev_;
+ }
+
+ /*
+ * (Unaliased) locals and arguments
+ *
+ * Only non-eval function frames have arguments. The arguments pushed by
+ * the caller are the 'actual' arguments. The declared arguments of the
+   * callee are the 'formal' arguments. When the caller passes fewer actual
+ * arguments, missing formal arguments are padded with |undefined|.
+ *
+ * When a local/formal variable is aliased (accessed by nested closures,
+ * environment operations, or 'arguments'), the canonical location for
+ * that value is the slot of an environment object. Aliased locals don't
+ * have stack slots assigned to them. These functions assert that
+ * accesses to stack values are unaliased.
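+   *
+   * For example (editorial sketch): calling |function f(a, b) {}| as f(1)
+   * gives numActualArgs() == 1 and numFormalArgs() == 2, with argv()[1]
+   * padded to |undefined|.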
+ */
+
+ inline Value& unaliasedLocal(uint32_t i);
+
+ bool hasArgs() const { return isFunctionFrame(); }
+ inline Value& unaliasedFormal(unsigned i,
+ MaybeCheckAliasing = CHECK_ALIASING);
+ inline Value& unaliasedActual(unsigned i,
+ MaybeCheckAliasing = CHECK_ALIASING);
+ template <class Op>
+ inline void unaliasedForEachActual(Op op);
+
+ unsigned numFormalArgs() const {
+ MOZ_ASSERT(hasArgs());
+ return callee().nargs();
+ }
+ unsigned numActualArgs() const {
+ MOZ_ASSERT(hasArgs());
+ return nactual_;
+ }
+
+ /* Watch out, this exposes a pointer to the unaliased formal arg array. */
+ Value* argv() const {
+ MOZ_ASSERT(hasArgs());
+ return argv_;
+ }
+
+ /*
+ * Arguments object
+ *
+ * If a non-eval function has script->needsArgsObj, an arguments object is
+ * created in the prologue and stored in the local variable for the
+ * 'arguments' binding (script->argumentsLocal). Since this local is
+ * mutable, the arguments object can be overwritten and we can "lose" the
+ * arguments object. Thus, InterpreterFrame keeps an explicit argsObj_ field
+ * so that the original arguments object is always available.
+ */
+
+ ArgumentsObject& argsObj() const;
+ void initArgsObj(ArgumentsObject& argsobj);
+
+ ArrayObject* createRestParameter(JSContext* cx);
+
+ /*
+ * Environment chain
+ *
+ * In theory, the environment chain would contain an object for every
+ * lexical scope. However, only objects that are required for dynamic
+ * lookup are actually created.
+ *
+   * Given that an InterpreterFrame corresponds roughly to an ES Execution
+   * Context (ES 10.3), GetVariablesObject corresponds to the
+   * VariableEnvironment component of an Execution Context. Intuitively, the
+ * variables object is where new bindings (variables and functions) are
+ * stored. One might expect that this is either the Call object or
+   * envChain.globalObj for function or global code, respectively; however,
+ * the JSAPI allows calls of Execute to specify a variables object on the
+ * environment chain other than the call/global object. This allows
+ * embeddings to run multiple scripts under the same global, each time
+ * using a new variables object to collect and discard the script's global
+ * variables.
+ */
+
+ inline HandleObject environmentChain() const;
+
+ inline EnvironmentObject& aliasedEnvironment(EnvironmentCoordinate ec) const;
+ inline EnvironmentObject& aliasedEnvironmentMaybeDebug(
+ EnvironmentCoordinate ec) const;
+ inline GlobalObject& global() const;
+ inline CallObject& callObj() const;
+ inline ExtensibleLexicalEnvironmentObject& extensibleLexicalEnvironment()
+ const;
+
+ template <typename SpecificEnvironment>
+ inline void pushOnEnvironmentChain(SpecificEnvironment& env);
+ template <typename SpecificEnvironment>
+ inline void popOffEnvironmentChain();
+ inline void replaceInnermostEnvironment(BlockLexicalEnvironmentObject& env);
+
+ // Push a VarEnvironmentObject for function frames of functions that have
+ // parameter expressions with closed over var bindings.
+ bool pushVarEnvironment(JSContext* cx, Handle<Scope*> scope);
+
+ /*
+ * For lexical envs with aliased locals, these interfaces push and pop
+ * entries on the environment chain. The "freshen" operation replaces the
+ * current lexical env with a fresh copy of it, to implement semantics
+ * providing distinct bindings per iteration of a for(;;) loop whose head
+ * has a lexical declaration. The "recreate" operation replaces the
+ * current lexical env with a copy of it containing uninitialized
+ * bindings, to implement semantics providing distinct bindings per
+ * iteration of a for-in/of loop.
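+   *
+   * For example (editorial sketch), in
+   *
+   *   for (let i = 0; i < 3; i++) { fns.push(() => i); }
+   *
+   * the lexical env is freshened between iterations, so each closure
+   * captures its own binding of |i|.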
+ */
+
+ bool pushLexicalEnvironment(JSContext* cx, Handle<LexicalScope*> scope);
+ bool freshenLexicalEnvironment(JSContext* cx);
+ bool recreateLexicalEnvironment(JSContext* cx);
+
+ bool pushClassBodyEnvironment(JSContext* cx, Handle<ClassBodyScope*> scope);
+
+ /*
+ * Script
+ *
+ * All frames have an associated JSScript which holds the bytecode being
+ * executed for the frame.
+ */
+
+ JSScript* script() const { return script_; }
+
+ /* Return the previous frame's pc. */
+ jsbytecode* prevpc() {
+ MOZ_ASSERT(prev_);
+ return prevpc_;
+ }
+
+ /* Return the previous frame's sp. */
+ Value* prevsp() {
+ MOZ_ASSERT(prev_);
+ return prevsp_;
+ }
+
+ /*
+ * Return the 'this' argument passed to a non-eval function frame. This is
+   * not necessarily the frame's this-binding; for instance, non-strict
+ * functions will box primitive 'this' values and thisArgument() will
+ * return the original, unboxed Value.
+ */
+ Value& thisArgument() const {
+ MOZ_ASSERT(isFunctionFrame());
+ return argv()[-1];
+ }
+
+ /*
+ * Callee
+ *
+ * Only function frames have a true callee. An eval frame in a function has
+ * the same callee as its containing function frame. An async module has to
+ * create a wrapper callee to allow passing the script to generators for
+ * pausing and resuming.
+ */
+
+ JSFunction& callee() const {
+ MOZ_ASSERT(isFunctionFrame());
+ return calleev().toObject().as<JSFunction>();
+ }
+
+ const Value& calleev() const {
+ MOZ_ASSERT(isFunctionFrame());
+ return argv()[-2];
+ }
+
+ /*
+ * New Target
+ *
+ * Only non-arrow function frames have a meaningful newTarget.
+ */
+ Value newTarget() const {
+ MOZ_ASSERT(isFunctionFrame());
+ MOZ_ASSERT(!callee().isArrow());
+
+ if (isConstructing()) {
+ unsigned pushedArgs = std::max(numFormalArgs(), numActualArgs());
+ return argv()[pushedArgs];
+ }
+ return UndefinedValue();
+ }
+
+ /* Profiler flags */
+
+ bool hasPushedGeckoProfilerFrame() {
+ return !!(flags_ & HAS_PUSHED_PROF_FRAME);
+ }
+
+ void setPushedGeckoProfilerFrame() { flags_ |= HAS_PUSHED_PROF_FRAME; }
+
+ void unsetPushedGeckoProfilerFrame() { flags_ &= ~HAS_PUSHED_PROF_FRAME; }
+
+ /* Return value */
+
+ bool hasReturnValue() const { return flags_ & HAS_RVAL; }
+
+ MutableHandleValue returnValue() {
+ if (!hasReturnValue()) {
+ rval_.setUndefined();
+ }
+ return MutableHandleValue::fromMarkedLocation(&rval_);
+ }
+
+ void markReturnValue() { flags_ |= HAS_RVAL; }
+
+ void setReturnValue(const Value& v) {
+ rval_ = v;
+ markReturnValue();
+ }
+
+ // Copy values from this frame into a private Array, owned by the
+ // GeneratorObject, for suspending.
+ [[nodiscard]] inline bool saveGeneratorSlots(JSContext* cx, unsigned nslots,
+ ArrayObject* dest) const;
+
+ // Copy values from the Array into this stack frame, for resuming.
+ inline void restoreGeneratorSlots(ArrayObject* src);
+
+ void resumeGeneratorFrame(JSObject* envChain) {
+ MOZ_ASSERT(script()->isGenerator() || script()->isAsync());
+ MOZ_ASSERT_IF(!script()->isModule(), isFunctionFrame());
+ flags_ |= HAS_INITIAL_ENV;
+ envChain_ = envChain;
+ }
+
+ /*
+ * Other flags
+ */
+
+ bool isConstructing() const { return !!(flags_ & CONSTRUCTING); }
+
+ void setResumedGenerator() { flags_ |= RESUMED_GENERATOR; }
+ bool isResumedGenerator() const { return !!(flags_ & RESUMED_GENERATOR); }
+
+ /*
+ * These two queries should not be used in general: the presence/absence of
+ * the call/args object is determined by the static(ish) properties of the
+ * JSFunction/JSScript. These queries should only be performed when probing
+ * a stack frame that may be in the middle of the prologue (during which
+   * time the call/args objects are created).
+ */
+
+ inline bool hasInitialEnvironment() const;
+
+ bool hasInitialEnvironmentUnchecked() const {
+ return flags_ & HAS_INITIAL_ENV;
+ }
+
+ bool hasArgsObj() const {
+ MOZ_ASSERT(script()->needsArgsObj());
+ return flags_ & HAS_ARGS_OBJ;
+ }
+
+ /*
+ * Debugger eval frames.
+ *
+ * - If evalInFramePrev_ is non-null, frame was created for an "eval in
+ * frame" call, which can push a successor to any live frame; so its
+ * logical "prev" frame is not necessarily the previous frame in memory.
+ * Iteration should treat evalInFramePrev_ as this frame's previous frame.
+ *
+ * - Don't bother to JIT it, because it's probably short-lived.
+ *
+   * - It is required to have an environment chain object outside the
+ * js::EnvironmentObject hierarchy: either a global object, or a
+ * DebugEnvironmentProxy.
+ */
+ bool isDebuggerEvalFrame() const {
+ return isEvalFrame() && !!evalInFramePrev_;
+ }
+
+ bool prevUpToDate() const { return !!(flags_ & PREV_UP_TO_DATE); }
+
+ void setPrevUpToDate() { flags_ |= PREV_UP_TO_DATE; }
+
+ void unsetPrevUpToDate() { flags_ &= ~PREV_UP_TO_DATE; }
+
+ bool isDebuggee() const { return !!(flags_ & DEBUGGEE); }
+
+ void setIsDebuggee() { flags_ |= DEBUGGEE; }
+
+ inline void unsetIsDebuggee();
+
+ bool hasCachedSavedFrame() const { return flags_ & HAS_CACHED_SAVED_FRAME; }
+ void setHasCachedSavedFrame() { flags_ |= HAS_CACHED_SAVED_FRAME; }
+ void clearHasCachedSavedFrame() { flags_ &= ~HAS_CACHED_SAVED_FRAME; }
+
+ public:
+ void trace(JSTracer* trc, Value* sp, jsbytecode* pc);
+ void traceValues(JSTracer* trc, unsigned start, unsigned end);
+
+ // Entered Baseline/Ion from the interpreter.
+ bool runningInJit() const { return !!(flags_ & RUNNING_IN_JIT); }
+ void setRunningInJit() { flags_ |= RUNNING_IN_JIT; }
+ void clearRunningInJit() { flags_ &= ~RUNNING_IN_JIT; }
+};
+
+/*****************************************************************************/
+
+class InterpreterRegs {
+ public:
+ Value* sp;
+ jsbytecode* pc;
+
+ private:
+ InterpreterFrame* fp_;
+
+ public:
+ InterpreterFrame* fp() const { return fp_; }
+
+ unsigned stackDepth() const {
+ MOZ_ASSERT(sp >= fp_->base());
+ return sp - fp_->base();
+ }
+
+ Value* spForStackDepth(unsigned depth) const {
+ MOZ_ASSERT(fp_->script()->nfixed() + depth <= fp_->script()->nslots());
+ return fp_->base() + depth;
+ }
+
+ void popInlineFrame() {
+ pc = fp_->prevpc();
+ unsigned spForNewTarget =
+ fp_->isResumedGenerator() ? 0 : fp_->isConstructing();
+ // This code is called when resuming from async and generator code.
+ // In the case of modules, we don't have arguments, so we can't use
+ // numActualArgs, which asserts 'hasArgs'.
+ unsigned nActualArgs = fp_->isModuleFrame() ? 0 : fp_->numActualArgs();
+ sp = fp_->prevsp() - nActualArgs - 1 - spForNewTarget;
+ fp_ = fp_->prev();
+ MOZ_ASSERT(fp_);
+ }
+ void prepareToRun(InterpreterFrame& fp, JSScript* script) {
+ pc = script->code();
+ sp = fp.slots() + script->nfixed();
+ fp_ = &fp;
+ }
+
+ void setToEndOfScript();
+
+ MutableHandleValue stackHandleAt(int i) {
+ return MutableHandleValue::fromMarkedLocation(&sp[i]);
+ }
+
+ HandleValue stackHandleAt(int i) const {
+ return HandleValue::fromMarkedLocation(&sp[i]);
+ }
+
+ friend void GDBTestInitInterpreterRegs(InterpreterRegs&,
+ js::InterpreterFrame*, JS::Value*,
+ uint8_t*);
+};
+
+/*****************************************************************************/
+
+class InterpreterStack {
+ friend class InterpreterActivation;
+
+ static const size_t DEFAULT_CHUNK_SIZE = 4 * 1024;
+ LifoAlloc allocator_;
+
+ // Number of interpreter frames on the stack, for over-recursion checks.
+ static const size_t MAX_FRAMES = 50 * 1000;
+ static const size_t MAX_FRAMES_TRUSTED = MAX_FRAMES + 1000;
+ size_t frameCount_;
+
+ inline uint8_t* allocateFrame(JSContext* cx, size_t size);
+
+ inline InterpreterFrame* getCallFrame(JSContext* cx, const CallArgs& args,
+ HandleScript script,
+ MaybeConstruct constructing,
+ Value** pargv);
+
+ void releaseFrame(InterpreterFrame* fp) {
+ frameCount_--;
+ allocator_.release(fp->mark_);
+ }
+
+ public:
+ InterpreterStack() : allocator_(DEFAULT_CHUNK_SIZE), frameCount_(0) {}
+
+ ~InterpreterStack() { MOZ_ASSERT(frameCount_ == 0); }
+
+ // For execution of eval, module or global code.
+ InterpreterFrame* pushExecuteFrame(JSContext* cx, HandleScript script,
+ HandleObject envChain,
+ AbstractFramePtr evalInFrame);
+
+ // Called to invoke a function.
+ InterpreterFrame* pushInvokeFrame(JSContext* cx, const CallArgs& args,
+ MaybeConstruct constructing);
+
+ // The interpreter can push light-weight, "inline" frames without entering a
+ // new InterpreterActivation or recursively calling Interpret.
+ bool pushInlineFrame(JSContext* cx, InterpreterRegs& regs,
+ const CallArgs& args, HandleScript script,
+ MaybeConstruct constructing);
+
+ void popInlineFrame(InterpreterRegs& regs);
+
+ bool resumeGeneratorCallFrame(JSContext* cx, InterpreterRegs& regs,
+ HandleFunction callee, HandleObject envChain);
+
+ inline void purge(JSRuntime* rt);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return allocator_.sizeOfExcludingThis(mallocSizeOf);
+ }
+};
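+
+// A minimal sketch of the intended push/pop pairing for inline frames
+// (hypothetical caller; error handling elided):
+//
+// ```
+// if (!stack.pushInlineFrame(cx, regs, args, script, constructing)) {
+//   return false;  // over-recursion or OOM, already reported
+// }
+// // ...interpret the callee's bytecode through |regs|...
+// stack.popInlineFrame(regs);
+// ```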
+
+void TraceInterpreterActivations(JSContext* cx, JSTracer* trc);
+
+/*****************************************************************************/
+
+/** Base class for all function call args. */
+class AnyInvokeArgs : public JS::CallArgs {};
+
+/** Base class for all function construction args. */
+class AnyConstructArgs : public JS::CallArgs {
+ // Only js::Construct (or internal methods that call the qualified CallArgs
+ // versions) should do these things!
+ void setCallee(const Value& v) = delete;
+ void setThis(const Value& v) = delete;
+ MutableHandleValue newTarget() const = delete;
+ MutableHandleValue rval() const = delete;
+};
+
+namespace detail {
+
+/** Function call/construct args of statically-unknown count. */
+template <MaybeConstruct Construct>
+class GenericArgsBase
+ : public std::conditional_t<Construct, AnyConstructArgs, AnyInvokeArgs> {
+ protected:
+ RootedValueVector v_;
+
+ explicit GenericArgsBase(JSContext* cx) : v_(cx) {}
+
+ public:
+ bool init(JSContext* cx, uint64_t argc) {
+ if (argc > ARGS_LENGTH_MAX) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TOO_MANY_ARGUMENTS);
+ return false;
+ }
+
+ // callee, this, arguments[, new.target iff constructing]
+ size_t len = 2 + argc + uint32_t(Construct);
+ MOZ_ASSERT(len > argc); // no overflow
+ if (!v_.resize(len)) {
+ return false;
+ }
+
+ *static_cast<JS::CallArgs*>(this) = CallArgsFromVp(argc, v_.begin());
+ this->constructing_ = Construct;
+ if (Construct) {
+ this->CallArgs::setThis(MagicValue(JS_IS_CONSTRUCTING));
+ }
+ return true;
+ }
+};
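+
+// For instance, GenericArgsBase<CONSTRUCT>::init(cx, 2) sizes the vector to
+// five slots: callee, this, arg0, arg1, and new.target.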
+
+/** Function call/construct args of statically-known count. */
+template <MaybeConstruct Construct, size_t N>
+class FixedArgsBase
+ : public std::conditional_t<Construct, AnyConstructArgs, AnyInvokeArgs> {
+ // Add +1 here to avoid noisy warning on gcc when N=0 (0 <= unsigned).
+ static_assert(N + 1 <= ARGS_LENGTH_MAX + 1, "o/~ too many args o/~");
+
+ protected:
+ JS::RootedValueArray<2 + N + uint32_t(Construct)> v_;
+
+ explicit FixedArgsBase(JSContext* cx) : v_(cx) {
+ *static_cast<JS::CallArgs*>(this) = CallArgsFromVp(N, v_.begin());
+ this->constructing_ = Construct;
+ if (Construct) {
+ this->CallArgs::setThis(MagicValue(JS_IS_CONSTRUCTING));
+ }
+ }
+};
+
+} // namespace detail
+
+/** Function call args of statically-unknown count. */
+class InvokeArgs : public detail::GenericArgsBase<NO_CONSTRUCT> {
+ using Base = detail::GenericArgsBase<NO_CONSTRUCT>;
+
+ public:
+ explicit InvokeArgs(JSContext* cx) : Base(cx) {}
+};
+
+/** Function call args of statically-unknown count. */
+class InvokeArgsMaybeIgnoresReturnValue
+ : public detail::GenericArgsBase<NO_CONSTRUCT> {
+ using Base = detail::GenericArgsBase<NO_CONSTRUCT>;
+
+ public:
+ explicit InvokeArgsMaybeIgnoresReturnValue(JSContext* cx) : Base(cx) {}
+
+ bool init(JSContext* cx, unsigned argc, bool ignoresReturnValue) {
+ if (!Base::init(cx, argc)) {
+ return false;
+ }
+ this->ignoresReturnValue_ = ignoresReturnValue;
+ return true;
+ }
+};
+
+/** Function call args of statically-known count. */
+template <size_t N>
+class FixedInvokeArgs : public detail::FixedArgsBase<NO_CONSTRUCT, N> {
+ using Base = detail::FixedArgsBase<NO_CONSTRUCT, N>;
+
+ public:
+ explicit FixedInvokeArgs(JSContext* cx) : Base(cx) {}
+};
+
+/** Function construct args of statically-unknown count. */
+class ConstructArgs : public detail::GenericArgsBase<CONSTRUCT> {
+ using Base = detail::GenericArgsBase<CONSTRUCT>;
+
+ public:
+ explicit ConstructArgs(JSContext* cx) : Base(cx) {}
+};
+
+/** Function construct args of statically-known count. */
+template <size_t N>
+class FixedConstructArgs : public detail::FixedArgsBase<CONSTRUCT, N> {
+ using Base = detail::FixedArgsBase<CONSTRUCT, N>;
+
+ public:
+ explicit FixedConstructArgs(JSContext* cx) : Base(cx) {}
+};
+
+template <class Args, class Arraylike>
+inline bool FillArgumentsFromArraylike(JSContext* cx, Args& args,
+ const Arraylike& arraylike) {
+ uint32_t len = arraylike.length();
+ if (!args.init(cx, len)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < len; i++) {
+ args[i].set(arraylike[i]);
+ }
+
+ return true;
+}
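+
+// A usage sketch (hypothetical embedder code; assumes js::Call from
+// vm/Interpreter.h and suitably rooted |fval|, |thisv|, and |rval|):
+//
+// ```
+// js::FixedInvokeArgs<2> args(cx);
+// args[0].setInt32(1);
+// args[1].setBoolean(true);
+// if (!js::Call(cx, fval, thisv, args, &rval)) {
+//   return false;
+// }
+// ```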
+
+} // namespace js
+
+namespace mozilla {
+
+template <>
+struct DefaultHasher<js::AbstractFramePtr> {
+ using Lookup = js::AbstractFramePtr;
+
+ static js::HashNumber hash(const Lookup& key) {
+ return mozilla::HashGeneric(key.raw());
+ }
+
+ static bool match(const js::AbstractFramePtr& k, const Lookup& l) {
+ return k == l;
+ }
+};
+
+} // namespace mozilla
+
+#endif // vm_Stack_h
diff --git a/js/src/vm/StaticStrings.cpp b/js/src/vm/StaticStrings.cpp
new file mode 100644
index 0000000000..c88e02fa39
--- /dev/null
+++ b/js/src/vm/StaticStrings.cpp
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/StaticStrings.h"
+
+#include "mozilla/HashFunctions.h" // mozilla::HashString
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint32_t
+
+#include "js/HashTable.h" // js::HashNumber
+#include "js/TypeDecls.h" // Latin1Char
+#include "vm/Realm.h" // AutoAllocInAtomsZone
+#include "vm/StringType.h" // JSString, JSLinearString
+
+#include "vm/Realm-inl.h" // AutoAllocInAtomsZone
+#include "vm/StringType-inl.h" // NewInlineAtom
+
+using namespace js;
+
+constexpr StaticStrings::SmallCharTable StaticStrings::createSmallCharTable() {
+ SmallCharTable array{};
+ for (size_t i = 0; i < SMALL_CHAR_TABLE_SIZE; i++) {
+ array[i] = toSmallChar(i);
+ }
+ return array;
+}
+
+const StaticStrings::SmallCharTable StaticStrings::toSmallCharTable =
+ createSmallCharTable();
+
+bool StaticStrings::init(JSContext* cx) {
+ AutoAllocInAtomsZone az(cx);
+
+ static_assert(UNIT_STATIC_LIMIT - 1 <= JSString::MAX_LATIN1_CHAR,
+ "Unit strings must fit in Latin1Char.");
+
+ for (uint32_t i = 0; i < UNIT_STATIC_LIMIT; i++) {
+ Latin1Char ch = Latin1Char(i);
+ HashNumber hash = mozilla::HashString(&ch, 1);
+ JSAtom* a = NewInlineAtom(cx, &ch, 1, hash);
+ if (!a) {
+ return false;
+ }
+ a->makePermanent();
+ unitStaticTable[i] = a;
+ }
+
+ for (uint32_t i = 0; i < NUM_LENGTH2_ENTRIES; i++) {
+ Latin1Char buffer[] = {firstCharOfLength2(i), secondCharOfLength2(i)};
+ HashNumber hash = mozilla::HashString(buffer, 2);
+ JSAtom* a = NewInlineAtom(cx, buffer, 2, hash);
+ if (!a) {
+ return false;
+ }
+ a->makePermanent();
+ length2StaticTable[i] = a;
+ }
+
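+  // Integer atoms reuse the tables above where possible: "0" through "9"
+  // alias the unit strings, "10" through "99" alias length-2 atoms, and only
+  // "100" through "255" get dedicated length-3 atoms below.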
+ for (uint32_t i = 0; i < INT_STATIC_LIMIT; i++) {
+ if (i < 10) {
+ intStaticTable[i] = unitStaticTable[i + '0'];
+ } else if (i < 100) {
+ auto index =
+ getLength2IndexStatic(char(i / 10) + '0', char(i % 10) + '0');
+ intStaticTable[i] = length2StaticTable[index];
+ } else {
+ Latin1Char buffer[] = {Latin1Char(firstCharOfLength3(i)),
+ Latin1Char(secondCharOfLength3(i)),
+ Latin1Char(thirdCharOfLength3(i))};
+ HashNumber hash = mozilla::HashString(buffer, 3);
+ JSAtom* a = NewInlineAtom(cx, buffer, 3, hash);
+ if (!a) {
+ return false;
+ }
+ a->makePermanent();
+ intStaticTable[i] = a;
+ }
+
+    // Static string initialization cannot race, so this is safe even without
+    // the lock.
+ intStaticTable[i]->setIsIndex(i);
+ }
+
+ return true;
+}
diff --git a/js/src/vm/StaticStrings.h b/js/src/vm/StaticStrings.h
new file mode 100644
index 0000000000..0a2beda98e
--- /dev/null
+++ b/js/src/vm/StaticStrings.h
@@ -0,0 +1,276 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_StaticStrings_h
+#define vm_StaticStrings_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Attributes.h" // MOZ_ALWAYS_INLINE
+#include "mozilla/TextUtils.h" // mozilla::{IsAsciiDigit, IsAsciiLowercaseAlpha, IsAsciiUppercaseAlpha}
+
+#include <stddef.h> // size_t
+#include <stdint.h> // int32_t, uint32_t
+#include <type_traits> // std::is_same_v
+
+#include "jstypes.h" // JS_PUBLIC_API, js::Bit, js::BitMask
+
+#include "js/TypeDecls.h" // JS::Latin1Char
+
+struct JS_PUBLIC_API JSContext;
+
+class JSAtom;
+class JSLinearString;
+class JSString;
+
+namespace js {
+
+namespace frontend {
+class ParserAtomsTable;
+class TaggedParserAtomIndex;
+class WellKnownParserAtoms;
+struct CompilationAtomCache;
+} // namespace frontend
+
+namespace jit {
+class MacroAssembler;
+} // namespace jit
+
+class StaticStrings {
+  // NOTE: The WellKnownParserAtoms rely on these tables and may need to be
+  // updated if these tables are changed.
+ friend class js::frontend::ParserAtomsTable;
+ friend class js::frontend::TaggedParserAtomIndex;
+ friend class js::frontend::WellKnownParserAtoms;
+ friend struct js::frontend::CompilationAtomCache;
+
+ friend class js::jit::MacroAssembler;
+
+ private:
+  // Strings match the `[A-Za-z0-9$_]{2}` pattern.
+ // Store each character in 6 bits.
+ // See fromSmallChar/toSmallChar for the mapping.
+ static constexpr size_t SMALL_CHAR_BITS = 6;
+ static constexpr size_t SMALL_CHAR_MASK = js::BitMask(SMALL_CHAR_BITS);
+
+ // To optimize ASCII -> small char, allocate a table.
+ static constexpr size_t SMALL_CHAR_TABLE_SIZE = 128U;
+ static constexpr size_t NUM_SMALL_CHARS = js::Bit(SMALL_CHAR_BITS);
+ static constexpr size_t NUM_LENGTH2_ENTRIES =
+ NUM_SMALL_CHARS * NUM_SMALL_CHARS;
+
+ JSAtom* length2StaticTable[NUM_LENGTH2_ENTRIES] = {}; // zeroes
+
+ public:
+ /* We keep these public for the JITs. */
+ static const size_t UNIT_STATIC_LIMIT = 256U;
+ JSAtom* unitStaticTable[UNIT_STATIC_LIMIT] = {}; // zeroes
+
+ static const size_t INT_STATIC_LIMIT = 256U;
+ JSAtom* intStaticTable[INT_STATIC_LIMIT] = {}; // zeroes
+
+ StaticStrings() = default;
+
+ bool init(JSContext* cx);
+
+ static bool hasUint(uint32_t u) { return u < INT_STATIC_LIMIT; }
+
+ JSAtom* getUint(uint32_t u) {
+ MOZ_ASSERT(hasUint(u));
+ return intStaticTable[u];
+ }
+
+ static bool hasInt(int32_t i) { return uint32_t(i) < INT_STATIC_LIMIT; }
+
+ JSAtom* getInt(int32_t i) {
+ MOZ_ASSERT(hasInt(i));
+ return getUint(uint32_t(i));
+ }
+
+ static bool hasUnit(char16_t c) { return c < UNIT_STATIC_LIMIT; }
+
+ JSAtom* getUnit(char16_t c) {
+ MOZ_ASSERT(hasUnit(c));
+ return unitStaticTable[c];
+ }
+
+  /* The returned string may not be an atom; returns null on (reported)
+   * failure. */
+ inline JSLinearString* getUnitStringForElement(JSContext* cx, JSString* str,
+ size_t index);
+
+ template <typename CharT>
+ static bool isStatic(const CharT* chars, size_t len);
+
+ /* Return null if no static atom exists for the given (chars, length). */
+ template <typename CharT>
+ MOZ_ALWAYS_INLINE JSAtom* lookup(const CharT* chars, size_t length) {
+ static_assert(std::is_same_v<CharT, JS::Latin1Char> ||
+ std::is_same_v<CharT, char16_t>,
+ "for understandability, |chars| must be one of a few "
+ "identified types");
+
+ switch (length) {
+ case 1: {
+ char16_t c = chars[0];
+ if (c < UNIT_STATIC_LIMIT) {
+ return getUnit(c);
+ }
+ return nullptr;
+ }
+ case 2:
+ if (fitsInSmallChar(chars[0]) && fitsInSmallChar(chars[1])) {
+ return getLength2(chars[0], chars[1]);
+ }
+ return nullptr;
+ case 3:
+ /*
+ * Here we know that JSString::intStringTable covers only 256 (or at
+ * least not 1000 or more) chars. We rely on order here to resolve the
+ * unit vs. int string/length-2 string atom identity issue by giving
+ * priority to unit strings for "0" through "9" and length-2 strings for
+ * "10" through "99".
+ */
+ int i;
+ if (fitsInLength3Static(chars[0], chars[1], chars[2], &i)) {
+ return getInt(i);
+ }
+ return nullptr;
+ }
+
+ return nullptr;
+ }
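+
+  // For example, given the tables above: lookup("q", 1) returns the unit atom
+  // for 'q', lookup("ab", 2) the length-2 atom "ab", and lookup("123", 3) the
+  // int atom for 123, while lookup("abc", 3) returns nullptr.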
+
+ MOZ_ALWAYS_INLINE JSAtom* lookup(const char* chars, size_t length) {
+    // Collapse calls for |const char*| into |const JS::Latin1Char*| to avoid
+    // excess instantiations.
+ return lookup(reinterpret_cast<const JS::Latin1Char*>(chars), length);
+ }
+
+ private:
+ using SmallChar = uint8_t;
+
+ struct SmallCharTable {
+ SmallChar storage[SMALL_CHAR_TABLE_SIZE];
+
+ constexpr SmallChar& operator[](size_t idx) { return storage[idx]; }
+ constexpr const SmallChar& operator[](size_t idx) const {
+ return storage[idx];
+ }
+ };
+
+ static const SmallChar INVALID_SMALL_CHAR = -1;
+
+ static bool fitsInSmallChar(char16_t c) {
+ return c < SMALL_CHAR_TABLE_SIZE &&
+ toSmallCharTable[c] != INVALID_SMALL_CHAR;
+ }
+
+ template <typename CharT>
+ static bool fitsInLength3Static(CharT c1, CharT c2, CharT c3, int* i) {
+ static_assert(INT_STATIC_LIMIT <= 299,
+ "static int strings assumed below to be at most "
+ "three digits where the first digit is either 1 or 2");
+ if ('1' <= c1 && c1 < '3' && '0' <= c2 && c2 <= '9' && '0' <= c3 &&
+ c3 <= '9') {
+ *i = (c1 - '0') * 100 + (c2 - '0') * 10 + (c3 - '0');
+
+ if (unsigned(*i) < INT_STATIC_LIMIT) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ static constexpr JS::Latin1Char fromSmallChar(SmallChar c);
+
+ static constexpr SmallChar toSmallChar(uint32_t c);
+
+ static constexpr SmallCharTable createSmallCharTable();
+
+ static const SmallCharTable toSmallCharTable;
+
+ static constexpr JS::Latin1Char firstCharOfLength2(size_t s) {
+ return fromSmallChar(s >> SMALL_CHAR_BITS);
+ }
+ static constexpr JS::Latin1Char secondCharOfLength2(size_t s) {
+ return fromSmallChar(s & SMALL_CHAR_MASK);
+ }
+
+ static constexpr JS::Latin1Char firstCharOfLength3(uint32_t i) {
+ return '0' + (i / 100);
+ }
+ static constexpr JS::Latin1Char secondCharOfLength3(uint32_t i) {
+ return '0' + ((i / 10) % 10);
+ }
+ static constexpr JS::Latin1Char thirdCharOfLength3(uint32_t i) {
+ return '0' + (i % 10);
+ }
+
+ static MOZ_ALWAYS_INLINE size_t getLength2Index(char16_t c1, char16_t c2) {
+ MOZ_ASSERT(fitsInSmallChar(c1));
+ MOZ_ASSERT(fitsInSmallChar(c2));
+ return (size_t(toSmallCharTable[c1]) << SMALL_CHAR_BITS) +
+ toSmallCharTable[c2];
+ }
+
+  // Same as getLength2Index, but without the runtime assertions;
+  // this should be used only for known static strings.
+ static constexpr size_t getLength2IndexStatic(char c1, char c2) {
+ return (size_t(toSmallChar(c1)) << SMALL_CHAR_BITS) + toSmallChar(c2);
+ }
+
+ MOZ_ALWAYS_INLINE JSAtom* getLength2FromIndex(size_t index) {
+ return length2StaticTable[index];
+ }
+
+ MOZ_ALWAYS_INLINE JSAtom* getLength2(char16_t c1, char16_t c2) {
+ return getLength2FromIndex(getLength2Index(c1, c2));
+ }
+};
+
+/*
+ * Declare length-2 strings. We only store strings where both characters are
+ * alphanumeric. The lower 10 short chars are the numerals, the next 26 are
+ * the lowercase letters, and the next 26 are the uppercase letters.
+ */
+
+constexpr JS::Latin1Char StaticStrings::fromSmallChar(SmallChar c) {
+ if (c < 10) {
+ return c + '0';
+ }
+ if (c < 36) {
+ return c + 'a' - 10;
+ }
+ if (c < 62) {
+ return c + 'A' - 36;
+ }
+ if (c == 62) {
+ return '$';
+ }
+ return '_';
+}
+
+constexpr StaticStrings::SmallChar StaticStrings::toSmallChar(uint32_t c) {
+ if (mozilla::IsAsciiDigit(c)) {
+ return c - '0';
+ }
+ if (mozilla::IsAsciiLowercaseAlpha(c)) {
+ return c - 'a' + 10;
+ }
+ if (mozilla::IsAsciiUppercaseAlpha(c)) {
+ return c - 'A' + 36;
+ }
+ if (c == '$') {
+ return 62;
+ }
+ if (c == '_') {
+ return 63;
+ }
+ return StaticStrings::INVALID_SMALL_CHAR;
+}
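+
+// For example, toSmallChar('0') == 0, toSmallChar('a') == 10,
+// toSmallChar('A') == 36, toSmallChar('$') == 62, and toSmallChar('_') == 63;
+// fromSmallChar inverts this mapping.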
+
+} // namespace js
+
+#endif /* vm_StaticStrings_h */
diff --git a/js/src/vm/StencilCache.cpp b/js/src/vm/StencilCache.cpp
new file mode 100644
index 0000000000..8483d44300
--- /dev/null
+++ b/js/src/vm/StencilCache.cpp
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/StencilCache.h"
+
+#include "frontend/CompilationStencil.h"
+#include "js/experimental/JSStencil.h"
+#include "vm/MutexIDs.h"
+
+js::StencilCache::StencilCache()
+ : cache(js::mutexid::StencilCache), enabled(false) {}
+
+js::StencilCache::AccessKey js::StencilCache::isSourceCached(
+ ScriptSource* src) {
+ if (!enabled) {
+ return cache.noAccess();
+ }
+
+ AccessKey lock(cache.lock());
+ if (!enabled) {
+    // We checked the flag before taking the lock, so check it again under the
+    // lock to avoid racing with concurrent cache manipulation.
+ return cache.noAccess();
+ }
+ if (!lock->watched.has(src)) {
+ // If the source does not have any cached function, and we do not expect to
+ // cache any delazification in the future, then skip any cache handling.
+ return cache.noAccess();
+ }
+ return lock;
+}
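+
+// Note: the unlocked read of |enabled| in isSourceCached followed by the
+// re-check under the lock is a double-checked pattern; the ReleaseAcquire
+// atomic makes the unlocked fast path safe when the cache is disabled.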
+
+bool js::StencilCache::startCaching(RefPtr<ScriptSource>&& src) {
+ auto guard = cache.lock();
+ if (!guard->watched.putNew(std::move(src))) {
+ return false;
+ }
+ enabled = true;
+ return true;
+}
+
+js::frontend::CompilationStencil* js::StencilCache::lookup(
+ AccessKey& guard, const StencilContext& key) {
+ auto ptr = guard->functions.lookup(key);
+ if (!ptr) {
+ return nullptr;
+ }
+
+ return ptr->value().get();
+}
+
+bool js::StencilCache::putNew(AccessKey& guard, const StencilContext& key,
+ js::frontend::CompilationStencil* value) {
+ return guard->functions.putNew(key, value);
+}
+
+// Important: This function should not be called within a scope checking for
+// isSourceCached, as this would cause a deadlock.
+void js::StencilCache::clearAndDisable() {
+ auto guard = cache.lock();
+ guard->functions.clearAndCompact();
+ guard->watched.clearAndCompact();
+ enabled = false;
+}
diff --git a/js/src/vm/StencilCache.h b/js/src/vm/StencilCache.h
new file mode 100644
index 0000000000..0e600e7255
--- /dev/null
+++ b/js/src/vm/StencilCache.h
@@ -0,0 +1,181 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_StencilCache_h
+#define vm_StencilCache_h
+
+#include "mozilla/Atomics.h" // mozilla::Atomic
+#include "mozilla/HashFunctions.h" // mozilla::HashGeneric
+#include "mozilla/RefPtr.h" // mozilla::RefPtr
+
+#include "js/HashTable.h" // js::HashTable
+
+#include "threading/ExclusiveData.h" // js::ExclusiveData
+
+#include "vm/JSScript.h" // js::ScriptSource
+#include "vm/SharedStencil.h" // js::SourceExtent
+
+struct JS_PUBLIC_API JSContext; // vm/JSContext.h
+
+namespace js {
+
+namespace frontend {
+struct CompilationStencil; // frontend/CompilationStencil.h
+struct ExtensibleCompilationStencil; // frontend/CompilationStencil.h
+} /* namespace frontend */
+
+// Note: the key is a RefPtr<ScriptSource>, but neither the PointerHasher nor
+// the DefaultHasher seems to handle these correctly.
+struct SourceCachePolicy {
+ using Lookup = const ScriptSource*;
+
+ static HashNumber hash(const Lookup& l) { return mozilla::HashGeneric(l); }
+ static bool match(const Lookup& entry, const Lookup& l) { return entry == l; }
+};
+
+// Immutable information to identify a unique function, as well as the
+// compilation context which will result in a unique Stencil once compiled.
+struct StencilContext {
+  // This pointer is used to isolate a single source. Note that the uniqueness
+  // of the ReadOnlyCompileOptions is implied by the fact that we allocate a
+  // new ScriptSource for each CompilationInput, each of which is initialized
+  // with a set of CompileOptions.
+ RefPtr<ScriptSource> source;
+
+ SourceExtent::FunctionKey funKey;
+
+ StencilContext(RefPtr<ScriptSource>& source, SourceExtent extent)
+ : source(source), funKey(extent.toFunctionKey()) {}
+};
+
+struct StencilCachePolicy {
+ using Lookup = StencilContext;
+
+ static HashNumber hash(const Lookup& l) {
+ const ScriptSource* raw = l.source;
+ return mozilla::HashGeneric(raw, l.funKey);
+ }
+ static bool match(const Lookup& entry, const Lookup& l) {
+ return entry.source == l.source && entry.funKey == l.funKey;
+ }
+};
+
+// Cache stencils which are parsed from the same source, and with identical
+// compilation options.
+//
+// This cache does not check the principals, as the source should not be
+// aliased across different principals.
+//
+// The content provided by this cache is computed by delazification tasks. The
+// delazification task needs contextual information to generate Stencils, which
+// are then registered in this cache.
+//
+// To reclaim memory from this cache, the producers should be shut down and
+// the cache should be cleared. As the delazification process needs contextual
+// information to generate the Stencils, it is impossible to resume without
+// either keeping memory or recomputing from the beginning.
+//
+// Therefore, this cache cannot be cleared without disabling it at the same
+// time. Disabling this cache ends all threads that attempt to add new
+// stencils.
+//
+// The cache can be in multiple states:
+//
+// - Cache disabled:
+//
+// The cache can be disabled when running in a steady state for some time,
+// or after reaching a shrinking GC, which reclaims the memory from this
+// cache.
+//
+//     This state is expected for the steady state of web pages, once the
+//     pages have been loaded for a while and we no longer expect a heavy
+//     delazification load.
+//
+// - Cache enabled, Source is not cached:
+//
+//     This is a rare case which can happen either when testing the
+//     StencilCache, or after some dynamic load of JavaScript code in a page
+//     that has already reached a steady state. In that case, we want to avoid
+//     locking for functions that are not part of the newly loaded source.
+//
+// This might also happen for eval-ed or inline JavaScript, which is
+// parsed on the main thread, and also delazified on the main thread.
+//
+// - Cache enabled, Source is cached:
+//
+// This case is expected to be frequent for any new document. A new
+// document will register many sources for parsing off-thread, and
+// triggering off-thread delazification.
+//
+//     All newly parsed sources will be registered here until a steady state
+//     is reached, or a shrinking GC is called.
+class StencilCache {
+ using SourceSet =
+ js::HashSet<RefPtr<ScriptSource>, SourceCachePolicy, SystemAllocPolicy>;
+ using StencilMap =
+ js::HashMap<StencilContext, RefPtr<frontend::CompilationStencil>,
+ StencilCachePolicy, SystemAllocPolicy>;
+
+ struct CacheData {
+ // Sources which are recorded in this cache.
+ SourceSet watched;
+ // Stencils of functions which are recorded in this cache.
+ StencilMap functions;
+ };
+
+ // Map a function to its CompilationStencil.
+ ExclusiveData<CacheData> cache;
+
+ // This flag is mostly read, and changes rarely. We use this Atomic to avoid
+ // locking a Mutex when the cache is disabled.
+ //
+ // The cache can be disabled when running in a steady state for some time, or
+ // after reaching a shrinking GC, which reclaims the memory from this cache
+ // and indirectly ends the threads which are producing stencils for this
+ // cache.
+ mozilla::Atomic<bool, mozilla::ReleaseAcquire> enabled;
+
+ public:
+ StencilCache();
+
+ // An access key is returned when checking whether the cache is enabled. It
+ // should be used in an if statement and provided to all follow-up functions
+ // if it is true-ish.
+ using AccessKey = ExclusiveData<CacheData>::NullableGuard;
+
+  // Not all stencils should be cached; we use the ScriptSource pointer to
+  // decide whether to look further into the cache.
+ AccessKey isSourceCached(ScriptSource* src);
+
+ // Register a source in the cache in order to cache any stencil associated
+ // with this source in the future. To stop caching, the function
+ // clearAndDisable can be used.
+ //
+  // Note: This function should be called once per source, usually right after
+  // creating it.
+ [[nodiscard]] bool startCaching(RefPtr<ScriptSource>&& src);
+
+ // Checks if the cache contains a specific stencil and returns a pointer to
+ // it if it does. Otherwise, returns nullptr.
+ frontend::CompilationStencil* lookup(AccessKey& guard,
+ const StencilContext& key);
+
+ // Adds a newly compiled stencil to the cache. The cache should not contain
+ // any entry for this function before calling this function.
+ [[nodiscard]] bool putNew(AccessKey& guard, const StencilContext& key,
+ frontend::CompilationStencil* value);
+
+  // Prevent any further stencils from being cached, and clear and reclaim the
+  // memory of all stencils held by the cache.
+  //
+  // WARNING: This function should not be called within a scope checking for
+  // isSourceCached, as this would cause a deadlock.
+ void clearAndDisable();
+};
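+
+// A minimal usage sketch for this interface (hypothetical caller; |source|
+// is a RefPtr<ScriptSource> and |extent| a SourceExtent, both illustrative):
+//
+// ```
+// if (auto guard = cache.isSourceCached(source)) {
+//   StencilContext key(source, extent);
+//   if (frontend::CompilationStencil* stencil = cache.lookup(guard, key)) {
+//     // Reuse |stencil| instead of delazifying again.
+//   }
+// }
+// ```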
+
+} /* namespace js */
+
+#endif /* vm_StencilCache_h */
diff --git a/js/src/vm/StencilEnums.h b/js/src/vm/StencilEnums.h
new file mode 100644
index 0000000000..23099aa80d
--- /dev/null
+++ b/js/src/vm/StencilEnums.h
@@ -0,0 +1,346 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_StencilEnums_h
+#define vm_StencilEnums_h
+
+#include <stdint.h> // uint8_t
+
+//
+// Enum definitions shared between frontend, stencil, and the VM.
+//
+
+namespace js {
+
+// [SMDOC] Try Notes
+//
+// Trynotes are attached to regions that are involved with
+// exception unwinding. They can be broken up into four categories:
+//
+// 1. Catch and Finally: Basic exception handling. A Catch trynote
+// covers the range of the associated try. A Finally trynote covers
+// the try and the catch.
+//
+// 2. ForIn and Destructuring: These operations create an iterator
+// which must be cleaned up (by calling IteratorClose) during
+// exception unwinding.
+//
+// 3. ForOf and ForOfIterclose: For-of loops handle unwinding using
+// catch blocks. These trynotes are used for for-of breaks/returns,
+// which create regions that are lexically within a for-of block,
+// but logically outside of it. See TryNoteIter::settle for more
+// details.
+//
+// 4. Loop: This represents normal for/while/do-while loops. It is
+// unnecessary for exception unwinding, but storing the boundaries
+// of loops here is helpful for heuristics that need to know
+// whether a given op is inside a loop.
+enum class TryNoteKind : uint8_t {
+ Catch,
+ Finally,
+ ForIn,
+ Destructuring,
+ ForOf,
+ ForOfIterClose,
+ Loop
+};
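+
+// For example, in |try { A } catch (e) { B } finally { C }|, a Catch trynote
+// covers A and a Finally trynote covers both A and B, per category 1 above
+// (a sketch; the exact bytecode ranges come from the emitter).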
+
+// [SMDOC] Script Flags
+//
+// Interpreted scripts represented by the BaseScript type use two flag words to
+// encode an assortment of conditions and attributes about the script.
+//
+// The "immutable" flags are a combination of input flags describing aspects of
+// the execution context that affect parsing (such as if we are an ES module or
+// normal script), and flags derived from source text. These flags are preserved
+// during cloning and serializing. As well, they should never change after the
+// BaseScript is created (although there are currently a few exceptions for
+// de-/re-lazification that remain).
+//
+// The "mutable" flags are temporary flags that are used by subsystems in the
+// engine such as the debugger or JITs. These flags are not preserved through
+// serialization or cloning since the attributes are generally associated with
+// one specific instance of a BaseScript.
+
+enum class ImmutableScriptFlagsEnum : uint32_t {
+ // Input Flags
+ //
+ // These flags are from CompileOptions or the Parser entry point. They
+ // generally cannot be derived from the source text alone.
+ // ----
+
+ // A script may have one of the following kinds: Global, Eval, Module,
+ // Function. At most one flag can be set, with a default of Global.
+ IsForEval = 1 << 0,
+ IsModule = 1 << 1,
+ IsFunction = 1 << 2,
+
+ // The script is compiled as engine-internal self-hosted JavaScript. This mode
+ // is used to implement certain library functions and has special parse,
+ // bytecode, and runtime behaviour that differs from normal script.
+ SelfHosted = 1 << 3,
+
+ // The script was compiled with the default mode set to strict mode. Note that
+ // this tracks the default value, while the actual mode used (after processing
+ // source and its directives) is the `Strict` flag below.
+ ForceStrict = 1 << 4,
+
+ // The script has a non-syntactic scope on its environment chain. That is,
+ // there may be objects about which we know nothing between the outermost
+ // syntactic scope and the global.
+ HasNonSyntacticScope = 1 << 5,
+
+ // The script return value will not be used and simplified code will be
+ // generated. This can only be applied to top-level scripts. The value this
+ // script returns will be UndefinedValue instead of what the spec normally
+ // prescribes.
+ NoScriptRval = 1 << 6,
+
+ // TreatAsRunOnce roughly indicates that a script is expected to be run no
+ // more than once. This affects optimizations and heuristics.
+ //
+ // On top-level global/eval/module scripts, this is set when the embedding
+ // ensures this script will not be re-used. In this case, parser literals may
+ // be exposed directly instead of being cloned.
+ TreatAsRunOnce = 1 << 7,
+ // ----
+
+ // Parser Flags
+ //
+ // Flags computed by the Parser from the source text and input flags.
+ // ----
+
+ // Generated code will execute in strict mode. This is due to either the
+ // ForceStrict flag being specified above, or due to source text itself (such
+ // as "use strict" directives).
+ Strict = 1 << 8,
+
+ // Script is parsed with a top-level goal of Module. This may be a top-level
+ // or an inner-function script.
+ HasModuleGoal = 1 << 9,
+
+ // Script contains inner functions.
+ //
+  // Note: This prevents relazification since inner functions close over the
+  // current script's scopes.
+ HasInnerFunctions = 1 << 10,
+
+ // There is a direct eval statement in this script OR in any of its inner
+ // functions.
+ //
+ // Note: This prevents relazification since it can introduce inner functions.
+ HasDirectEval = 1 << 11,
+
+ // The (static) bindings of this script must support dynamic name access for
+ // read/write. The environment chain is used to do these dynamic lookups and
+ // optimizations to avoid allocating environments are suppressed.
+ //
+ // This includes direct-eval, `with`, and `delete` in this script OR in any of
+ // its inner functions.
+ //
+ // Note: Access through the arguments object is not considered dynamic binding
+ // access since it does not go through the normal name lookup mechanism.
+ BindingsAccessedDynamically = 1 << 12,
+
+ // A tagged template exists in the body (which will use JSOp::CallSiteObj in
+ // bytecode).
+ //
+ // Note: This prevents relazification since the template's object is
+ // observable to the user and cannot be recreated.
+ HasCallSiteObj = 1 << 13,
+
+ // Parser Flags for Functions
+ // ----
+
+ // This function's initial prototype is one of Function, GeneratorFunction,
+ // AsyncFunction, or AsyncGeneratorFunction as indicated by these flags.
+ //
+ // If either of these flags is set, the script may suspend and resume as it
+ // executes. Stack frames for this script also have a generator object.
+ IsAsync = 1 << 14,
+ IsGenerator = 1 << 15,
+
+ // This function's body serves as the `var` environment for a non-strict
+ // direct eval. This matters because it's the only way bindings can be
+ // dynamically added to a local environment, possibly shadowing other
+ // variables.
+ FunHasExtensibleScope = 1 << 16,
+
+ // This function has an internal .this binding and we need to emit
+ // JSOp::FunctionThis in the prologue to initialize it. This binding may be
+ // used directly for "this", or indirectly (such as class constructors).
+ FunctionHasThisBinding = 1 << 17,
+
+  // This function is a class method that uses an internal [[HomeObject]]
+  // slot. This slot is initialized when the class definition is executed in
+  // the enclosing function.
+ NeedsHomeObject = 1 << 18,
+
+ // This function is a constructor for a derived class. This is a class that
+ // uses the `extends` syntax.
+ IsDerivedClassConstructor = 1 << 19,
+
+ // This function is synthesized by the Parser. This is used for field
+ // initializer lambdas and missing constructors for classes. These functions
+ // have unusual source coordinates and may be hidden from things like
+ // Reflect.parse.
+ IsSyntheticFunction = 1 << 20,
+
+ // This function is a class constructor that has MemberInitializer data
+ // associated with it.
+ UseMemberInitializers = 1 << 21,
+
+ // This function has a rest (`...`) parameter.
+ HasRest = 1 << 22,
+
+ // This function needs a call object or named lambda environment to be created
+ // in order to execute the function. This is done in the Stack or JIT frame
+ // setup code _before_ the bytecode prologue starts.
+ NeedsFunctionEnvironmentObjects = 1 << 23,
+
+ // An extra VarScope is used as the body scope instead of the normal
+ // FunctionScope. This is needed when parameter expressions are used AND the
+ // function has var bindings or a sloppy-direct-eval. For example,
+ // `function(x = eval("")) { var y; }`
+ FunctionHasExtraBodyVarScope = 1 << 24,
+
+ // This function must define the implicit `arguments` binding on the function
+ // scope. If there are no free uses or an appropriate explicit binding exists,
+ // then this flag is unset.
+ //
+ // Note: Parameter expressions will not see an explicit `var arguments;`
+ // binding in the body and an implicit binding on the function-scope must
+ // still be used in that case.
+ ShouldDeclareArguments = 1 << 25,
+
+ // This function has a local (implicit or explicit) `arguments` binding. This
+ // binding is initialized by the JSOp::Arguments bytecode.
+ //
+ // Technically, every function has a binding named `arguments`. Internally,
+ // this binding is only added when `arguments` is mentioned by the function
+ // body.
+ //
+ // Examples:
+ // ```
+ // // Explicit definition
+ // function f() { var arguments; return arguments; }
+ //
+ // // Implicit use
+ // function f() { return arguments; }
+ //
+ // // Implicit use in arrow function
+ // function f() { return () => arguments; }
+ //
+ // // Implicit use in parameter expression
+ // function f(a = arguments) { return a; }
+ // ```
+ NeedsArgsObj = 1 << 26,
+
+ // This function must use the "mapped" form of an arguments object. This flag
+ // is set independently of whether we actually use an `arguments` binding. The
+ // conditions are specified in the ECMAScript spec.
+ HasMappedArgsObj = 1 << 27,
+
+ // Large self-hosted methods that should be inlined anyway by the JIT for
+ // performance reasons can be marked with this flag.
+ IsInlinableLargeFunction = 1 << 28,
+
+ // This function has an internal .newTarget binding and we need to emit
+ // JSOp::NewTarget in the prologue to initialize it. This binding may be
+ // used directly for "new.target", or indirectly (e.g. in super() calls).
+ FunctionHasNewTargetBinding = 1 << 29,
+
+ // Whether this is a self-hosted function that uses the ArgumentsLength or
+ // GetArgument intrinsic.
+ UsesArgumentsIntrinsics = 1 << 30,
+};
+
+enum class MutableScriptFlagsEnum : uint32_t {
+ // Number of times the |warmUpCount| was forcibly discarded. The counter is
+ // reset when a script is successfully jit-compiled.
+ WarmupResets_MASK = 0xFF,
+
+ // If treatAsRunOnce, whether script has executed.
+ HasRunOnce = 1 << 8,
+
+ // Script has been reused for a clone.
+ HasBeenCloned = 1 << 9,
+
+ // Script has an entry in Realm::scriptCountsMap.
+ HasScriptCounts = 1 << 10,
+
+ // Script has an entry in Realm::debugScriptMap.
+ HasDebugScript = 1 << 11,
+
+ // (1 << 12) is unused.
+ // (1 << 13) is unused.
+
+ // Script supports relazification where it releases bytecode and gcthings to
+ // save memory. This process is opt-in since various complexities may disallow
+ // this for some scripts.
+ // NOTE: Must check for isRelazifiable() before setting this flag.
+ AllowRelazify = 1 << 14,
+
+ // Set if the script has opted into spew.
+ SpewEnabled = 1 << 15,
+
+ // Set if we care about a script's final warmup count.
+ NeedsFinalWarmUpCount = 1 << 16,
+
+ //
+ // IonMonkey compilation hints.
+ //
+
+ // Whether Baseline or Ion compilation has been disabled for this script.
+ // IonDisabled is equivalent to |jitScript->canIonCompile() == false| but
+ // JitScript can be discarded on GC and we don't want this to affect
+ // observable behavior (see ArgumentsGetterImpl comment).
+ BaselineDisabled = 1 << 17,
+ IonDisabled = 1 << 18,
+
+ // This script should not be inlined into others. This happens after inlining
+ // has failed.
+ Uninlineable = 1 << 19,
+
+ // Indicates that this script has no eager baseline hint available
+ // in the cache, used to prevent further lookups.
+ NoEagerBaselineHint = 1 << 20,
+
+ // *****************************************************************
+ // The flags below are set when we bail out and invalidate a script.
+ // When we recompile, we will be more conservative.
+ // *****************************************************************
+
+ // A hoisted bounds check bailed out.
+ FailedBoundsCheck = 1 << 21,
+
+ // An instruction hoisted by LICM bailed out.
+ HadLICMInvalidation = 1 << 22,
+
+ // An instruction hoisted by InstructionReordering bailed out.
+ HadReorderingBailout = 1 << 23,
+
+ // An instruction inserted or truncated by Range Analysis bailed out.
+ HadEagerTruncationBailout = 1 << 24,
+
+ // A lexical check bailed out.
+ FailedLexicalCheck = 1 << 25,
+
+ // A guard inserted by phi specialization bailed out.
+ HadSpeculativePhiBailout = 1 << 26,
+
+ // An unbox folded with a load bailed out.
+ HadUnboxFoldingBailout = 1 << 27,
+};
+
+// Retrievable source can be retrieved using the source hook (and therefore
+// need not be XDR'd, can be discarded if desired because it can always be
+// reconstituted later, etc.).
+enum class SourceRetrievable { No = 0, Yes };
+
+} // namespace js
+
+#endif /* vm_StencilEnums_h */
diff --git a/js/src/vm/StencilObject.cpp b/js/src/vm/StencilObject.cpp
new file mode 100644
index 0000000000..da8a144e81
--- /dev/null
+++ b/js/src/vm/StencilObject.cpp
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/StencilObject.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/PodOperations.h" // mozilla::PodCopy
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, INT32_MAX
+
+#include "jsapi.h" // JS_NewObject
+#include "js/Class.h" // JSClassOps, JSClass, JSCLASS_*
+#include "js/ErrorReport.h" // JS_ReportErrorASCII
+#include "js/experimental/JSStencil.h" // JS::Stencil, JS::StencilAddRef, JS::StencilRelease
+#include "js/RootingAPI.h" // JS::Rooted
+#include "js/Utility.h" // js_free
+#include "vm/JSContext.h" // JSContext
+#include "vm/JSObject.h" // JSObject
+
+using namespace js;
+
+/*static */ const JSClassOps StencilObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ StencilObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+/*static */ const JSClass StencilObject::class_ = {
+ "StencilObject",
+ JSCLASS_HAS_RESERVED_SLOTS(StencilObject::ReservedSlots) |
+ JSCLASS_BACKGROUND_FINALIZE,
+ &StencilObject::classOps_};
+
+bool StencilObject::hasStencil() const {
+ // The stencil may not be present yet if we GC during initialization.
+ return !getReservedSlot(StencilSlot).isUndefined();
+}
+
+JS::Stencil* StencilObject::stencil() const {
+ void* ptr = getReservedSlot(StencilSlot).toPrivate();
+ MOZ_ASSERT(ptr);
+ return static_cast<JS::Stencil*>(ptr);
+}
+
+/* static */ StencilObject* StencilObject::create(JSContext* cx,
+ RefPtr<JS::Stencil> stencil) {
+ JS::Rooted<JSObject*> obj(cx, JS_NewObject(cx, &class_));
+ if (!obj) {
+ return nullptr;
+ }
+
+ obj->as<StencilObject>().setReservedSlot(
+ StencilSlot, PrivateValue(stencil.forget().take()));
+
+ return &obj->as<StencilObject>();
+}
+
+/* static */ void StencilObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ if (obj->as<StencilObject>().hasStencil()) {
+ JS::StencilRelease(obj->as<StencilObject>().stencil());
+ }
+}
+
+/*static */ const JSClassOps StencilXDRBufferObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ StencilXDRBufferObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+/*static */ const JSClass StencilXDRBufferObject::class_ = {
+ "StencilXDRBufferObject",
+ JSCLASS_HAS_RESERVED_SLOTS(StencilXDRBufferObject::ReservedSlots) |
+ JSCLASS_BACKGROUND_FINALIZE,
+ &StencilXDRBufferObject::classOps_};
+
+bool StencilXDRBufferObject::hasBuffer() const {
+  // The buffer may not be present yet if we GC during initialization.
+ return !getReservedSlot(BufferSlot).isUndefined();
+}
+
+const uint8_t* StencilXDRBufferObject::buffer() const {
+ void* ptr = getReservedSlot(BufferSlot).toPrivate();
+ MOZ_ASSERT(ptr);
+ return static_cast<const uint8_t*>(ptr);
+}
+
+uint8_t* StencilXDRBufferObject::writableBuffer() {
+ void* ptr = getReservedSlot(BufferSlot).toPrivate();
+ MOZ_ASSERT(ptr);
+ return static_cast<uint8_t*>(ptr);
+}
+
+size_t StencilXDRBufferObject::bufferLength() const {
+ return getReservedSlot(LengthSlot).toInt32();
+}
+
+/* static */ StencilXDRBufferObject* StencilXDRBufferObject::create(
+ JSContext* cx, uint8_t* buffer, size_t length) {
+ if (length >= INT32_MAX) {
+ JS_ReportErrorASCII(cx, "XDR buffer is too long");
+ return nullptr;
+ }
+
+ JS::Rooted<JSObject*> obj(cx, JS_NewObject(cx, &class_));
+ if (!obj) {
+ return nullptr;
+ }
+
+ auto ownedBuffer = cx->make_pod_array<uint8_t>(length);
+ if (!ownedBuffer) {
+ return nullptr;
+ }
+
+ mozilla::PodCopy(ownedBuffer.get(), buffer, length);
+
+ obj->as<StencilXDRBufferObject>().setReservedSlot(
+ BufferSlot, PrivateValue(ownedBuffer.release()));
+ obj->as<StencilXDRBufferObject>().setReservedSlot(LengthSlot,
+ Int32Value(length));
+
+ return &obj->as<StencilXDRBufferObject>();
+}
+
+/* static */ void StencilXDRBufferObject::finalize(JS::GCContext* gcx,
+ JSObject* obj) {
+ if (obj->as<StencilXDRBufferObject>().hasBuffer()) {
+ js_free(obj->as<StencilXDRBufferObject>().writableBuffer());
+ }
+}
diff --git a/js/src/vm/StencilObject.h b/js/src/vm/StencilObject.h
new file mode 100644
index 0000000000..b891c37319
--- /dev/null
+++ b/js/src/vm/StencilObject.h
@@ -0,0 +1,71 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_StencilObject_h
+#define vm_StencilObject_h
+
+#include "mozilla/RefPtr.h" // RefPtr
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t
+
+#include "js/Class.h" // JSClassOps, JSClass
+#include "js/experimental/JSStencil.h" // JS::Stencil
+#include "js/TypeDecls.h"
+#include "vm/NativeObject.h" // NativeObject
+
+class JSObject;
+
+namespace js {
+
+// Object that holds JS::Stencil.
+//
+// This is a testing-only feature which can only be produced by testing
+// functions.
+class StencilObject : public NativeObject {
+ static constexpr size_t StencilSlot = 0;
+ static constexpr size_t ReservedSlots = 1;
+
+ public:
+ static const JSClassOps classOps_;
+ static const JSClass class_;
+
+ bool hasStencil() const;
+ JS::Stencil* stencil() const;
+
+ static StencilObject* create(JSContext* cx, RefPtr<JS::Stencil> stencil);
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+};
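+
+// Sketch of the intended testing-only use (hypothetical caller):
+//
+// ```
+// RefPtr<JS::Stencil> stencil = ...;  // obtained from a compile API
+// Rooted<StencilObject*> obj(cx, StencilObject::create(cx, stencil));
+// if (obj && obj->hasStencil()) {
+//   JS::Stencil* s = obj->stencil();
+// }
+// ```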
+
+// Object that holds Stencil XDR buffer.
+//
+// This is a testing-only feature which can only be produced by testing
+// functions.
+class StencilXDRBufferObject : public NativeObject {
+ static constexpr size_t BufferSlot = 0;
+ static constexpr size_t LengthSlot = 1;
+ static constexpr size_t ReservedSlots = 2;
+
+ public:
+ static const JSClassOps classOps_;
+ static const JSClass class_;
+
+ bool hasBuffer() const;
+ const uint8_t* buffer() const;
+ size_t bufferLength() const;
+
+ private:
+ uint8_t* writableBuffer();
+
+ public:
+ static StencilXDRBufferObject* create(JSContext* cx, uint8_t* buffer,
+ size_t length);
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+};
+
+} /* namespace js */
+
+#endif /* vm_StencilObject_h */
diff --git a/js/src/vm/StringObject-inl.h b/js/src/vm/StringObject-inl.h
new file mode 100644
index 0000000000..55616adeaf
--- /dev/null
+++ b/js/src/vm/StringObject-inl.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_StringObject_inl_h
+#define vm_StringObject_inl_h
+
+#include "vm/StringObject.h"
+
+#include "vm/JSObject-inl.h"
+#include "vm/Shape-inl.h"
+
+namespace js {
+
+/* static */ inline bool StringObject::init(JSContext* cx,
+ Handle<StringObject*> obj,
+ HandleString str) {
+ MOZ_ASSERT(obj->numFixedSlots() == 2);
+
+ if (!SharedShape::ensureInitialCustomShape<StringObject>(cx, obj)) {
+ return false;
+ }
+
+ MOZ_ASSERT(obj->lookup(cx, NameToId(cx->names().length))->slot() ==
+ LENGTH_SLOT);
+
+ obj->setStringThis(str);
+
+ return true;
+}
+
+/* static */ inline StringObject* StringObject::create(JSContext* cx,
+ HandleString str,
+ HandleObject proto,
+ NewObjectKind newKind) {
+ Rooted<StringObject*> obj(
+ cx, NewObjectWithClassProtoAndKind<StringObject>(cx, proto, newKind));
+ if (!obj) {
+ return nullptr;
+ }
+ if (!StringObject::init(cx, obj, str)) {
+ return nullptr;
+ }
+ return obj;
+}
+
+} // namespace js
+
+#endif /* vm_StringObject_inl_h */
diff --git a/js/src/vm/StringObject.h b/js/src/vm/StringObject.h
new file mode 100644
index 0000000000..ad90e649f0
--- /dev/null
+++ b/js/src/vm/StringObject.h
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_StringObject_h
+#define vm_StringObject_h
+
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+
+class Shape;
+
+class StringObject : public NativeObject {
+ static const unsigned PRIMITIVE_VALUE_SLOT = 0;
+ static const unsigned LENGTH_SLOT = 1;
+
+ static const ClassSpec classSpec_;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 2;
+
+ static const JSClass class_;
+
+ /*
+ * Creates a new String object boxing the given string. The object's
+ * [[Prototype]] is determined from context.
+ */
+ static inline StringObject* create(JSContext* cx, HandleString str,
+ HandleObject proto = nullptr,
+ NewObjectKind newKind = GenericObject);
+
+ /*
+ * Compute the initial shape to associate with fresh String objects, which
+ * encodes the initial length property. Return the shape after changing
+ * |obj|'s last property to it.
+ */
+ static SharedShape* assignInitialShape(JSContext* cx,
+ Handle<StringObject*> obj);
+
+ JSString* unbox() const {
+ return getFixedSlot(PRIMITIVE_VALUE_SLOT).toString();
+ }
+
+ inline size_t length() const {
+ return size_t(getFixedSlot(LENGTH_SLOT).toInt32());
+ }
+
+ static size_t offsetOfPrimitiveValue() {
+ return getFixedSlotOffset(PRIMITIVE_VALUE_SLOT);
+ }
+ static size_t offsetOfLength() { return getFixedSlotOffset(LENGTH_SLOT); }
+
+ private:
+ static inline bool init(JSContext* cx, Handle<StringObject*> obj,
+ HandleString str);
+
+ static JSObject* createPrototype(JSContext* cx, JSProtoKey key);
+
+ void setStringThis(JSString* str) {
+ MOZ_ASSERT(getReservedSlot(PRIMITIVE_VALUE_SLOT).isUndefined());
+ setFixedSlot(PRIMITIVE_VALUE_SLOT, StringValue(str));
+ setFixedSlot(LENGTH_SLOT, Int32Value(int32_t(str->length())));
+ }
+};
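+
+// For example, |new String("abc")| boxes the atom "abc": PRIMITIVE_VALUE_SLOT
+// holds the string and LENGTH_SLOT caches Int32Value(3), so length reads
+// (see offsetOfLength) need not touch the string itself.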
+
+} // namespace js
+
+#endif /* vm_StringObject_h */
diff --git a/js/src/vm/StringType-inl.h b/js/src/vm/StringType-inl.h
new file mode 100644
index 0000000000..8abc944c40
--- /dev/null
+++ b/js/src/vm/StringType-inl.h
@@ -0,0 +1,526 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_StringType_inl_h
+#define vm_StringType_inl_h
+
+#include "vm/StringType.h"
+
+#include "mozilla/PodOperations.h"
+#include "mozilla/Range.h"
+
+#include "gc/Allocator.h"
+#include "gc/MaybeRooted.h"
+#include "gc/StoreBuffer.h"
+#include "js/UniquePtr.h"
+#include "vm/JSContext.h"
+#include "vm/StaticStrings.h"
+
+#include "gc/GCContext-inl.h"
+#include "gc/StoreBuffer-inl.h"
+
+namespace js {
+
+// Allocate a thin inline string if possible, and a fat inline string if not.
+template <AllowGC allowGC, typename CharT>
+static MOZ_ALWAYS_INLINE JSInlineString* AllocateInlineString(
+ JSContext* cx, size_t len, CharT** chars, js::gc::Heap heap) {
+ MOZ_ASSERT(JSInlineString::lengthFits<CharT>(len));
+
+ if (JSThinInlineString::lengthFits<CharT>(len)) {
+ return cx->newCell<JSThinInlineString, allowGC>(heap, len, chars);
+ }
+ return cx->newCell<JSFatInlineString, allowGC>(heap, len, chars);
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* AllocateInlineAtom(JSContext* cx, size_t len,
+ CharT** chars,
+ js::HashNumber hash) {
+ MOZ_ASSERT(JSInlineString::lengthFits<CharT>(len));
+
+ if (JSThinInlineString::lengthFits<CharT>(len)) {
+ return cx->newCell<js::NormalAtom, js::NoGC>(len, chars, hash);
+ }
+ return cx->newCell<js::FatInlineAtom, js::NoGC>(len, chars, hash);
+}
+
+// Create a thin inline string if possible, and a fat inline string if not.
+template <AllowGC allowGC, typename CharT>
+static MOZ_ALWAYS_INLINE JSInlineString* NewInlineString(
+ JSContext* cx, mozilla::Range<const CharT> chars,
+ js::gc::Heap heap = js::gc::Heap::Default) {
+ /*
+ * Don't bother trying to find a static atom; measurement shows that not
+ * many get here (for one, Atomize is catching them).
+ */
+
+ size_t len = chars.length();
+ CharT* storage;
+ JSInlineString* str = AllocateInlineString<allowGC>(cx, len, &storage, heap);
+ if (!str) {
+ return nullptr;
+ }
+
+ mozilla::PodCopy(storage, chars.begin().get(), len);
+ return str;
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSAtom* NewInlineAtom(JSContext* cx,
+ const CharT* chars,
+ size_t length,
+ js::HashNumber hash) {
+ CharT* storage;
+ JSAtom* str = AllocateInlineAtom(cx, length, &storage, hash);
+ if (!str) {
+ return nullptr;
+ }
+
+ mozilla::PodCopy(storage, chars, length);
+ return str;
+}
+
+// Create a thin inline string if possible, and a fat inline string if not.
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSInlineString* NewInlineString(
+ JSContext* cx, Handle<JSLinearString*> base, size_t start, size_t length,
+ js::gc::Heap heap) {
+ MOZ_ASSERT(JSInlineString::lengthFits<CharT>(length));
+
+ CharT* chars;
+ JSInlineString* s = AllocateInlineString<CanGC>(cx, length, &chars, heap);
+ if (!s) {
+ return nullptr;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ mozilla::PodCopy(chars, base->chars<CharT>(nogc) + start, length);
+ return s;
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE JSLinearString* TryEmptyOrStaticString(
+ JSContext* cx, const CharT* chars, size_t n) {
+ // Measurements on popular websites indicate empty strings are pretty common
+ // and most strings with length 1 or 2 are in the StaticStrings table. For
+ // length 3 strings that's only about 1%, so we check n <= 2.
+ if (n <= 2) {
+ if (n == 0) {
+ return cx->emptyString();
+ }
+
+ if (JSLinearString* str = cx->staticStrings().lookup(chars, n)) {
+ return str;
+ }
+ }
+
+ return nullptr;
+}
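+
+// For example, TryEmptyOrStaticString(cx, chars, 0) yields the empty string,
+// and a one- or two-character input covered by StaticStrings yields its
+// static atom; anything longer returns nullptr and the caller allocates a
+// fresh string.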
+
+} /* namespace js */
+
+MOZ_ALWAYS_INLINE bool JSString::validateLength(JSContext* maybecx,
+ size_t length) {
+ return validateLengthInternal<js::CanGC>(maybecx, length);
+}
+
+template <js::AllowGC allowGC>
+MOZ_ALWAYS_INLINE bool JSString::validateLengthInternal(JSContext* maybecx,
+ size_t length) {
+ if (MOZ_UNLIKELY(length > JSString::MAX_LENGTH)) {
+ if constexpr (allowGC) {
+ js::ReportOversizedAllocation(maybecx, JSMSG_ALLOC_OVERFLOW);
+ }
+ return false;
+ }
+
+ return true;
+}
+
+template <>
+MOZ_ALWAYS_INLINE const char16_t* JSString::nonInlineCharsRaw() const {
+ return d.s.u2.nonInlineCharsTwoByte;
+}
+
+template <>
+MOZ_ALWAYS_INLINE const JS::Latin1Char* JSString::nonInlineCharsRaw() const {
+ return d.s.u2.nonInlineCharsLatin1;
+}
+
+inline JSRope::JSRope(JSString* left, JSString* right, size_t length) {
+ // JITs expect rope children aren't empty.
+ MOZ_ASSERT(!left->empty() && !right->empty());
+
+ if (left->hasLatin1Chars() && right->hasLatin1Chars()) {
+ setLengthAndFlags(length, INIT_ROPE_FLAGS | LATIN1_CHARS_BIT);
+ } else {
+ setLengthAndFlags(length, INIT_ROPE_FLAGS);
+ }
+ d.s.u2.left = left;
+ d.s.u3.right = right;
+
+ // Post-barrier by inserting into the whole cell buffer if either
+ // this -> left or this -> right is a tenured -> nursery edge.
+ if (isTenured()) {
+ js::gc::StoreBuffer* sb = left->storeBuffer();
+ if (!sb) {
+ sb = right->storeBuffer();
+ }
+ if (sb) {
+ sb->putWholeCell(this);
+ }
+ }
+}
+
+template <js::AllowGC allowGC>
+MOZ_ALWAYS_INLINE JSRope* JSRope::new_(
+ JSContext* cx,
+ typename js::MaybeRooted<JSString*, allowGC>::HandleType left,
+ typename js::MaybeRooted<JSString*, allowGC>::HandleType right,
+ size_t length, js::gc::Heap heap) {
+ if (MOZ_UNLIKELY(!validateLengthInternal<allowGC>(cx, length))) {
+ return nullptr;
+ }
+ return cx->newCell<JSRope, allowGC>(heap, left, right, length);
+}
+
+inline JSDependentString::JSDependentString(JSLinearString* base, size_t start,
+ size_t length) {
+ MOZ_ASSERT(start + length <= base->length());
+ JS::AutoCheckCannotGC nogc;
+ if (base->hasLatin1Chars()) {
+ setLengthAndFlags(length, INIT_DEPENDENT_FLAGS | LATIN1_CHARS_BIT);
+ d.s.u2.nonInlineCharsLatin1 = base->latin1Chars(nogc) + start;
+ } else {
+ setLengthAndFlags(length, INIT_DEPENDENT_FLAGS);
+ d.s.u2.nonInlineCharsTwoByte = base->twoByteChars(nogc) + start;
+ }
+ d.s.u3.base = base;
+ if (isTenured() && !base->isTenured()) {
+ base->storeBuffer()->putWholeCell(this);
+ }
+}
+
+MOZ_ALWAYS_INLINE JSLinearString* JSDependentString::new_(
+ JSContext* cx, JSLinearString* baseArg, size_t start, size_t length,
+ js::gc::Heap heap) {
+ /*
+ * Try to avoid long chains of dependent strings. We can't avoid these
+ * entirely, however, due to how ropes are flattened.
+ */
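+  // (E.g., a substring of a substring is re-based directly onto the root
+  // base here, with the offsets composed.)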
+ if (baseArg->isDependent()) {
+ start += baseArg->asDependent().baseOffset();
+ baseArg = baseArg->asDependent().base();
+ }
+
+ MOZ_ASSERT(start + length <= baseArg->length());
+
+ /*
+ * Do not create a string dependent on inline chars from another string,
+ * both to avoid the awkward moving-GC hazard this introduces and because it
+ * is more efficient to immediately undepend here.
+ */
+ bool useInline = baseArg->hasTwoByteChars()
+ ? JSInlineString::lengthFits<char16_t>(length)
+ : JSInlineString::lengthFits<JS::Latin1Char>(length);
+ if (useInline) {
+ JS::Rooted<JSLinearString*> base(cx, baseArg);
+ return baseArg->hasLatin1Chars()
+ ? js::NewInlineString<JS::Latin1Char>(cx, base, start, length,
+ heap)
+ : js::NewInlineString<char16_t>(cx, base, start, length, heap);
+ }
+
+ JSDependentString* str =
+ cx->newCell<JSDependentString, js::NoGC>(heap, baseArg, start, length);
+ if (str) {
+ return str;
+ }
+
+ JS::Rooted<JSLinearString*> base(cx, baseArg);
+ return cx->newCell<JSDependentString>(heap, base, start, length);
+}
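+
+/*
+ * Worked example of the chain collapsing above (illustrative): if |d|
+ * depends on linear string |L| with baseOffset() == 2, then
+ * JSDependentString::new_(cx, d, 4, n, heap) rebases to depend directly
+ * on |L| with start == 6, so dependent chains never grow past one level
+ * here.
+ */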
+
+inline JSLinearString::JSLinearString(const char16_t* chars, size_t length) {
+ setLengthAndFlags(length, INIT_LINEAR_FLAGS);
+ // Check that the new buffer is located in the StringBufferArena
+ checkStringCharsArena(chars);
+ d.s.u2.nonInlineCharsTwoByte = chars;
+}
+
+inline JSLinearString::JSLinearString(const JS::Latin1Char* chars,
+ size_t length) {
+ setLengthAndFlags(length, INIT_LINEAR_FLAGS | LATIN1_CHARS_BIT);
+ // Check that the new buffer is located in the StringBufferArena
+ checkStringCharsArena(chars);
+ d.s.u2.nonInlineCharsLatin1 = chars;
+}
+
+void JSLinearString::disownCharsBecauseError() {
+ setLengthAndFlags(0, INIT_LINEAR_FLAGS | LATIN1_CHARS_BIT);
+ d.s.u2.nonInlineCharsLatin1 = nullptr;
+}
+
+template <js::AllowGC allowGC, typename CharT>
+MOZ_ALWAYS_INLINE JSLinearString* JSLinearString::new_(
+ JSContext* cx, js::UniquePtr<CharT[], JS::FreePolicy> chars, size_t length,
+ js::gc::Heap heap) {
+ if (MOZ_UNLIKELY(!validateLengthInternal<allowGC>(cx, length))) {
+ return nullptr;
+ }
+
+ return newValidLength<allowGC>(cx, std::move(chars), length, heap);
+}
+
+template <js::AllowGC allowGC, typename CharT>
+MOZ_ALWAYS_INLINE JSLinearString* JSLinearString::newValidLength(
+ JSContext* cx, js::UniquePtr<CharT[], JS::FreePolicy> chars, size_t length,
+ js::gc::Heap heap) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ JSLinearString* str =
+ cx->newCell<JSLinearString, allowGC>(heap, chars.get(), length);
+ if (!str) {
+ return nullptr;
+ }
+
+ if (!str->isTenured()) {
+ // If the following registration fails, the string is partially initialized
+ // and must be made valid, or its finalizer may attempt to free
+ // uninitialized memory.
+ if (!cx->runtime()->gc.nursery().registerMallocedBuffer(
+ chars.get(), length * sizeof(CharT))) {
+ str->disownCharsBecauseError();
+ if (allowGC) {
+ ReportOutOfMemory(cx);
+ }
+ return nullptr;
+ }
+ } else {
+    // The string was allocated tenured, so immediately attribute its char
+    // buffer to the zone's memory accounting.
+ cx->zone()->addCellMemory(str, length * sizeof(CharT),
+ js::MemoryUse::StringContents);
+ }
+
+ (void)chars.release();
+ return str;
+}
+
+template <typename CharT>
+MOZ_ALWAYS_INLINE JSAtom* JSAtom::newValidLength(
+ JSContext* cx, js::UniquePtr<CharT[], JS::FreePolicy> chars, size_t length,
+ js::HashNumber hash) {
+ MOZ_ASSERT(validateLength(cx, length));
+ MOZ_ASSERT(cx->zone()->isAtomsZone());
+ JSAtom* str =
+ cx->newCell<js::NormalAtom, js::NoGC>(chars.get(), length, hash);
+ if (!str) {
+ return nullptr;
+ }
+ (void)chars.release();
+
+ MOZ_ASSERT(str->isTenured());
+ cx->zone()->addCellMemory(str, length * sizeof(CharT),
+ js::MemoryUse::StringContents);
+
+ return str;
+}
+
+inline js::PropertyName* JSLinearString::toPropertyName(JSContext* cx) {
+#ifdef DEBUG
+ uint32_t dummy;
+ MOZ_ASSERT(!isIndex(&dummy));
+#endif
+ if (isAtom()) {
+ return asAtom().asPropertyName();
+ }
+ JSAtom* atom = js::AtomizeString(cx, this);
+ if (!atom) {
+ return nullptr;
+ }
+ return atom->asPropertyName();
+}
+
+template <js::AllowGC allowGC>
+MOZ_ALWAYS_INLINE JSThinInlineString* JSThinInlineString::new_(
+ JSContext* cx, js::gc::Heap heap) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ return cx->newCell<JSThinInlineString, allowGC>(heap);
+}
+
+template <js::AllowGC allowGC>
+MOZ_ALWAYS_INLINE JSFatInlineString* JSFatInlineString::new_(
+ JSContext* cx, js::gc::Heap heap) {
+ MOZ_ASSERT(!cx->zone()->isAtomsZone());
+ return cx->newCell<JSFatInlineString, allowGC>(heap);
+}
+
+inline JSThinInlineString::JSThinInlineString(size_t length,
+ JS::Latin1Char** chars) {
+ MOZ_ASSERT(lengthFits<JS::Latin1Char>(length));
+ setLengthAndFlags(length, INIT_THIN_INLINE_FLAGS | LATIN1_CHARS_BIT);
+ *chars = d.inlineStorageLatin1;
+}
+
+inline JSThinInlineString::JSThinInlineString(size_t length, char16_t** chars) {
+ MOZ_ASSERT(lengthFits<char16_t>(length));
+ setLengthAndFlags(length, INIT_THIN_INLINE_FLAGS);
+ *chars = d.inlineStorageTwoByte;
+}
+
+inline JSFatInlineString::JSFatInlineString(size_t length,
+ JS::Latin1Char** chars) {
+ MOZ_ASSERT(lengthFits<JS::Latin1Char>(length));
+ setLengthAndFlags(length, INIT_FAT_INLINE_FLAGS | LATIN1_CHARS_BIT);
+ *chars = d.inlineStorageLatin1;
+}
+
+inline JSFatInlineString::JSFatInlineString(size_t length, char16_t** chars) {
+ MOZ_ASSERT(lengthFits<char16_t>(length));
+ setLengthAndFlags(length, INIT_FAT_INLINE_FLAGS);
+ *chars = d.inlineStorageTwoByte;
+}
+
+inline JSExternalString::JSExternalString(
+ const char16_t* chars, size_t length,
+ const JSExternalStringCallbacks* callbacks) {
+ MOZ_ASSERT(callbacks);
+ setLengthAndFlags(length, EXTERNAL_FLAGS);
+ d.s.u2.nonInlineCharsTwoByte = chars;
+ d.s.u3.externalCallbacks = callbacks;
+}
+
+MOZ_ALWAYS_INLINE JSExternalString* JSExternalString::new_(
+ JSContext* cx, const char16_t* chars, size_t length,
+ const JSExternalStringCallbacks* callbacks) {
+ if (MOZ_UNLIKELY(!validateLength(cx, length))) {
+ return nullptr;
+ }
+ auto* str = cx->newCell<JSExternalString>(chars, length, callbacks);
+ if (!str) {
+ return nullptr;
+ }
+ size_t nbytes = length * sizeof(char16_t);
+
+ MOZ_ASSERT(str->isTenured());
+ js::AddCellMemory(str, nbytes, js::MemoryUse::StringContents);
+
+ return str;
+}
+
+inline js::NormalAtom::NormalAtom(size_t length, JS::Latin1Char** chars,
+ js::HashNumber hash)
+ : hash_(hash) {
+ MOZ_ASSERT(JSInlineString::lengthFits<JS::Latin1Char>(length));
+ setLengthAndFlags(length,
+ INIT_THIN_INLINE_FLAGS | LATIN1_CHARS_BIT | ATOM_BIT);
+ *chars = d.inlineStorageLatin1;
+}
+
+inline js::NormalAtom::NormalAtom(size_t length, char16_t** chars,
+ js::HashNumber hash)
+ : hash_(hash) {
+ MOZ_ASSERT(JSInlineString::lengthFits<char16_t>(length));
+ setLengthAndFlags(length, INIT_THIN_INLINE_FLAGS | ATOM_BIT);
+ *chars = d.inlineStorageTwoByte;
+}
+
+inline js::NormalAtom::NormalAtom(const char16_t* chars, size_t length,
+ js::HashNumber hash)
+ : hash_(hash) {
+ setLengthAndFlags(length, INIT_LINEAR_FLAGS | ATOM_BIT);
+ // Check that the new buffer is located in the StringBufferArena
+ checkStringCharsArena(chars);
+ d.s.u2.nonInlineCharsTwoByte = chars;
+}
+
+inline js::NormalAtom::NormalAtom(const JS::Latin1Char* chars, size_t length,
+ js::HashNumber hash)
+ : hash_(hash) {
+ setLengthAndFlags(length, INIT_LINEAR_FLAGS | LATIN1_CHARS_BIT | ATOM_BIT);
+ // Check that the new buffer is located in the StringBufferArena
+ checkStringCharsArena(chars);
+ d.s.u2.nonInlineCharsLatin1 = chars;
+}
+
+inline js::FatInlineAtom::FatInlineAtom(size_t length, JS::Latin1Char** chars,
+ js::HashNumber hash)
+ : hash_(hash) {
+ MOZ_ASSERT(JSFatInlineString::lengthFits<JS::Latin1Char>(length));
+ setLengthAndFlags(length,
+ INIT_FAT_INLINE_FLAGS | LATIN1_CHARS_BIT | ATOM_BIT);
+ *chars = d.inlineStorageLatin1;
+}
+
+inline js::FatInlineAtom::FatInlineAtom(size_t length, char16_t** chars,
+ js::HashNumber hash)
+ : hash_(hash) {
+ MOZ_ASSERT(JSFatInlineString::lengthFits<char16_t>(length));
+ setLengthAndFlags(length, INIT_FAT_INLINE_FLAGS | ATOM_BIT);
+ *chars = d.inlineStorageTwoByte;
+}
+
+inline JSLinearString* js::StaticStrings::getUnitStringForElement(
+ JSContext* cx, JSString* str, size_t index) {
+ MOZ_ASSERT(index < str->length());
+
+ char16_t c;
+ if (!str->getChar(cx, index, &c)) {
+ return nullptr;
+ }
+ if (c < UNIT_STATIC_LIMIT) {
+ return getUnit(c);
+ }
+ return js::NewInlineString<CanGC>(cx, mozilla::Range<const char16_t>(&c, 1),
+ js::gc::Heap::Default);
+}
+
+MOZ_ALWAYS_INLINE void JSString::finalize(JS::GCContext* gcx) {
+ /* FatInline strings are in a different arena. */
+ MOZ_ASSERT(getAllocKind() != js::gc::AllocKind::FAT_INLINE_STRING);
+ MOZ_ASSERT(getAllocKind() != js::gc::AllocKind::FAT_INLINE_ATOM);
+
+ if (isLinear()) {
+ asLinear().finalize(gcx);
+ } else {
+ MOZ_ASSERT(isRope());
+ }
+}
+
+inline void JSLinearString::finalize(JS::GCContext* gcx) {
+ MOZ_ASSERT(getAllocKind() != js::gc::AllocKind::FAT_INLINE_STRING);
+ MOZ_ASSERT(getAllocKind() != js::gc::AllocKind::FAT_INLINE_ATOM);
+
+ if (!isInline() && !isDependent()) {
+ gcx->free_(this, nonInlineCharsRaw(), allocSize(),
+ js::MemoryUse::StringContents);
+ }
+}
+
+inline void JSFatInlineString::finalize(JS::GCContext* gcx) {
+ MOZ_ASSERT(getAllocKind() == js::gc::AllocKind::FAT_INLINE_STRING);
+ MOZ_ASSERT(isInline());
+
+ // Nothing to do.
+}
+
+inline void js::FatInlineAtom::finalize(JS::GCContext* gcx) {
+ MOZ_ASSERT(JSString::isAtom());
+ MOZ_ASSERT(getAllocKind() == js::gc::AllocKind::FAT_INLINE_ATOM);
+
+ // Nothing to do.
+}
+
+inline void JSExternalString::finalize(JS::GCContext* gcx) {
+ MOZ_ASSERT(JSString::isExternal());
+
+ size_t nbytes = length() * sizeof(char16_t);
+ gcx->removeCellMemory(this, nbytes, js::MemoryUse::StringContents);
+
+ callbacks()->finalize(const_cast<char16_t*>(rawTwoByteChars()));
+}
+
+#endif /* vm_StringType_inl_h */
diff --git a/js/src/vm/StringType.cpp b/js/src/vm/StringType.cpp
new file mode 100644
index 0000000000..715d673b97
--- /dev/null
+++ b/js/src/vm/StringType.cpp
@@ -0,0 +1,2276 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/StringType-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Latin1.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/RangedPtr.h"
+#include "mozilla/TextUtils.h"
+#include "mozilla/Utf8.h"
+#include "mozilla/Vector.h"
+
+#include <algorithm> // std::{all_of,copy_n,enable_if,is_const,move}
+#include <iterator> // std::size
+#include <type_traits> // std::is_same, std::is_unsigned
+
+#include "jsfriendapi.h"
+#include "jsnum.h"
+
+#include "builtin/Boolean.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "builtin/RecordObject.h"
+#endif
+#include "frontend/BytecodeCompiler.h"
+#include "gc/AllocKind.h"
+#include "gc/MaybeRooted.h"
+#include "gc/Nursery.h"
+#include "js/CharacterEncoding.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/PropertyAndElement.h" // JS_DefineElement
+#include "js/StableStringChars.h"
+#include "js/UbiNode.h"
+#include "util/Unicode.h"
+#include "vm/GeckoProfiler.h"
+#include "vm/StaticStrings.h"
+#include "vm/ToSource.h" // js::ValueToSource
+
+#include "gc/Marking-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordType.h"
+# include "vm/TupleType.h"
+#endif
+
+using namespace js;
+
+using mozilla::AsWritableChars;
+using mozilla::ConvertLatin1toUtf16;
+using mozilla::IsAsciiDigit;
+using mozilla::IsUtf16Latin1;
+using mozilla::LossyConvertUtf16toLatin1;
+using mozilla::PodCopy;
+using mozilla::RangedPtr;
+using mozilla::RoundUpPow2;
+using mozilla::Span;
+
+using JS::AutoCheckCannotGC;
+using JS::AutoStableStringChars;
+
+using UniqueLatin1Chars = UniquePtr<Latin1Char[], JS::FreePolicy>;
+
+size_t JSString::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ // JSRope: do nothing, we'll count all children chars when we hit the leaf
+ // strings.
+ if (isRope()) {
+ return 0;
+ }
+
+ MOZ_ASSERT(isLinear());
+
+ // JSDependentString: do nothing, we'll count the chars when we hit the base
+ // string.
+ if (isDependent()) {
+ return 0;
+ }
+
+ // JSExternalString: Ask the embedding to tell us what's going on.
+ if (isExternal()) {
+ // Our callback isn't supposed to cause GC.
+ JS::AutoSuppressGCAnalysis nogc;
+ return asExternal().callbacks()->sizeOfBuffer(asExternal().twoByteChars(),
+ mallocSizeOf);
+ }
+
+ // JSExtensibleString: count the full capacity, not just the used space.
+ if (isExtensible()) {
+ JSExtensibleString& extensible = asExtensible();
+ return extensible.hasLatin1Chars()
+ ? mallocSizeOf(extensible.rawLatin1Chars())
+ : mallocSizeOf(extensible.rawTwoByteChars());
+ }
+
+ // JSInlineString, JSFatInlineString [JSInlineAtom, JSFatInlineAtom]: the
+ // chars are inline.
+ if (isInline()) {
+ return 0;
+ }
+
+ // Everything else: measure the space for the chars.
+ JSLinearString& linear = asLinear();
+ MOZ_ASSERT(linear.ownsMallocedChars());
+ return linear.hasLatin1Chars() ? mallocSizeOf(linear.rawLatin1Chars())
+ : mallocSizeOf(linear.rawTwoByteChars());
+}
+
+JS::ubi::Node::Size JS::ubi::Concrete<JSString>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ JSString& str = get();
+ size_t size;
+ if (str.isAtom()) {
+ size =
+ str.isFatInline() ? sizeof(js::FatInlineAtom) : sizeof(js::NormalAtom);
+ } else {
+ size = str.isFatInline() ? sizeof(JSFatInlineString) : sizeof(JSString);
+ }
+
+ if (IsInsideNursery(&str)) {
+ size += Nursery::nurseryCellHeaderSize();
+ }
+
+ size += str.sizeOfExcludingThis(mallocSizeOf);
+
+ return size;
+}
+
+const char16_t JS::ubi::Concrete<JSString>::concreteTypeName[] = u"JSString";
+
+mozilla::Maybe<std::tuple<size_t, size_t>> JSString::encodeUTF8Partial(
+ const JS::AutoRequireNoGC& nogc, mozilla::Span<char> buffer) const {
+ mozilla::Vector<const JSString*, 16, SystemAllocPolicy> stack;
+ const JSString* current = this;
+ char16_t pendingLeadSurrogate = 0; // U+0000 means no pending lead surrogate
+ size_t totalRead = 0;
+ size_t totalWritten = 0;
+ for (;;) {
+ if (current->isRope()) {
+ JSRope& rope = current->asRope();
+ if (!stack.append(rope.rightChild())) {
+ // OOM
+ return mozilla::Nothing();
+ }
+ current = rope.leftChild();
+ continue;
+ }
+
+ JSLinearString& linear = current->asLinear();
+ if (MOZ_LIKELY(linear.hasLatin1Chars())) {
+ if (MOZ_UNLIKELY(pendingLeadSurrogate)) {
+ if (buffer.Length() < 3) {
+ return mozilla::Some(std::make_tuple(totalRead, totalWritten));
+ }
+ buffer[0] = '\xEF';
+ buffer[1] = '\xBF';
+ buffer[2] = '\xBD';
+ buffer = buffer.From(3);
+ totalRead += 1; // pendingLeadSurrogate
+ totalWritten += 3;
+ pendingLeadSurrogate = 0;
+ }
+ auto src = mozilla::AsChars(
+ mozilla::Span(linear.latin1Chars(nogc), linear.length()));
+ size_t read;
+ size_t written;
+ std::tie(read, written) =
+ mozilla::ConvertLatin1toUtf8Partial(src, buffer);
+ buffer = buffer.From(written);
+ totalRead += read;
+ totalWritten += written;
+ if (read < src.Length()) {
+ return mozilla::Some(std::make_tuple(totalRead, totalWritten));
+ }
+ } else {
+ auto src = mozilla::Span(linear.twoByteChars(nogc), linear.length());
+ if (MOZ_UNLIKELY(pendingLeadSurrogate)) {
+ char16_t first = 0;
+ if (!src.IsEmpty()) {
+ first = src[0];
+ }
+ if (unicode::IsTrailSurrogate(first)) {
+ // Got a surrogate pair
+ if (buffer.Length() < 4) {
+ return mozilla::Some(std::make_tuple(totalRead, totalWritten));
+ }
+ uint32_t astral = unicode::UTF16Decode(pendingLeadSurrogate, first);
+ buffer[0] = char(0b1111'0000 | (astral >> 18));
+ buffer[1] = char(0b1000'0000 | ((astral >> 12) & 0b11'1111));
+ buffer[2] = char(0b1000'0000 | ((astral >> 6) & 0b11'1111));
+ buffer[3] = char(0b1000'0000 | (astral & 0b11'1111));
+ src = src.From(1);
+ buffer = buffer.From(4);
+ totalRead += 2; // both pendingLeadSurrogate and first!
+ totalWritten += 4;
+ } else {
+ // unpaired surrogate
+ if (buffer.Length() < 3) {
+ return mozilla::Some(std::make_tuple(totalRead, totalWritten));
+ }
+ buffer[0] = '\xEF';
+ buffer[1] = '\xBF';
+ buffer[2] = '\xBD';
+ buffer = buffer.From(3);
+ totalRead += 1; // pendingLeadSurrogate
+ totalWritten += 3;
+ }
+ pendingLeadSurrogate = 0;
+ }
+ if (!src.IsEmpty()) {
+ char16_t last = src[src.Length() - 1];
+ if (unicode::IsLeadSurrogate(last)) {
+ src = src.To(src.Length() - 1);
+ pendingLeadSurrogate = last;
+ } else {
+ MOZ_ASSERT(!pendingLeadSurrogate);
+ }
+ size_t read;
+ size_t written;
+ std::tie(read, written) =
+ mozilla::ConvertUtf16toUtf8Partial(src, buffer);
+ buffer = buffer.From(written);
+ totalRead += read;
+ totalWritten += written;
+ if (read < src.Length()) {
+ return mozilla::Some(std::make_tuple(totalRead, totalWritten));
+ }
+ }
+ }
+ if (stack.empty()) {
+ break;
+ }
+ current = stack.popCopy();
+ }
+ if (MOZ_UNLIKELY(pendingLeadSurrogate)) {
+ if (buffer.Length() < 3) {
+ return mozilla::Some(std::make_tuple(totalRead, totalWritten));
+ }
+ buffer[0] = '\xEF';
+ buffer[1] = '\xBF';
+ buffer[2] = '\xBD';
+ // No need to update buffer and pendingLeadSurrogate anymore
+ totalRead += 1;
+ totalWritten += 3;
+ }
+ return mozilla::Some(std::make_tuple(totalRead, totalWritten));
+}
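+
+/*
+ * Worked example of the astral-plane bit math above (illustrative): the
+ * surrogate pair <U+D83D, U+DE00> decodes to astral == 0x1F600, which is
+ * emitted as the four UTF-8 bytes
+ *
+ *   0b1111'0000 | (0x1F600 >> 18)          == 0xF0
+ *   0b1000'0000 | ((0x1F600 >> 12) & 0x3F) == 0x9F
+ *   0b1000'0000 | ((0x1F600 >> 6) & 0x3F)  == 0x98
+ *   0b1000'0000 | (0x1F600 & 0x3F)         == 0x80
+ *
+ * while an unpaired surrogate is replaced by U+FFFD (EF BF BD).
+ */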
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+
+template <typename CharT>
+/* static */
+void JSString::dumpChars(const CharT* s, size_t n, js::GenericPrinter& out) {
+ if (n == SIZE_MAX) {
+ n = 0;
+ while (s[n]) {
+ n++;
+ }
+ }
+
+ out.put("\"");
+ dumpCharsNoQuote(s, n, out);
+ out.putChar('"');
+}
+
+template void JSString::dumpChars(const Latin1Char* s, size_t n,
+ js::GenericPrinter& out);
+
+template void JSString::dumpChars(const char16_t* s, size_t n,
+ js::GenericPrinter& out);
+
+template <typename CharT>
+/* static */
+void JSString::dumpCharsNoQuote(const CharT* s, size_t n,
+ js::GenericPrinter& out) {
+ for (size_t i = 0; i < n; i++) {
+ char16_t c = s[i];
+ if (c == '\n') {
+ out.put("\\n");
+ } else if (c == '\t') {
+ out.put("\\t");
+ } else if (c >= 32 && c < 127) {
+ out.putChar((char)s[i]);
+ } else if (c <= 255) {
+ out.printf("\\x%02x", unsigned(c));
+ } else {
+ out.printf("\\u%04x", unsigned(c));
+ }
+ }
+}
+
+template void JSString::dumpCharsNoQuote(const Latin1Char* s, size_t n,
+ js::GenericPrinter& out);
+
+template void JSString::dumpCharsNoQuote(const char16_t* s, size_t n,
+ js::GenericPrinter& out);
+
+void JSString::dumpCharsNoNewline(js::GenericPrinter& out) {
+ if (JSLinearString* linear = ensureLinear(nullptr)) {
+ AutoCheckCannotGC nogc;
+ if (hasLatin1Chars()) {
+ out.put("[Latin 1]");
+ dumpChars(linear->latin1Chars(nogc), length(), out);
+ } else {
+ out.put("[2 byte]");
+ dumpChars(linear->twoByteChars(nogc), length(), out);
+ }
+ } else {
+ out.put("(oom in JSString::dumpCharsNoNewline)");
+ }
+}
+
+void JSString::dumpCharsNoQuote(js::GenericPrinter& out) {
+ if (JSLinearString* linear = ensureLinear(nullptr)) {
+ AutoCheckCannotGC nogc;
+ if (hasLatin1Chars()) {
+ dumpCharsNoQuote(linear->latin1Chars(nogc), length(), out);
+ } else {
+ dumpCharsNoQuote(linear->twoByteChars(nogc), length(), out);
+ }
+ } else {
+ out.put("(oom in JSString::dumpCharsNoNewline)");
+ }
+}
+
+void JSString::dump() {
+ js::Fprinter out(stderr);
+ dump(out);
+}
+
+void JSString::dump(js::GenericPrinter& out) {
+ dumpNoNewline(out);
+ out.putChar('\n');
+}
+
+void JSString::dumpNoNewline(js::GenericPrinter& out) {
+ if (JSLinearString* linear = ensureLinear(nullptr)) {
+ AutoCheckCannotGC nogc;
+ if (hasLatin1Chars()) {
+ const Latin1Char* chars = linear->latin1Chars(nogc);
+ out.printf("JSString* (%p) = Latin1Char * (%p) = ", (void*)this,
+ (void*)chars);
+ dumpChars(chars, length(), out);
+ } else {
+ const char16_t* chars = linear->twoByteChars(nogc);
+ out.printf("JSString* (%p) = char16_t * (%p) = ", (void*)this,
+ (void*)chars);
+ dumpChars(chars, length(), out);
+ }
+ } else {
+ out.put("(oom in JSString::dump)");
+ }
+}
+
+void JSString::dumpRepresentation(js::GenericPrinter& out, int indent) const {
+ if (isRope()) {
+ asRope().dumpRepresentation(out, indent);
+ } else if (isDependent()) {
+ asDependent().dumpRepresentation(out, indent);
+ } else if (isExternal()) {
+ asExternal().dumpRepresentation(out, indent);
+ } else if (isExtensible()) {
+ asExtensible().dumpRepresentation(out, indent);
+ } else if (isInline()) {
+ asInline().dumpRepresentation(out, indent);
+ } else if (isLinear()) {
+ asLinear().dumpRepresentation(out, indent);
+ } else {
+ MOZ_CRASH("Unexpected JSString representation");
+ }
+}
+
+void JSString::dumpRepresentationHeader(js::GenericPrinter& out,
+ const char* subclass) const {
+ uint32_t flags = JSString::flags();
+ // Print the string's address as an actual C++ expression, to facilitate
+ // copy-and-paste into a debugger.
+ out.printf("((%s*) %p) length: %zu flags: 0x%x", subclass, this, length(),
+ flags);
+ if (flags & LINEAR_BIT) out.put(" LINEAR");
+ if (flags & DEPENDENT_BIT) out.put(" DEPENDENT");
+ if (flags & INLINE_CHARS_BIT) out.put(" INLINE_CHARS");
+ if (flags & ATOM_BIT)
+ out.put(" ATOM");
+ else
+ out.put(" (NON ATOM)");
+ if (isPermanentAtom()) out.put(" PERMANENT");
+ if (flags & LATIN1_CHARS_BIT) out.put(" LATIN1");
+ if (flags & INDEX_VALUE_BIT) out.printf(" INDEX_VALUE(%u)", getIndexValue());
+ if (!isTenured()) out.put(" NURSERY");
+ out.putChar('\n');
+}
+
+void JSLinearString::dumpRepresentationChars(js::GenericPrinter& out,
+ int indent) const {
+ if (hasLatin1Chars()) {
+ out.printf("%*schars: ((Latin1Char*) %p) ", indent, "", rawLatin1Chars());
+ dumpChars(rawLatin1Chars(), length(), out);
+ } else {
+ out.printf("%*schars: ((char16_t*) %p) ", indent, "", rawTwoByteChars());
+ dumpChars(rawTwoByteChars(), length(), out);
+ }
+ out.putChar('\n');
+}
+
+bool JSString::equals(const char* s) {
+ JSLinearString* linear = ensureLinear(nullptr);
+ if (!linear) {
+ // This is DEBUG-only code.
+ fprintf(stderr, "OOM in JSString::equals!\n");
+ return false;
+ }
+
+ return StringEqualsAscii(linear, s);
+}
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW) */
+
+JSExtensibleString& JSLinearString::makeExtensible(size_t capacity) {
+ MOZ_ASSERT(!isDependent());
+ MOZ_ASSERT(!isInline());
+ MOZ_ASSERT(!isAtom());
+ MOZ_ASSERT(!isExternal());
+ MOZ_ASSERT(capacity >= length());
+ js::RemoveCellMemory(this, allocSize(), js::MemoryUse::StringContents);
+ setLengthAndFlags(length(), flags() | EXTENSIBLE_FLAGS);
+ d.s.u3.capacity = capacity;
+ js::AddCellMemory(this, allocSize(), js::MemoryUse::StringContents);
+ return asExtensible();
+}
+
+template <typename CharT>
+static MOZ_ALWAYS_INLINE bool AllocChars(JSString* str, size_t length,
+ CharT** chars, size_t* capacity) {
+ /*
+ * Grow by 12.5% if the buffer is very large. Otherwise, round up to the
+ * next power of 2. This is similar to what we do with arrays; see
+ * JSObject::ensureDenseArrayElements.
+ */
+ static const size_t DOUBLING_MAX = 1024 * 1024;
+ *capacity =
+ length > DOUBLING_MAX ? length + (length / 8) : RoundUpPow2(length);
+
+ static_assert(JSString::MAX_LENGTH * sizeof(CharT) <= UINT32_MAX);
+ *chars =
+ str->zone()->pod_arena_malloc<CharT>(js::StringBufferArena, *capacity);
+ return *chars != nullptr;
+}
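+
+/*
+ * Capacity arithmetic for the policy above (illustrative): a requested
+ * length of 1000 rounds up to the next power of 2, so *capacity becomes
+ * 1024; a length of 2'000'000 exceeds DOUBLING_MAX, so it instead grows
+ * by 12.5% to 2'000'000 + 250'000 == 2'250'000 elements.
+ */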
+
+UniqueLatin1Chars JSRope::copyLatin1Chars(JSContext* maybecx,
+ arena_id_t destArenaId) const {
+ return copyCharsInternal<Latin1Char>(maybecx, destArenaId);
+}
+
+UniqueTwoByteChars JSRope::copyTwoByteChars(JSContext* maybecx,
+ arena_id_t destArenaId) const {
+ return copyCharsInternal<char16_t>(maybecx, destArenaId);
+}
+
+template <typename CharT>
+UniquePtr<CharT[], JS::FreePolicy> JSRope::copyCharsInternal(
+ JSContext* maybecx, arena_id_t destArenaId) const {
+ // Left-leaning ropes are far more common than right-leaning ropes, so
+ // perform a non-destructive traversal of the rope, right node first,
+ // splatting each node's characters into a contiguous buffer.
+
+ size_t n = length();
+
+ UniquePtr<CharT[], JS::FreePolicy> out;
+ if (maybecx) {
+ out.reset(maybecx->pod_arena_malloc<CharT>(destArenaId, n));
+ } else {
+ out.reset(js_pod_arena_malloc<CharT>(destArenaId, n));
+ }
+
+ if (!out) {
+ return nullptr;
+ }
+
+ Vector<const JSString*, 8, SystemAllocPolicy> nodeStack;
+ const JSString* str = this;
+ CharT* end = out.get() + str->length();
+ while (true) {
+ if (str->isRope()) {
+ if (!nodeStack.append(str->asRope().leftChild())) {
+ if (maybecx) {
+ ReportOutOfMemory(maybecx);
+ }
+ return nullptr;
+ }
+ str = str->asRope().rightChild();
+ } else {
+ end -= str->length();
+ CopyChars(end, str->asLinear());
+ if (nodeStack.empty()) {
+ break;
+ }
+ str = nodeStack.popCopy();
+ }
+ }
+ MOZ_ASSERT(end == out.get());
+
+ return out;
+}
+
+template <typename CharT>
+void AddStringToHash(uint32_t* hash, const CharT* chars, size_t len) {
+  // It's tempting to use |HashString| instead of this loop, but that's
+  // slightly different from our existing implementation for non-ropes. We
+  // want to pretend we have a contiguous set of chars, so we need to
+  // accumulate char by char rather than generate a new hash for each
+  // substring and then accumulate that.
+ for (size_t i = 0; i < len; i++) {
+ *hash = mozilla::AddToHash(*hash, chars[i]);
+ }
+}
+
+void AddStringToHash(uint32_t* hash, const JSString* str) {
+ AutoCheckCannotGC nogc;
+ const auto& s = str->asLinear();
+ if (s.hasLatin1Chars()) {
+ AddStringToHash(hash, s.latin1Chars(nogc), s.length());
+ } else {
+ AddStringToHash(hash, s.twoByteChars(nogc), s.length());
+ }
+}
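+
+/*
+ * Sketch (illustrative; |leftLeaf| and |rightLeaf| are hypothetical
+ * linear leaves): because hashing accumulates one char at a time, a rope
+ * built as "ab" + "c" hashes identically to the linear string "abc":
+ *
+ *   uint32_t h = 0;
+ *   AddStringToHash(&h, leftLeaf);   // feeds 'a', then 'b'
+ *   AddStringToHash(&h, rightLeaf);  // feeds 'c'
+ *   // h now equals the hash of the contiguous chars "abc".
+ */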
+
+bool JSRope::hash(uint32_t* outHash) const {
+ Vector<const JSString*, 8, SystemAllocPolicy> nodeStack;
+ const JSString* str = this;
+
+ *outHash = 0;
+
+ while (true) {
+ if (str->isRope()) {
+ if (!nodeStack.append(str->asRope().rightChild())) {
+ return false;
+ }
+ str = str->asRope().leftChild();
+ } else {
+ AddStringToHash(outHash, str);
+ if (nodeStack.empty()) {
+ break;
+ }
+ str = nodeStack.popCopy();
+ }
+ }
+
+ return true;
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+void JSRope::dumpRepresentation(js::GenericPrinter& out, int indent) const {
+ dumpRepresentationHeader(out, "JSRope");
+ indent += 2;
+
+ out.printf("%*sleft: ", indent, "");
+ leftChild()->dumpRepresentation(out, indent);
+
+ out.printf("%*sright: ", indent, "");
+ rightChild()->dumpRepresentation(out, indent);
+}
+#endif
+
+namespace js {
+
+template <>
+void CopyChars(char16_t* dest, const JSLinearString& str) {
+ AutoCheckCannotGC nogc;
+ if (str.hasTwoByteChars()) {
+ PodCopy(dest, str.twoByteChars(nogc), str.length());
+ } else {
+ CopyAndInflateChars(dest, str.latin1Chars(nogc), str.length());
+ }
+}
+
+template <>
+void CopyChars(Latin1Char* dest, const JSLinearString& str) {
+ AutoCheckCannotGC nogc;
+ if (str.hasLatin1Chars()) {
+ PodCopy(dest, str.latin1Chars(nogc), str.length());
+ } else {
+ /*
+ * When we flatten a TwoByte rope, we turn child ropes (including Latin1
+ * ropes) into TwoByte dependent strings. If one of these strings is
+ * also part of another Latin1 rope tree, we can have a Latin1 rope with
+     * a TwoByte descendant and we end up here when we flatten it. Although
+ * the chars are stored as TwoByte, we know they must be in the Latin1
+ * range, so we can safely deflate here.
+ */
+ size_t len = str.length();
+ const char16_t* chars = str.twoByteChars(nogc);
+ auto src = Span(chars, len);
+ MOZ_ASSERT(IsUtf16Latin1(src));
+ LossyConvertUtf16toLatin1(src, AsWritableChars(Span(dest, len)));
+ }
+}
+
+} /* namespace js */
+
+template <typename CharT>
+static constexpr uint32_t StringFlagsForCharType(uint32_t baseFlags) {
+ if constexpr (std::is_same_v<CharT, char16_t>) {
+ return baseFlags;
+ }
+
+ return baseFlags | JSString::LATIN1_CHARS_BIT;
+}
+
+static bool UpdateNurseryBuffersOnTransfer(js::Nursery& nursery, JSString* from,
+ JSString* to, void* buffer,
+ size_t size) {
+ // Update the list of buffers associated with nursery cells when |buffer| is
+ // moved from string |from| to string |to|, depending on whether those strings
+ // are in the nursery or not.
+
+ if (from->isTenured() && !to->isTenured()) {
+ // Tenured leftmost child is giving its chars buffer to the
+ // nursery-allocated root node.
+ if (!nursery.registerMallocedBuffer(buffer, size)) {
+ return false;
+ }
+ } else if (!from->isTenured() && to->isTenured()) {
+ // Leftmost child is giving its nursery-held chars buffer to a
+ // tenured string.
+ nursery.removeMallocedBuffer(buffer, size);
+ }
+
+ return true;
+}
+
+static bool CanReuseLeftmostBuffer(JSString* leftmostChild, size_t wholeLength,
+ bool hasTwoByteChars) {
+ if (!leftmostChild->isExtensible()) {
+ return false;
+ }
+
+ JSExtensibleString& str = leftmostChild->asExtensible();
+ return str.capacity() >= wholeLength &&
+ str.hasTwoByteChars() == hasTwoByteChars;
+}
+
+JSLinearString* JSRope::flatten(JSContext* maybecx) {
+ MOZ_ASSERT_IF(maybecx, maybecx->isMainThreadContext());
+
+ mozilla::Maybe<AutoGeckoProfilerEntry> entry;
+ if (maybecx) {
+ entry.emplace(maybecx, "JSRope::flatten");
+ }
+
+ JSLinearString* str = flattenInternal();
+ if (!str && maybecx) {
+ ReportOutOfMemory(maybecx);
+ }
+
+ return str;
+}
+
+JSLinearString* JSRope::flattenInternal() {
+ if (zone()->needsIncrementalBarrier()) {
+ return flattenInternal<WithIncrementalBarrier>();
+ }
+
+ return flattenInternal<NoBarrier>();
+}
+
+template <JSRope::UsingBarrier usingBarrier>
+JSLinearString* JSRope::flattenInternal() {
+ if (hasTwoByteChars()) {
+ return flattenInternal<usingBarrier, char16_t>(this);
+ }
+
+ return flattenInternal<usingBarrier, Latin1Char>(this);
+}
+
+template <JSRope::UsingBarrier usingBarrier, typename CharT>
+/* static */
+JSLinearString* JSRope::flattenInternal(JSRope* root) {
+ /*
+ * Consider the DAG of JSRopes rooted at |root|, with non-JSRopes as
+ * its leaves. Mutate the root JSRope into a JSExtensibleString containing
+ * the full flattened text that the root represents, and mutate all other
+ * JSRopes in the interior of the DAG into JSDependentStrings that refer to
+ * this new JSExtensibleString.
+ *
+ * If the leftmost leaf of our DAG is a JSExtensibleString, consider
+ * stealing its buffer for use in our new root, and transforming it into a
+ * JSDependentString too. Do not mutate any of the other leaves.
+ *
+   * Perform a depth-first DAG traversal, splatting each node's characters
+ * into a contiguous buffer. Visit each rope node three times:
+ * 1. record position in the buffer and recurse into left child;
+ * 2. recurse into the right child;
+ * 3. transform the node into a dependent string.
+ * To avoid maintaining a stack, tree nodes are mutated to indicate how many
+   * times they have been visited. Since ropes can be DAGs, a node may be
+ * encountered multiple times during traversal. However, step 3 above leaves
+ * a valid dependent string, so everything works out.
+ *
+ * While ropes avoid all sorts of quadratic cases with string concatenation,
+ * they can't help when ropes are immediately flattened. One idiomatic case
+ * that we'd like to keep linear (and has traditionally been linear in SM
+ * and other JS engines) is:
+ *
+ * while (...) {
+ * s += ...
+ * s.flatten
+ * }
+ *
+ * Two behaviors accomplish this:
+ *
+ * - When the leftmost non-rope in the DAG we're flattening is a
+ * JSExtensibleString with sufficient capacity to hold the entire
+ * flattened string, we just flatten the DAG into its buffer. Then, when
+ * we transform the root of the DAG from a JSRope into a
+ * JSExtensibleString, we steal that buffer, and change the victim from a
+ * JSExtensibleString to a JSDependentString. In this case, the left-hand
+ * side of the string never needs to be copied.
+ *
+ * - Otherwise, we round up the total flattened size and create a fresh
+ * JSExtensibleString with that much capacity. If this in turn becomes the
+ * leftmost leaf of a subsequent flatten, we will hopefully be able to
+ * fill it, as in the case above.
+ *
+ * Note that, even though the code for creating JSDependentStrings avoids
+ * creating dependents of dependents, we can create that situation here: the
+ * JSExtensibleStrings we transform into JSDependentStrings might have
+ * JSDependentStrings pointing to them already. Stealing the buffer doesn't
+ * change its address, only its owning JSExtensibleString, so all chars()
+ * pointers in the JSDependentStrings are still valid.
+ */
+ const size_t wholeLength = root->length();
+ size_t wholeCapacity;
+ CharT* wholeChars;
+
+ AutoCheckCannotGC nogc;
+
+ Nursery& nursery = root->runtimeFromMainThread()->gc.nursery();
+
+ /* Find the left most string, containing the first string. */
+ JSRope* leftmostRope = root;
+ while (leftmostRope->leftChild()->isRope()) {
+ leftmostRope = &leftmostRope->leftChild()->asRope();
+ }
+ JSString* leftmostChild = leftmostRope->leftChild();
+
+ bool reuseLeftmostBuffer = CanReuseLeftmostBuffer(
+ leftmostChild, wholeLength, std::is_same_v<CharT, char16_t>);
+
+ if (reuseLeftmostBuffer) {
+ JSExtensibleString& left = leftmostChild->asExtensible();
+ wholeCapacity = left.capacity();
+ wholeChars = const_cast<CharT*>(left.nonInlineChars<CharT>(nogc));
+
+ // Nursery::registerMallocedBuffer is fallible, so attempt it first before
+ // doing anything irreversible.
+ if (!UpdateNurseryBuffersOnTransfer(nursery, &left, root, wholeChars,
+ wholeCapacity * sizeof(CharT))) {
+ return nullptr;
+ }
+ } else {
+ // If we can't reuse the leftmost child's buffer, allocate a new one.
+ if (!AllocChars(root, wholeLength, &wholeChars, &wholeCapacity)) {
+ return nullptr;
+ }
+
+ if (!root->isTenured()) {
+ if (!nursery.registerMallocedBuffer(wholeChars,
+ wholeCapacity * sizeof(CharT))) {
+ js_free(wholeChars);
+ return nullptr;
+ }
+ }
+ }
+
+ JSRope* str = root;
+ CharT* pos = wholeChars;
+
+ JSRope* parent = nullptr;
+ uint32_t parentFlag = 0;
+
+first_visit_node: {
+ MOZ_ASSERT_IF(str != root, parent && parentFlag);
+ MOZ_ASSERT(!str->asRope().isBeingFlattened());
+
+ ropeBarrierDuringFlattening<usingBarrier>(str);
+
+ JSString& left = *str->d.s.u2.left;
+ str->d.s.u2.parent = parent;
+ str->setFlagBit(parentFlag);
+ parent = nullptr;
+ parentFlag = 0;
+
+ if (left.isRope()) {
+ /* Return to this node when 'left' done, then goto visit_right_child. */
+ parent = str;
+ parentFlag = FLATTEN_VISIT_RIGHT;
+ str = &left.asRope();
+ goto first_visit_node;
+ }
+ if (!(reuseLeftmostBuffer && pos == wholeChars)) {
+ CopyChars(pos, left.asLinear());
+ }
+ pos += left.length();
+}
+
+visit_right_child: {
+ JSString& right = *str->d.s.u3.right;
+ if (right.isRope()) {
+ /* Return to this node when 'right' done, then goto finish_node. */
+ parent = str;
+ parentFlag = FLATTEN_FINISH_NODE;
+ str = &right.asRope();
+ goto first_visit_node;
+ }
+ CopyChars(pos, right.asLinear());
+ pos += right.length();
+}
+
+finish_node: {
+ if (str == root) {
+ goto finish_root;
+ }
+
+ MOZ_ASSERT(pos >= wholeChars);
+ CharT* chars = pos - str->length();
+ JSRope* strParent = str->d.s.u2.parent;
+ str->setNonInlineChars(chars);
+
+ MOZ_ASSERT(str->asRope().isBeingFlattened());
+ mozilla::DebugOnly<bool> visitRight = str->flags() & FLATTEN_VISIT_RIGHT;
+ bool finishNode = str->flags() & FLATTEN_FINISH_NODE;
+ MOZ_ASSERT(visitRight != finishNode);
+
+ // This also clears the flags related to flattening.
+ str->setLengthAndFlags(str->length(),
+ StringFlagsForCharType<CharT>(INIT_DEPENDENT_FLAGS));
+  str->d.s.u3.base =
+      reinterpret_cast<JSLinearString*>(root); /* root is linear on exit */
+
+ // Every interior (rope) node in the rope's tree will be visited during
+ // the traversal and post-barriered here, so earlier additions of
+ // dependent.base -> root pointers are handled by this barrier as well.
+ //
+ // The only time post-barriers need do anything is when the root is in
+ // the nursery. Note that the root was a rope but will be an extensible
+ // string when we return, so it will not point to any strings and need
+ // not be barriered.
+ if (str->isTenured() && !root->isTenured()) {
+ root->storeBuffer()->putWholeCell(str);
+ }
+
+ str = strParent;
+ if (finishNode) {
+ goto finish_node;
+ }
+ MOZ_ASSERT(visitRight);
+ goto visit_right_child;
+}
+
+finish_root:
+ // We traversed all the way back up to the root so we're finished.
+ MOZ_ASSERT(str == root);
+ MOZ_ASSERT(pos == wholeChars + wholeLength);
+
+ root->setLengthAndFlags(wholeLength,
+ StringFlagsForCharType<CharT>(EXTENSIBLE_FLAGS));
+ root->setNonInlineChars(wholeChars);
+ root->d.s.u3.capacity = wholeCapacity;
+ AddCellMemory(root, root->asLinear().allocSize(), MemoryUse::StringContents);
+
+ if (reuseLeftmostBuffer) {
+ // Remove memory association for left node we're about to make into a
+ // dependent string.
+ JSString& left = *leftmostChild;
+ RemoveCellMemory(&left, left.allocSize(), MemoryUse::StringContents);
+
+ uint32_t flags = INIT_DEPENDENT_FLAGS;
+ if (left.inStringToAtomCache()) {
+ flags |= IN_STRING_TO_ATOM_CACHE;
+ }
+ left.setLengthAndFlags(left.length(), StringFlagsForCharType<CharT>(flags));
+ left.d.s.u3.base = &root->asLinear();
+ if (left.isTenured() && !root->isTenured()) {
+ // leftmost child -> root is a tenured -> nursery edge.
+ root->storeBuffer()->putWholeCell(&left);
+ }
+ }
+
+ return &root->asLinear();
+}
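+
+/*
+ * A minimal sketch of the idiom the buffer-reuse path above keeps linear
+ * (illustrative; |piece| is a hypothetical rooted string appended each
+ * iteration):
+ *
+ *   JS::Rooted<JSString*> s(cx, cx->emptyString());
+ *   for (...) {
+ *     s = ConcatStrings<CanGC>(cx, s, piece, gc::Heap::Default);
+ *     // ensureLinear flattens, reusing s's extensible buffer when it can.
+ *     if (!s || !s->ensureLinear(cx)) {
+ *       return nullptr;
+ *     }
+ *   }
+ */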
+
+template <JSRope::UsingBarrier usingBarrier>
+/* static */
+inline void JSRope::ropeBarrierDuringFlattening(JSRope* rope) {
+ MOZ_ASSERT(!rope->isBeingFlattened());
+ if constexpr (usingBarrier) {
+ gc::PreWriteBarrierDuringFlattening(rope->leftChild());
+ gc::PreWriteBarrierDuringFlattening(rope->rightChild());
+ }
+}
+
+template <AllowGC allowGC>
+static JSLinearString* EnsureLinear(
+ JSContext* cx,
+ typename MaybeRooted<JSString*, allowGC>::HandleType string) {
+ JSLinearString* linear = string->ensureLinear(cx);
+ // Don't report an exception if GC is not allowed, just return nullptr.
+ if (!linear && !allowGC) {
+ cx->recoverFromOutOfMemory();
+ }
+ return linear;
+}
+
+template <AllowGC allowGC>
+JSString* js::ConcatStrings(
+ JSContext* cx, typename MaybeRooted<JSString*, allowGC>::HandleType left,
+ typename MaybeRooted<JSString*, allowGC>::HandleType right, gc::Heap heap) {
+ MOZ_ASSERT_IF(!left->isAtom(), cx->isInsideCurrentZone(left));
+ MOZ_ASSERT_IF(!right->isAtom(), cx->isInsideCurrentZone(right));
+
+ size_t leftLen = left->length();
+ if (leftLen == 0) {
+ return right;
+ }
+
+ size_t rightLen = right->length();
+ if (rightLen == 0) {
+ return left;
+ }
+
+ size_t wholeLength = leftLen + rightLen;
+ if (MOZ_UNLIKELY(wholeLength > JSString::MAX_LENGTH)) {
+ // Don't report an exception if GC is not allowed, just return nullptr.
+ if (allowGC) {
+ js::ReportOversizedAllocation(cx, JSMSG_ALLOC_OVERFLOW);
+ }
+ return nullptr;
+ }
+
+ bool isLatin1 = left->hasLatin1Chars() && right->hasLatin1Chars();
+ bool canUseInline = isLatin1
+ ? JSInlineString::lengthFits<Latin1Char>(wholeLength)
+ : JSInlineString::lengthFits<char16_t>(wholeLength);
+ if (canUseInline) {
+ Latin1Char* latin1Buf = nullptr; // initialize to silence GCC warning
+ char16_t* twoByteBuf = nullptr; // initialize to silence GCC warning
+ JSInlineString* str =
+ isLatin1
+ ? AllocateInlineString<allowGC>(cx, wholeLength, &latin1Buf, heap)
+ : AllocateInlineString<allowGC>(cx, wholeLength, &twoByteBuf, heap);
+ if (!str) {
+ return nullptr;
+ }
+
+ AutoCheckCannotGC nogc;
+ JSLinearString* leftLinear = EnsureLinear<allowGC>(cx, left);
+ if (!leftLinear) {
+ return nullptr;
+ }
+ JSLinearString* rightLinear = EnsureLinear<allowGC>(cx, right);
+ if (!rightLinear) {
+ return nullptr;
+ }
+
+ if (isLatin1) {
+ PodCopy(latin1Buf, leftLinear->latin1Chars(nogc), leftLen);
+ PodCopy(latin1Buf + leftLen, rightLinear->latin1Chars(nogc), rightLen);
+ } else {
+ if (leftLinear->hasTwoByteChars()) {
+ PodCopy(twoByteBuf, leftLinear->twoByteChars(nogc), leftLen);
+ } else {
+ CopyAndInflateChars(twoByteBuf, leftLinear->latin1Chars(nogc), leftLen);
+ }
+ if (rightLinear->hasTwoByteChars()) {
+ PodCopy(twoByteBuf + leftLen, rightLinear->twoByteChars(nogc),
+ rightLen);
+ } else {
+ CopyAndInflateChars(twoByteBuf + leftLen,
+ rightLinear->latin1Chars(nogc), rightLen);
+ }
+ }
+
+ return str;
+ }
+
+ return JSRope::new_<allowGC>(cx, left, right, wholeLength, heap);
+}
+
+template JSString* js::ConcatStrings<CanGC>(JSContext* cx, HandleString left,
+ HandleString right, gc::Heap heap);
+
+template JSString* js::ConcatStrings<NoGC>(JSContext* cx, JSString* const& left,
+ JSString* const& right,
+ gc::Heap heap);
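+
+/*
+ * Usage sketch (illustrative): the NoGC instantiation is a fallible fast
+ * path that never GCs; a caller holding unrooted strings can retry with
+ * the CanGC instantiation after rooting:
+ *
+ *   JSString* str = ConcatStrings<NoGC>(cx, left, right, heap);
+ *   if (!str) {
+ *     JS::Rooted<JSString*> l(cx, left);
+ *     JS::Rooted<JSString*> r(cx, right);
+ *     str = ConcatStrings<CanGC>(cx, l, r, heap);
+ *   }
+ */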
+
+/**
+ * Copy |src[0..length]| to |dest[0..length]| when copying doesn't narrow and
+ * therefore can't lose information.
+ */
+static inline void FillChars(char16_t* dest, const unsigned char* src,
+ size_t length) {
+ ConvertLatin1toUtf16(AsChars(Span(src, length)), Span(dest, length));
+}
+
+static inline void FillChars(char16_t* dest, const char16_t* src,
+ size_t length) {
+ PodCopy(dest, src, length);
+}
+
+static inline void FillChars(unsigned char* dest, const unsigned char* src,
+ size_t length) {
+ PodCopy(dest, src, length);
+}
+
+/**
+ * Copy |src[0..length]| to |dest[0..length]| when copying *does* narrow, but
+ * the user guarantees every runtime |src[i]| value can be stored without change
+ * of value in |dest[i]|.
+ */
+static inline void FillFromCompatible(unsigned char* dest, const char16_t* src,
+ size_t length) {
+ LossyConvertUtf16toLatin1(Span(src, length),
+ AsWritableChars(Span(dest, length)));
+}
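+
+/*
+ * Example (illustrative): FillFromCompatible may narrow a two-byte string
+ * such as "caf" + U+00E9, since every code unit fits in 0x00..0xFF; a
+ * string containing U+2603 has no Latin-1 form and must stay two-byte.
+ */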
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+void JSDependentString::dumpRepresentation(js::GenericPrinter& out,
+ int indent) const {
+ dumpRepresentationHeader(out, "JSDependentString");
+ indent += 2;
+ out.printf("%*soffset: %zu\n", indent, "", baseOffset());
+ out.printf("%*sbase: ", indent, "");
+ base()->dumpRepresentation(out, indent);
+}
+#endif
+
+bool js::EqualChars(const JSLinearString* str1, const JSLinearString* str2) {
+ MOZ_ASSERT(str1->length() == str2->length());
+
+ size_t len = str1->length();
+
+ AutoCheckCannotGC nogc;
+ if (str1->hasTwoByteChars()) {
+ if (str2->hasTwoByteChars()) {
+ return EqualChars(str1->twoByteChars(nogc), str2->twoByteChars(nogc),
+ len);
+ }
+
+ return EqualChars(str2->latin1Chars(nogc), str1->twoByteChars(nogc), len);
+ }
+
+ if (str2->hasLatin1Chars()) {
+ return EqualChars(str1->latin1Chars(nogc), str2->latin1Chars(nogc), len);
+ }
+
+ return EqualChars(str1->latin1Chars(nogc), str2->twoByteChars(nogc), len);
+}
+
+bool js::HasSubstringAt(JSLinearString* text, JSLinearString* pat,
+ size_t start) {
+ MOZ_ASSERT(start + pat->length() <= text->length());
+
+ size_t patLen = pat->length();
+
+ AutoCheckCannotGC nogc;
+ if (text->hasLatin1Chars()) {
+ const Latin1Char* textChars = text->latin1Chars(nogc) + start;
+ if (pat->hasLatin1Chars()) {
+ return EqualChars(textChars, pat->latin1Chars(nogc), patLen);
+ }
+
+ return EqualChars(textChars, pat->twoByteChars(nogc), patLen);
+ }
+
+ const char16_t* textChars = text->twoByteChars(nogc) + start;
+ if (pat->hasTwoByteChars()) {
+ return EqualChars(textChars, pat->twoByteChars(nogc), patLen);
+ }
+
+ return EqualChars(pat->latin1Chars(nogc), textChars, patLen);
+}
+
+bool js::EqualStrings(JSContext* cx, JSString* str1, JSString* str2,
+ bool* result) {
+ if (str1 == str2) {
+ *result = true;
+ return true;
+ }
+
+ size_t length1 = str1->length();
+ if (length1 != str2->length()) {
+ *result = false;
+ return true;
+ }
+
+ JSLinearString* linear1 = str1->ensureLinear(cx);
+ if (!linear1) {
+ return false;
+ }
+ JSLinearString* linear2 = str2->ensureLinear(cx);
+ if (!linear2) {
+ return false;
+ }
+
+ *result = EqualChars(linear1, linear2);
+ return true;
+}
+
+bool js::EqualStrings(const JSLinearString* str1, const JSLinearString* str2) {
+ if (str1 == str2) {
+ return true;
+ }
+
+ size_t length1 = str1->length();
+ if (length1 != str2->length()) {
+ return false;
+ }
+
+ return EqualChars(str1, str2);
+}
+
+int32_t js::CompareChars(const char16_t* s1, size_t len1, JSLinearString* s2) {
+ AutoCheckCannotGC nogc;
+ return s2->hasLatin1Chars()
+ ? CompareChars(s1, len1, s2->latin1Chars(nogc), s2->length())
+ : CompareChars(s1, len1, s2->twoByteChars(nogc), s2->length());
+}
+
+static int32_t CompareStringsImpl(const JSLinearString* str1,
+ const JSLinearString* str2) {
+ size_t len1 = str1->length();
+ size_t len2 = str2->length();
+
+ AutoCheckCannotGC nogc;
+ if (str1->hasLatin1Chars()) {
+ const Latin1Char* chars1 = str1->latin1Chars(nogc);
+ return str2->hasLatin1Chars()
+ ? CompareChars(chars1, len1, str2->latin1Chars(nogc), len2)
+ : CompareChars(chars1, len1, str2->twoByteChars(nogc), len2);
+ }
+
+ const char16_t* chars1 = str1->twoByteChars(nogc);
+ return str2->hasLatin1Chars()
+ ? CompareChars(chars1, len1, str2->latin1Chars(nogc), len2)
+ : CompareChars(chars1, len1, str2->twoByteChars(nogc), len2);
+}
+
+bool js::CompareStrings(JSContext* cx, JSString* str1, JSString* str2,
+ int32_t* result) {
+ MOZ_ASSERT(str1);
+ MOZ_ASSERT(str2);
+
+ if (str1 == str2) {
+ *result = 0;
+ return true;
+ }
+
+ JSLinearString* linear1 = str1->ensureLinear(cx);
+ if (!linear1) {
+ return false;
+ }
+
+ JSLinearString* linear2 = str2->ensureLinear(cx);
+ if (!linear2) {
+ return false;
+ }
+
+ *result = CompareStringsImpl(linear1, linear2);
+ return true;
+}
+
+int32_t js::CompareStrings(const JSLinearString* str1,
+ const JSLinearString* str2) {
+ MOZ_ASSERT(str1);
+ MOZ_ASSERT(str2);
+
+ if (str1 == str2) {
+ return 0;
+ }
+ return CompareStringsImpl(str1, str2);
+}
+
+bool js::StringIsAscii(JSLinearString* str) {
+ JS::AutoCheckCannotGC nogc;
+ if (str->hasLatin1Chars()) {
+ return mozilla::IsAscii(
+ AsChars(Span(str->latin1Chars(nogc), str->length())));
+ }
+ return mozilla::IsAscii(Span(str->twoByteChars(nogc), str->length()));
+}
+
+bool js::StringEqualsAscii(JSLinearString* str, const char* asciiBytes) {
+ return StringEqualsAscii(str, asciiBytes, strlen(asciiBytes));
+}
+
+bool js::StringEqualsAscii(JSLinearString* str, const char* asciiBytes,
+ size_t length) {
+ MOZ_ASSERT(JS::StringIsASCII(Span(asciiBytes, length)));
+
+ if (length != str->length()) {
+ return false;
+ }
+
+ const Latin1Char* latin1 = reinterpret_cast<const Latin1Char*>(asciiBytes);
+
+ AutoCheckCannotGC nogc;
+ return str->hasLatin1Chars()
+ ? EqualChars(latin1, str->latin1Chars(nogc), length)
+ : EqualChars(latin1, str->twoByteChars(nogc), length);
+}
+
+template <typename CharT>
+bool js::CheckStringIsIndex(const CharT* s, size_t length, uint32_t* indexp) {
+ MOZ_ASSERT(length > 0);
+ MOZ_ASSERT(length <= UINT32_CHAR_BUFFER_LENGTH);
+ MOZ_ASSERT(IsAsciiDigit(*s),
+ "caller's fast path must have checked first char");
+
+ RangedPtr<const CharT> cp(s, length);
+ const RangedPtr<const CharT> end(s + length, s, length);
+
+ uint32_t index = AsciiDigitToNumber(*cp++);
+ uint32_t oldIndex = 0;
+ uint32_t c = 0;
+
+ if (index != 0) {
+ // Consume remaining characters only if the first character isn't '0'.
+ while (cp < end && IsAsciiDigit(*cp)) {
+ oldIndex = index;
+ c = AsciiDigitToNumber(*cp);
+ index = 10 * index + c;
+ cp++;
+ }
+ }
+
+ // It's not an integer index if there are characters after the number.
+ if (cp != end) {
+ return false;
+ }
+
+ // Look out for "4294967295" and larger-number strings that fit in
+ // UINT32_CHAR_BUFFER_LENGTH: only unsigned 32-bit integers less than or equal
+ // to MAX_ARRAY_INDEX shall pass.
+ if (oldIndex < MAX_ARRAY_INDEX / 10 ||
+ (oldIndex == MAX_ARRAY_INDEX / 10 && c <= (MAX_ARRAY_INDEX % 10))) {
+ MOZ_ASSERT(index <= MAX_ARRAY_INDEX);
+ *indexp = index;
+ return true;
+ }
+
+ return false;
+}
+
+template bool js::CheckStringIsIndex(const Latin1Char* s, size_t length,
+ uint32_t* indexp);
+template bool js::CheckStringIsIndex(const char16_t* s, size_t length,
+ uint32_t* indexp);
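+
+/*
+ * Worked example of the range check above (illustrative, assuming
+ * MAX_ARRAY_INDEX == 4294967294, i.e. 2**32 - 2): for "4294967294" the
+ * final iteration leaves oldIndex == 429496729 and c == 4, satisfying
+ * oldIndex == MAX_ARRAY_INDEX / 10 && c <= MAX_ARRAY_INDEX % 10, so the
+ * index is accepted; for "4294967295" the trailing c == 5 exceeds the
+ * remainder, so the string is not an index.
+ */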
+
+template <typename CharT>
+static uint32_t AtomCharsToIndex(const CharT* s, size_t length) {
+ // Chars are known to be a valid index value (as determined by
+ // CheckStringIsIndex) that didn't fit in the "index value" bits in the
+ // header.
+
+ MOZ_ASSERT(length > 0);
+ MOZ_ASSERT(length <= UINT32_CHAR_BUFFER_LENGTH);
+
+ RangedPtr<const CharT> cp(s, length);
+ const RangedPtr<const CharT> end(s + length, s, length);
+
+ MOZ_ASSERT(IsAsciiDigit(*cp));
+ uint32_t index = AsciiDigitToNumber(*cp++);
+ MOZ_ASSERT(index != 0);
+
+ while (cp < end) {
+ MOZ_ASSERT(IsAsciiDigit(*cp));
+ index = 10 * index + AsciiDigitToNumber(*cp);
+ cp++;
+ }
+
+ MOZ_ASSERT(index <= MAX_ARRAY_INDEX);
+ return index;
+}
+
+uint32_t JSAtom::getIndexSlow() const {
+ MOZ_ASSERT(isIndex());
+ MOZ_ASSERT(!hasIndexValue());
+
+ size_t len = length();
+
+ AutoCheckCannotGC nogc;
+ return hasLatin1Chars() ? AtomCharsToIndex(latin1Chars(nogc), len)
+ : AtomCharsToIndex(twoByteChars(nogc), len);
+}
+
+// Mark |s| and every string in its base chain non-deduplicatable, so a
+// deduplicating nursery GC cannot free chars the caller has borrowed.
+static void MarkStringAndBasesNonDeduplicatable(JSLinearString* s) {
+ while (true) {
+ if (!s->isTenured()) {
+ s->setNonDeduplicatable();
+ }
+ if (!s->hasBase()) {
+ break;
+ }
+ s = s->base();
+ }
+}
+
+bool AutoStableStringChars::init(JSContext* cx, JSString* s) {
+ Rooted<JSLinearString*> linearString(cx, s->ensureLinear(cx));
+ if (!linearString) {
+ return false;
+ }
+
+ MOZ_ASSERT(state_ == Uninitialized);
+
+ // If the chars are inline then we need to copy them since they may be moved
+ // by a compacting GC.
+ if (baseIsInline(linearString)) {
+ return linearString->hasTwoByteChars() ? copyTwoByteChars(cx, linearString)
+ : copyLatin1Chars(cx, linearString);
+ }
+
+ if (linearString->hasLatin1Chars()) {
+ state_ = Latin1;
+ latin1Chars_ = linearString->rawLatin1Chars();
+ } else {
+ state_ = TwoByte;
+ twoByteChars_ = linearString->rawTwoByteChars();
+ }
+
+ MarkStringAndBasesNonDeduplicatable(linearString);
+
+ s_ = linearString;
+ return true;
+}
+
+bool AutoStableStringChars::initTwoByte(JSContext* cx, JSString* s) {
+ Rooted<JSLinearString*> linearString(cx, s->ensureLinear(cx));
+ if (!linearString) {
+ return false;
+ }
+
+ MOZ_ASSERT(state_ == Uninitialized);
+
+ if (linearString->hasLatin1Chars()) {
+ return copyAndInflateLatin1Chars(cx, linearString);
+ }
+
+ // If the chars are inline then we need to copy them since they may be moved
+ // by a compacting GC.
+ if (baseIsInline(linearString)) {
+ return copyTwoByteChars(cx, linearString);
+ }
+
+ state_ = TwoByte;
+ twoByteChars_ = linearString->rawTwoByteChars();
+
+ MarkStringAndBasesNonDeduplicatable(linearString);
+
+ s_ = linearString;
+ return true;
+}
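+
+/*
+ * Usage sketch (illustrative): AutoStableStringChars pins a view of the
+ * characters that stays valid even if a compacting GC moves the string:
+ *
+ *   JS::AutoStableStringChars stable(cx);
+ *   if (!stable.initTwoByte(cx, str)) {
+ *     return false;
+ *   }
+ *   const char16_t* chars = stable.twoByteChars();
+ *   size_t len = stable.length();
+ */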
+
+bool AutoStableStringChars::baseIsInline(Handle<JSLinearString*> linearString) {
+ JSString* base = linearString;
+ while (base->isDependent()) {
+ base = base->asDependent().base();
+ }
+ return base->isInline();
+}
+
+template <typename T>
+T* AutoStableStringChars::allocOwnChars(JSContext* cx, size_t count) {
+ static_assert(
+ InlineCapacity >=
+ sizeof(JS::Latin1Char) * JSFatInlineString::MAX_LENGTH_LATIN1 &&
+ InlineCapacity >=
+ sizeof(char16_t) * JSFatInlineString::MAX_LENGTH_TWO_BYTE,
+ "InlineCapacity too small to hold fat inline strings");
+
+ static_assert((JSString::MAX_LENGTH &
+ mozilla::tl::MulOverflowMask<sizeof(T)>::value) == 0,
+ "Size calculation can overflow");
+ MOZ_ASSERT(count <= JSString::MAX_LENGTH);
+ size_t size = sizeof(T) * count;
+
+ ownChars_.emplace(cx);
+ if (!ownChars_->resize(size)) {
+ ownChars_.reset();
+ return nullptr;
+ }
+
+ return reinterpret_cast<T*>(ownChars_->begin());
+}
+
+bool AutoStableStringChars::copyAndInflateLatin1Chars(
+ JSContext* cx, Handle<JSLinearString*> linearString) {
+ char16_t* chars = allocOwnChars<char16_t>(cx, linearString->length());
+ if (!chars) {
+ return false;
+ }
+
+ FillChars(chars, linearString->rawLatin1Chars(), linearString->length());
+
+ state_ = TwoByte;
+ twoByteChars_ = chars;
+ s_ = linearString;
+ return true;
+}
+
+bool AutoStableStringChars::copyLatin1Chars(
+ JSContext* cx, Handle<JSLinearString*> linearString) {
+ size_t length = linearString->length();
+ JS::Latin1Char* chars = allocOwnChars<JS::Latin1Char>(cx, length);
+ if (!chars) {
+ return false;
+ }
+
+ FillChars(chars, linearString->rawLatin1Chars(), length);
+
+ state_ = Latin1;
+ latin1Chars_ = chars;
+ s_ = linearString;
+ return true;
+}
+
+bool AutoStableStringChars::copyTwoByteChars(
+ JSContext* cx, Handle<JSLinearString*> linearString) {
+ size_t length = linearString->length();
+ char16_t* chars = allocOwnChars<char16_t>(cx, length);
+ if (!chars) {
+ return false;
+ }
+
+ FillChars(chars, linearString->rawTwoByteChars(), length);
+
+ state_ = TwoByte;
+ twoByteChars_ = chars;
+ s_ = linearString;
+ return true;
+}
+
+template <>
+bool JS::SourceText<char16_t>::initMaybeBorrowed(
+ JSContext* cx, JS::AutoStableStringChars& linearChars) {
+ MOZ_ASSERT(linearChars.isTwoByte(),
+ "AutoStableStringChars must be initialized with char16_t");
+
+ const char16_t* chars = linearChars.twoByteChars();
+ size_t length = linearChars.length();
+ JS::SourceOwnership ownership = linearChars.maybeGiveOwnershipToCaller()
+ ? JS::SourceOwnership::TakeOwnership
+ : JS::SourceOwnership::Borrowed;
+ return initImpl(cx, chars, length, ownership);
+}
+
+template <>
+bool JS::SourceText<char16_t>::initMaybeBorrowed(
+ JS::FrontendContext* fc, JS::AutoStableStringChars& linearChars) {
+ MOZ_ASSERT(linearChars.isTwoByte(),
+ "AutoStableStringChars must be initialized with char16_t");
+
+ const char16_t* chars = linearChars.twoByteChars();
+ size_t length = linearChars.length();
+ JS::SourceOwnership ownership = linearChars.maybeGiveOwnershipToCaller()
+ ? JS::SourceOwnership::TakeOwnership
+ : JS::SourceOwnership::Borrowed;
+ return initImpl(fc, chars, length, ownership);
+}
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+void JSAtom::dump(js::GenericPrinter& out) {
+ out.printf("JSAtom* (%p) = ", (void*)this);
+ this->JSString::dump(out);
+}
+
+void JSAtom::dump() {
+ Fprinter out(stderr);
+ dump(out);
+}
+
+void JSExternalString::dumpRepresentation(js::GenericPrinter& out,
+ int indent) const {
+ dumpRepresentationHeader(out, "JSExternalString");
+ indent += 2;
+
+ out.printf("%*sfinalizer: ((JSExternalStringCallbacks*) %p)\n", indent, "",
+ callbacks());
+ dumpRepresentationChars(out, indent);
+}
+#endif /* defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW) */
+
+JSLinearString* js::NewDependentString(JSContext* cx, JSString* baseArg,
+ size_t start, size_t length,
+ gc::Heap heap) {
+ if (length == 0) {
+ return cx->emptyString();
+ }
+
+ JSLinearString* base = baseArg->ensureLinear(cx);
+ if (!base) {
+ return nullptr;
+ }
+
+ if (start == 0 && length == base->length()) {
+ return base;
+ }
+
+ if (base->hasTwoByteChars()) {
+ AutoCheckCannotGC nogc;
+ const char16_t* chars = base->twoByteChars(nogc) + start;
+ if (JSLinearString* staticStr = cx->staticStrings().lookup(chars, length)) {
+ return staticStr;
+ }
+ } else {
+ AutoCheckCannotGC nogc;
+ const Latin1Char* chars = base->latin1Chars(nogc) + start;
+ if (JSLinearString* staticStr = cx->staticStrings().lookup(chars, length)) {
+ return staticStr;
+ }
+ }
+
+ return JSDependentString::new_(cx, base, start, length, heap);
+}
+
+static inline bool CanStoreCharsAsLatin1(const char16_t* s, size_t length) {
+ return IsUtf16Latin1(Span(s, length));
+}
+
+template <AllowGC allowGC>
+static MOZ_ALWAYS_INLINE JSInlineString* NewInlineStringDeflated(
+ JSContext* cx, const mozilla::Range<const char16_t>& chars,
+ gc::Heap heap = gc::Heap::Default) {
+ size_t len = chars.length();
+ Latin1Char* storage;
+ JSInlineString* str = AllocateInlineString<allowGC>(cx, len, &storage, heap);
+ if (!str) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(CanStoreCharsAsLatin1(chars.begin().get(), len));
+ FillFromCompatible(storage, chars.begin().get(), len);
+ return str;
+}
+
+template <AllowGC allowGC>
+static JSLinearString* NewStringDeflated(JSContext* cx, const char16_t* s,
+ size_t n, gc::Heap heap) {
+ if (JSLinearString* str = TryEmptyOrStaticString(cx, s, n)) {
+ return str;
+ }
+
+ if (JSInlineString::lengthFits<Latin1Char>(n)) {
+ return NewInlineStringDeflated<allowGC>(
+ cx, mozilla::Range<const char16_t>(s, n), heap);
+ }
+
+ auto news = cx->make_pod_arena_array<Latin1Char>(js::StringBufferArena, n);
+ if (!news) {
+ if (!allowGC) {
+ cx->recoverFromOutOfMemory();
+ }
+ return nullptr;
+ }
+
+ MOZ_ASSERT(CanStoreCharsAsLatin1(s, n));
+ FillFromCompatible(news.get(), s, n);
+
+ return JSLinearString::new_<allowGC>(cx, std::move(news), n, heap);
+}
+
+static MOZ_ALWAYS_INLINE JSAtom* NewInlineAtomDeflated(JSContext* cx,
+ const char16_t* chars,
+ size_t length,
+ js::HashNumber hash) {
+ Latin1Char* storage;
+ JSAtom* str = AllocateInlineAtom(cx, length, &storage, hash);
+ if (!str) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(CanStoreCharsAsLatin1(chars, length));
+ FillFromCompatible(storage, chars, length);
+ return str;
+}
+
+static JSAtom* NewAtomDeflatedValidLength(JSContext* cx, const char16_t* s,
+ size_t n, js::HashNumber hash) {
+ if (JSInlineString::lengthFits<Latin1Char>(n)) {
+ return NewInlineAtomDeflated(cx, s, n, hash);
+ }
+
+ auto news = cx->make_pod_arena_array<Latin1Char>(js::StringBufferArena, n);
+ if (!news) {
+ cx->recoverFromOutOfMemory();
+ return nullptr;
+ }
+
+ MOZ_ASSERT(CanStoreCharsAsLatin1(s, n));
+ FillFromCompatible(news.get(), s, n);
+
+ return JSAtom::newValidLength(cx, std::move(news), n, hash);
+}
+
+template <AllowGC allowGC, typename CharT>
+JSLinearString* js::NewStringDontDeflate(
+ JSContext* cx, UniquePtr<CharT[], JS::FreePolicy> chars, size_t length,
+ gc::Heap heap) {
+ if (JSLinearString* str = TryEmptyOrStaticString(cx, chars.get(), length)) {
+ return str;
+ }
+
+ if (JSInlineString::lengthFits<CharT>(length)) {
+ // |chars.get()| is safe because 1) |NewInlineString| necessarily *copies*,
+ // and 2) |chars| frees its contents only when this function returns.
+ return NewInlineString<allowGC>(
+ cx, mozilla::Range<const CharT>(chars.get(), length), heap);
+ }
+
+ return JSLinearString::new_<allowGC>(cx, std::move(chars), length, heap);
+}
+
+template JSLinearString* js::NewStringDontDeflate<CanGC>(
+ JSContext* cx, UniqueTwoByteChars chars, size_t length, gc::Heap heap);
+
+template JSLinearString* js::NewStringDontDeflate<NoGC>(
+ JSContext* cx, UniqueTwoByteChars chars, size_t length, gc::Heap heap);
+
+template JSLinearString* js::NewStringDontDeflate<CanGC>(
+ JSContext* cx, UniqueLatin1Chars chars, size_t length, gc::Heap heap);
+
+template JSLinearString* js::NewStringDontDeflate<NoGC>(JSContext* cx,
+ UniqueLatin1Chars chars,
+ size_t length,
+ gc::Heap heap);
+
+template <AllowGC allowGC, typename CharT>
+JSLinearString* js::NewString(JSContext* cx,
+ UniquePtr<CharT[], JS::FreePolicy> chars,
+ size_t length, gc::Heap heap) {
+ if constexpr (std::is_same_v<CharT, char16_t>) {
+ if (CanStoreCharsAsLatin1(chars.get(), length)) {
+ // Deflating copies from |chars.get()| and lets |chars| be freed on
+ // return.
+ return NewStringDeflated<allowGC>(cx, chars.get(), length, heap);
+ }
+ }
+
+ return NewStringDontDeflate<allowGC>(cx, std::move(chars), length, heap);
+}
+
+template JSLinearString* js::NewString<CanGC>(JSContext* cx,
+ UniqueTwoByteChars chars,
+ size_t length, gc::Heap heap);
+
+template JSLinearString* js::NewString<NoGC>(JSContext* cx,
+ UniqueTwoByteChars chars,
+ size_t length, gc::Heap heap);
+
+template JSLinearString* js::NewString<CanGC>(JSContext* cx,
+ UniqueLatin1Chars chars,
+ size_t length, gc::Heap heap);
+
+template JSLinearString* js::NewString<NoGC>(JSContext* cx,
+ UniqueLatin1Chars chars,
+ size_t length, gc::Heap heap);
+
+namespace js {
+
+template <AllowGC allowGC, typename CharT>
+JSLinearString* NewStringCopyNDontDeflateNonStaticValidLength(JSContext* cx,
+ const CharT* s,
+ size_t n,
+ gc::Heap heap) {
+ if (JSInlineString::lengthFits<CharT>(n)) {
+ return NewInlineString<allowGC>(cx, mozilla::Range<const CharT>(s, n),
+ heap);
+ }
+
+ auto news = cx->make_pod_arena_array<CharT>(js::StringBufferArena, n);
+ if (!news) {
+ if (!allowGC) {
+ cx->recoverFromOutOfMemory();
+ }
+ return nullptr;
+ }
+
+ FillChars(news.get(), s, n);
+
+ return JSLinearString::newValidLength<allowGC>(cx, std::move(news), n, heap);
+}
+
+template JSLinearString* NewStringCopyNDontDeflateNonStaticValidLength<CanGC>(
+ JSContext* cx, const char16_t* s, size_t n, gc::Heap heap);
+
+template JSLinearString* NewStringCopyNDontDeflateNonStaticValidLength<CanGC>(
+ JSContext* cx, const Latin1Char* s, size_t n, gc::Heap heap);
+
+template <AllowGC allowGC, typename CharT>
+JSLinearString* NewStringCopyNDontDeflate(JSContext* cx, const CharT* s,
+ size_t n, gc::Heap heap) {
+ if (JSLinearString* str = TryEmptyOrStaticString(cx, s, n)) {
+ return str;
+ }
+
+ if (MOZ_UNLIKELY(!JSLinearString::validateLength(cx, n))) {
+ return nullptr;
+ }
+
+ return NewStringCopyNDontDeflateNonStaticValidLength<allowGC>(cx, s, n, heap);
+}
+
+template JSLinearString* NewStringCopyNDontDeflate<CanGC>(JSContext* cx,
+ const char16_t* s,
+ size_t n,
+ gc::Heap heap);
+
+template JSLinearString* NewStringCopyNDontDeflate<NoGC>(JSContext* cx,
+ const char16_t* s,
+ size_t n,
+ gc::Heap heap);
+
+template JSLinearString* NewStringCopyNDontDeflate<CanGC>(JSContext* cx,
+ const Latin1Char* s,
+ size_t n,
+ gc::Heap heap);
+
+template JSLinearString* NewStringCopyNDontDeflate<NoGC>(JSContext* cx,
+ const Latin1Char* s,
+ size_t n,
+ gc::Heap heap);
+
+JSLinearString* NewLatin1StringZ(JSContext* cx, UniqueChars chars,
+ gc::Heap heap) {
+ size_t length = strlen(chars.get());
+ UniqueLatin1Chars latin1(reinterpret_cast<Latin1Char*>(chars.release()));
+ return NewString<CanGC>(cx, std::move(latin1), length, heap);
+}
+
+template <AllowGC allowGC, typename CharT>
+JSLinearString* NewStringCopyN(JSContext* cx, const CharT* s, size_t n,
+ gc::Heap heap) {
+ if constexpr (std::is_same_v<CharT, char16_t>) {
+ if (CanStoreCharsAsLatin1(s, n)) {
+ return NewStringDeflated<allowGC>(cx, s, n, heap);
+ }
+ }
+
+ return NewStringCopyNDontDeflate<allowGC>(cx, s, n, heap);
+}
+
+template JSLinearString* NewStringCopyN<CanGC>(JSContext* cx, const char16_t* s,
+ size_t n, gc::Heap heap);
+
+template JSLinearString* NewStringCopyN<NoGC>(JSContext* cx, const char16_t* s,
+ size_t n, gc::Heap heap);
+
+template JSLinearString* NewStringCopyN<CanGC>(JSContext* cx,
+ const Latin1Char* s, size_t n,
+ gc::Heap heap);
+
+template JSLinearString* NewStringCopyN<NoGC>(JSContext* cx,
+ const Latin1Char* s, size_t n,
+ gc::Heap heap);
+
+template <typename CharT>
+JSAtom* NewAtomCopyNDontDeflateValidLength(JSContext* cx, const CharT* s,
+ size_t n, js::HashNumber hash) {
+ if constexpr (std::is_same_v<CharT, char16_t>) {
+ MOZ_ASSERT(!CanStoreCharsAsLatin1(s, n));
+ }
+
+ if (JSInlineString::lengthFits<CharT>(n)) {
+ return NewInlineAtom(cx, s, n, hash);
+ }
+
+ auto news = cx->make_pod_arena_array<CharT>(js::StringBufferArena, n);
+ if (!news) {
+ cx->recoverFromOutOfMemory();
+ return nullptr;
+ }
+
+ FillChars(news.get(), s, n);
+
+ return JSAtom::newValidLength(cx, std::move(news), n, hash);
+}
+
+template JSAtom* NewAtomCopyNDontDeflateValidLength(JSContext* cx,
+ const char16_t* s, size_t n,
+ js::HashNumber hash);
+
+template JSAtom* NewAtomCopyNDontDeflateValidLength(JSContext* cx,
+ const Latin1Char* s,
+ size_t n,
+ js::HashNumber hash);
+
+template <typename CharT>
+JSAtom* NewAtomCopyNMaybeDeflateValidLength(JSContext* cx, const CharT* s,
+ size_t n, js::HashNumber hash) {
+ if constexpr (std::is_same_v<CharT, char16_t>) {
+ if (CanStoreCharsAsLatin1(s, n)) {
+ return NewAtomDeflatedValidLength(cx, s, n, hash);
+ }
+ }
+
+ return NewAtomCopyNDontDeflateValidLength(cx, s, n, hash);
+}
+
+template JSAtom* NewAtomCopyNMaybeDeflateValidLength(JSContext* cx,
+ const char16_t* s,
+ size_t n,
+ js::HashNumber hash);
+
+template JSAtom* NewAtomCopyNMaybeDeflateValidLength(JSContext* cx,
+ const Latin1Char* s,
+ size_t n,
+ js::HashNumber hash);
+
+JSLinearString* NewStringCopyUTF8N(JSContext* cx, const JS::UTF8Chars utf8,
+ gc::Heap heap) {
+ JS::SmallestEncoding encoding = JS::FindSmallestEncoding(utf8);
+ if (encoding == JS::SmallestEncoding::ASCII) {
+ return NewStringCopyN<js::CanGC>(cx, utf8.begin().get(), utf8.length(),
+ heap);
+ }
+
+ size_t length;
+ if (encoding == JS::SmallestEncoding::Latin1) {
+ UniqueLatin1Chars latin1(
+ UTF8CharsToNewLatin1CharsZ(cx, utf8, &length, js::StringBufferArena)
+ .get());
+ if (!latin1) {
+ return nullptr;
+ }
+
+ return NewString<js::CanGC>(cx, std::move(latin1), length, heap);
+ }
+
+ MOZ_ASSERT(encoding == JS::SmallestEncoding::UTF16);
+
+ UniqueTwoByteChars utf16(
+ UTF8CharsToNewTwoByteCharsZ(cx, utf8, &length, js::StringBufferArena)
+ .get());
+ if (!utf16) {
+ return nullptr;
+ }
+
+ return NewString<js::CanGC>(cx, std::move(utf16), length, heap);
+}
+
+MOZ_ALWAYS_INLINE JSString* ExternalStringCache::lookup(const char16_t* chars,
+ size_t len) const {
+ AutoCheckCannotGC nogc;
+
+ for (size_t i = 0; i < NumEntries; i++) {
+ JSString* str = entries_[i];
+ if (!str || str->length() != len) {
+ continue;
+ }
+
+ const char16_t* strChars = str->asLinear().nonInlineTwoByteChars(nogc);
+ if (chars == strChars) {
+ // Note that we don't need an incremental barrier here or below.
+ // The cache is purged on GC so any string we get from the cache
+ // must have been allocated after the GC started.
+ return str;
+ }
+
+ // Compare the chars. Don't do this for long strings as it will be
+ // faster to allocate a new external string.
+ static const size_t MaxLengthForCharComparison = 100;
+ if (len <= MaxLengthForCharComparison && EqualChars(chars, strChars, len)) {
+ return str;
+ }
+ }
+
+ return nullptr;
+}
+
+MOZ_ALWAYS_INLINE void ExternalStringCache::put(JSString* str) {
+ MOZ_ASSERT(str->isExternal());
+
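+  // Shift the existing entries down one slot (evicting the oldest) and put
+  // the new string at the front.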
+ for (size_t i = NumEntries - 1; i > 0; i--) {
+ entries_[i] = entries_[i - 1];
+ }
+
+ entries_[0] = str;
+}
+
+JSString* NewMaybeExternalString(JSContext* cx, const char16_t* s, size_t n,
+ const JSExternalStringCallbacks* callbacks,
+ bool* allocatedExternal, gc::Heap heap) {
+ if (JSString* str = TryEmptyOrStaticString(cx, s, n)) {
+ *allocatedExternal = false;
+ return str;
+ }
+
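+  // Prefer an inline string when the chars fit as Latin1: copying them is
+  // cheaper than creating and managing an external string.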
+ if (JSThinInlineString::lengthFits<Latin1Char>(n) &&
+ CanStoreCharsAsLatin1(s, n)) {
+ *allocatedExternal = false;
+ return NewInlineStringDeflated<AllowGC::CanGC>(
+ cx, mozilla::Range<const char16_t>(s, n), heap);
+ }
+
+ ExternalStringCache& cache = cx->zone()->externalStringCache();
+ if (JSString* str = cache.lookup(s, n)) {
+ *allocatedExternal = false;
+ return str;
+ }
+
+ JSString* str = JSExternalString::new_(cx, s, n, callbacks);
+ if (!str) {
+ return nullptr;
+ }
+
+ *allocatedExternal = true;
+ cache.put(str);
+ return str;
+}
+
+} /* namespace js */
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+void JSExtensibleString::dumpRepresentation(js::GenericPrinter& out,
+ int indent) const {
+ dumpRepresentationHeader(out, "JSExtensibleString");
+ indent += 2;
+
+ out.printf("%*scapacity: %zu\n", indent, "", capacity());
+ dumpRepresentationChars(out, indent);
+}
+
+void JSInlineString::dumpRepresentation(js::GenericPrinter& out,
+ int indent) const {
+ dumpRepresentationHeader(
+ out, isFatInline() ? "JSFatInlineString" : "JSThinInlineString");
+ indent += 2;
+
+ dumpRepresentationChars(out, indent);
+}
+
+void JSLinearString::dumpRepresentation(js::GenericPrinter& out,
+ int indent) const {
+ dumpRepresentationHeader(out, "JSLinearString");
+ indent += 2;
+
+ dumpRepresentationChars(out, indent);
+}
+#endif
+
+struct RepresentativeExternalString : public JSExternalStringCallbacks {
+ void finalize(char16_t* chars) const override {
+ // Constant chars, nothing to do.
+ }
+ size_t sizeOfBuffer(const char16_t* chars,
+ mozilla::MallocSizeOf mallocSizeOf) const override {
+ // This string's buffer is not heap-allocated, so its malloc size is 0.
+ return 0;
+ }
+};
+
+static const RepresentativeExternalString RepresentativeExternalStringCallbacks;
+
+template <typename CheckString, typename CharT>
+static bool FillWithRepresentatives(JSContext* cx, Handle<ArrayObject*> array,
+ uint32_t* index, const CharT* chars,
+ size_t len, size_t fatInlineMaxLength,
+ const CheckString& check, gc::Heap heap) {
+ auto AppendString = [&check](JSContext* cx, Handle<ArrayObject*> array,
+ uint32_t* index, HandleString s) {
+ MOZ_ASSERT(check(s));
+ (void)check; // silence clang -Wunused-lambda-capture in opt builds
+ RootedValue val(cx, StringValue(s));
+ return JS_DefineElement(cx, array, (*index)++, val, 0);
+ };
+
+ MOZ_ASSERT(len > fatInlineMaxLength);
+
+ // Normal atom.
+ RootedString atom1(cx, AtomizeChars(cx, chars, len));
+ if (!atom1 || !AppendString(cx, array, index, atom1)) {
+ return false;
+ }
+ MOZ_ASSERT(atom1->isAtom());
+
+ // Inline atom.
+ RootedString atom2(cx, AtomizeChars(cx, chars, 2));
+ if (!atom2 || !AppendString(cx, array, index, atom2)) {
+ return false;
+ }
+ MOZ_ASSERT(atom2->isAtom());
+ MOZ_ASSERT(atom2->isInline());
+
+ // Fat inline atom.
+ RootedString atom3(cx, AtomizeChars(cx, chars, fatInlineMaxLength));
+ if (!atom3 || !AppendString(cx, array, index, atom3)) {
+ return false;
+ }
+ MOZ_ASSERT(atom3->isAtom());
+ MOZ_ASSERT(atom3->isFatInline());
+
+ // Normal linear string; maybe nursery.
+ RootedString linear1(cx, NewStringCopyN<CanGC>(cx, chars, len, heap));
+ if (!linear1 || !AppendString(cx, array, index, linear1)) {
+ return false;
+ }
+ MOZ_ASSERT(linear1->isLinear());
+
+ // Inline string; maybe nursery.
+ RootedString linear2(cx, NewStringCopyN<CanGC>(cx, chars, 3, heap));
+ if (!linear2 || !AppendString(cx, array, index, linear2)) {
+ return false;
+ }
+ MOZ_ASSERT(linear2->isLinear());
+ MOZ_ASSERT(linear2->isInline());
+
+ // Fat inline string; maybe nursery.
+ RootedString linear3(
+ cx, NewStringCopyN<CanGC>(cx, chars, fatInlineMaxLength, heap));
+ if (!linear3 || !AppendString(cx, array, index, linear3)) {
+ return false;
+ }
+ MOZ_ASSERT(linear3->isLinear());
+ MOZ_ASSERT(linear3->isFatInline());
+
+ // Rope; maybe nursery.
+ RootedString rope(cx, ConcatStrings<CanGC>(cx, atom1, atom3, heap));
+ if (!rope || !AppendString(cx, array, index, rope)) {
+ return false;
+ }
+ MOZ_ASSERT(rope->isRope());
+
+ // Dependent; maybe nursery.
+ RootedString dep(cx, NewDependentString(cx, atom1, 0, len - 2, heap));
+ if (!dep || !AppendString(cx, array, index, dep)) {
+ return false;
+ }
+ MOZ_ASSERT(dep->isDependent());
+
+ // Extensible; maybe nursery.
+ RootedString temp1(cx, NewStringCopyN<CanGC>(cx, chars, len, heap));
+ if (!temp1) {
+ return false;
+ }
+ RootedString extensible(cx, ConcatStrings<CanGC>(cx, temp1, atom3, heap));
+ if (!extensible || !extensible->ensureLinear(cx)) {
+ return false;
+ }
+ if (!AppendString(cx, array, index, extensible)) {
+ return false;
+ }
+ MOZ_ASSERT(extensible->isExtensible());
+
+ // External. Note that we currently only support TwoByte external strings.
+ RootedString external1(cx), external2(cx);
+ if constexpr (std::is_same_v<CharT, char16_t>) {
+ external1 = JS_NewExternalString(cx, (const char16_t*)chars, len,
+ &RepresentativeExternalStringCallbacks);
+ if (!external1 || !AppendString(cx, array, index, external1)) {
+ return false;
+ }
+ MOZ_ASSERT(external1->isExternal());
+
+ external2 = JS_NewExternalString(cx, (const char16_t*)chars, 2,
+ &RepresentativeExternalStringCallbacks);
+ if (!external2 || !AppendString(cx, array, index, external2)) {
+ return false;
+ }
+ MOZ_ASSERT(external2->isExternal());
+ }
+
+ // Assert the strings still have the types we expect after creating the
+ // other strings.
+
+ MOZ_ASSERT(atom1->isAtom());
+ MOZ_ASSERT(atom2->isAtom());
+ MOZ_ASSERT(atom3->isAtom());
+ MOZ_ASSERT(atom2->isInline());
+ MOZ_ASSERT(atom3->isFatInline());
+
+ MOZ_ASSERT(linear1->isLinear());
+ MOZ_ASSERT(linear2->isLinear());
+ MOZ_ASSERT(linear3->isLinear());
+ MOZ_ASSERT(linear2->isInline());
+ MOZ_ASSERT(linear3->isFatInline());
+
+ MOZ_ASSERT(rope->isRope());
+ MOZ_ASSERT(dep->isDependent());
+ MOZ_ASSERT(extensible->isExtensible());
+ MOZ_ASSERT_IF(external1, external1->isExternal());
+ MOZ_ASSERT_IF(external2, external2->isExternal());
+ return true;
+}
+
+/* static */
+bool JSString::fillWithRepresentatives(JSContext* cx,
+ Handle<ArrayObject*> array) {
+ uint32_t index = 0;
+
+ auto CheckTwoByte = [](JSString* str) { return str->hasTwoByteChars(); };
+ auto CheckLatin1 = [](JSString* str) { return str->hasLatin1Chars(); };
+
+ static const char16_t twoByteChars[] =
+ u"\u1234abc\0def\u5678ghijklmasdfa\0xyz0123456789";
+ static const Latin1Char latin1Chars[] = "abc\0defghijklmasdfa\0xyz0123456789";
+
+  // Create each set of strings twice: once in the default heap and once
+  // forced to the tenured heap. If nursery strings are available, the
+  // default-heap case makes a best effort at creating them in the nursery.
+  // Since nursery strings may be disabled or a GC may occur during this
+  // process, there may be duplicate representatives in the final list.
+
+ if (!FillWithRepresentatives(cx, array, &index, twoByteChars,
+ std::size(twoByteChars) - 1,
+ JSFatInlineString::MAX_LENGTH_TWO_BYTE,
+ CheckTwoByte, gc::Heap::Tenured)) {
+ return false;
+ }
+ if (!FillWithRepresentatives(cx, array, &index, latin1Chars,
+ std::size(latin1Chars) - 1,
+ JSFatInlineString::MAX_LENGTH_LATIN1,
+ CheckLatin1, gc::Heap::Tenured)) {
+ return false;
+ }
+ if (!FillWithRepresentatives(cx, array, &index, twoByteChars,
+ std::size(twoByteChars) - 1,
+ JSFatInlineString::MAX_LENGTH_TWO_BYTE,
+ CheckTwoByte, gc::Heap::Default)) {
+ return false;
+ }
+ if (!FillWithRepresentatives(cx, array, &index, latin1Chars,
+ std::size(latin1Chars) - 1,
+ JSFatInlineString::MAX_LENGTH_LATIN1,
+ CheckLatin1, gc::Heap::Default)) {
+ return false;
+ }
+
+ MOZ_ASSERT(index == 40);
+
+ return true;
+}
+
+/*** Conversions ************************************************************/
+
+UniqueChars js::EncodeLatin1(JSContext* cx, JSString* str) {
+ JSLinearString* linear = str->ensureLinear(cx);
+ if (!linear) {
+ return nullptr;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ if (linear->hasTwoByteChars()) {
+ JS::Latin1CharsZ chars =
+ JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, linear->twoByteRange(nogc));
+ return UniqueChars(chars.c_str());
+ }
+
+ size_t len = str->length();
+ Latin1Char* buf = cx->pod_malloc<Latin1Char>(len + 1);
+ if (!buf) {
+ return nullptr;
+ }
+
+ FillChars(buf, linear->latin1Chars(nogc), len);
+ buf[len] = '\0';
+
+ return UniqueChars(reinterpret_cast<char*>(buf));
+}
+
+UniqueChars js::EncodeAscii(JSContext* cx, JSString* str) {
+ JSLinearString* linear = str->ensureLinear(cx);
+ if (!linear) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(StringIsAscii(linear));
+ return EncodeLatin1(cx, linear);
+}
+
+UniqueChars js::IdToPrintableUTF8(JSContext* cx, HandleId id,
+ IdToPrintableBehavior behavior) {
+ // ToString(<symbol>) throws a TypeError, therefore require that callers
+ // request source representation when |id| is a property key.
+ MOZ_ASSERT_IF(
+ behavior == IdToPrintableBehavior::IdIsIdentifier,
+ id.isAtom() && frontend::IsIdentifierNameOrPrivateName(id.toAtom()));
+
+ RootedValue v(cx, IdToValue(id));
+ JSString* str;
+ if (behavior == IdToPrintableBehavior::IdIsPropertyKey) {
+ str = ValueToSource(cx, v);
+ } else {
+ str = ToString<CanGC>(cx, v);
+ }
+ if (!str) {
+ return nullptr;
+ }
+ return StringToNewUTF8CharsZ(cx, *str);
+}
+
+template <AllowGC allowGC>
+JSString* js::ToStringSlow(
+ JSContext* cx, typename MaybeRooted<Value, allowGC>::HandleType arg) {
+ /* As with ToObjectSlow, callers must verify that |arg| isn't a string. */
+ MOZ_ASSERT(!arg.isString());
+
+ Value v = arg;
+ if (!v.isPrimitive()) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ if (!allowGC) {
+ return nullptr;
+ }
+ RootedValue v2(cx, v);
+ if (!ToPrimitive(cx, JSTYPE_STRING, &v2)) {
+ return nullptr;
+ }
+ v = v2;
+ }
+
+ JSString* str;
+ if (v.isString()) {
+ str = v.toString();
+ } else if (v.isInt32()) {
+ str = Int32ToString<allowGC>(cx, v.toInt32());
+ } else if (v.isDouble()) {
+ str = NumberToString<allowGC>(cx, v.toDouble());
+ } else if (v.isBoolean()) {
+ str = BooleanToString(cx, v.toBoolean());
+ } else if (v.isNull()) {
+ str = cx->names().null;
+ } else if (v.isSymbol()) {
+ MOZ_ASSERT(!cx->isHelperThreadContext());
+ if (allowGC) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SYMBOL_TO_STRING);
+ }
+ return nullptr;
+ } else if (v.isBigInt()) {
+ if (!allowGC) {
+ return nullptr;
+ }
+ RootedBigInt i(cx, v.toBigInt());
+ str = BigInt::toString<CanGC>(cx, i, 10);
+ }
+#ifdef ENABLE_RECORD_TUPLE
+ else if (v.isExtendedPrimitive()) {
+ if (!allowGC) {
+ return nullptr;
+ }
+ if (IsTuple(v)) {
+ Rooted<TupleType*> tup(cx, &TupleType::thisTupleValue(v));
+ return TupleToSource(cx, tup);
+ }
+ Rooted<RecordType*> rec(cx);
+ MOZ_ALWAYS_TRUE(RecordObject::maybeUnbox(&v.getObjectPayload(), &rec));
+ return RecordToSource(cx, rec);
+ }
+#endif
+ else {
+ MOZ_ASSERT(v.isUndefined());
+ str = cx->names().undefined;
+ }
+ return str;
+}
+
+template JSString* js::ToStringSlow<CanGC>(JSContext* cx, HandleValue arg);
+
+template JSString* js::ToStringSlow<NoGC>(JSContext* cx, const Value& arg);
+
+JS_PUBLIC_API JSString* js::ToStringSlow(JSContext* cx, HandleValue v) {
+ return ToStringSlow<CanGC>(cx, v);
+}
diff --git a/js/src/vm/StringType.h b/js/src/vm/StringType.h
new file mode 100644
index 0000000000..7d592bee35
--- /dev/null
+++ b/js/src/vm/StringType.h
@@ -0,0 +1,2052 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_StringType_h
+#define vm_StringType_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Range.h"
+#include "mozilla/Span.h"
+#include "mozilla/TextUtils.h"
+
+#include <string_view> // std::basic_string_view
+
+#include "jstypes.h" // js::Bit
+
+#include "gc/Allocator.h"
+#include "gc/Cell.h"
+#include "gc/MaybeRooted.h"
+#include "gc/Nursery.h"
+#include "gc/RelocationOverlay.h"
+#include "gc/StoreBuffer.h"
+#include "js/CharacterEncoding.h"
+#include "js/RootingAPI.h"
+#include "js/shadow/String.h" // JS::shadow::String
+#include "js/String.h" // JS::MaxStringLength
+#include "js/UniquePtr.h"
+#include "util/Text.h"
+
+class JSDependentString;
+class JSExtensibleString;
+class JSExternalString;
+class JSInlineString;
+class JSRope;
+
+namespace JS {
+class JS_PUBLIC_API AutoStableStringChars;
+} // namespace JS
+
+namespace js {
+
+class ArrayObject;
+class GenericPrinter;
+class PropertyName;
+class StringBuffer;
+
+namespace frontend {
+class ParserAtomsTable;
+class TaggedParserAtomIndex;
+class WellKnownParserAtoms;
+struct CompilationAtomCache;
+} // namespace frontend
+
+namespace jit {
+class MacroAssembler;
+} // namespace jit
+
+/* The buffer length required to contain any unsigned 32-bit integer. */
+static const size_t UINT32_CHAR_BUFFER_LENGTH = sizeof("4294967295") - 1;
+
+// Maximum array index. This value is defined in the spec (ES2021 draft, 6.1.7):
+//
+// An array index is an integer index whose numeric value i is in the range
+// +0𝔽 ≤ i < 𝔽(2^32 - 1).
+const uint32_t MAX_ARRAY_INDEX = 4294967294u; // 2^32-2 (= UINT32_MAX-1)
+
+// Returns true if the characters of `s` store an unsigned 32-bit integer value
+// less than or equal to MAX_ARRAY_INDEX, initializing `*indexp` to that value
+// if so. Leading '0' isn't allowed except 0 itself.
+template <typename CharT>
+bool CheckStringIsIndex(const CharT* s, size_t length, uint32_t* indexp);
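+// For example, "0" yields 0 and "4294967294" yields MAX_ARRAY_INDEX, while
+// "0123" (leading zero) and "4294967295" (exceeds MAX_ARRAY_INDEX) are
+// rejected.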
+
+} /* namespace js */
+
+// clang-format off
+/*
+ * [SMDOC] JavaScript Strings
+ *
+ * Conceptually, a JS string is just an array of chars and a length. This array
+ * of chars may or may not be null-terminated and, if it is, the null character
+ * is not included in the length.
+ *
+ * To improve performance of common operations, the following optimizations are
+ * made which affect the engine's representation of strings:
+ *
+ * - The plain vanilla representation is a "linear" string which consists of a
+ * string header in the GC heap and a malloc'd char array.
+ *
+ * - To avoid copying a substring of an existing "base" string, a "dependent"
+ *   string (JSDependentString) can be created which points into the base
+ *   string's char array.
+ *
+ * - To avoid O(n^2) char buffer copying, a "rope" node (JSRope) can be created
+ * to represent a delayed string concatenation. Concatenation (called
+ * flattening) is performed if and when a linear char array is requested. In
+ * general, ropes form a binary dag whose internal nodes are JSRope string
+ * headers with no associated char array and whose leaf nodes are linear
+ * strings.
+ *
+ * - To avoid copying the leftmost string when flattening, we may produce an
+ * "extensible" string, which tracks not only its actual length but also its
+ * buffer's overall size. If such an "extensible" string appears as the
+ * leftmost string in a subsequent flatten, and its buffer has enough unused
+ * space, we can simply flatten the rest of the ropes into its buffer,
+ * leaving its text in place. We then transfer ownership of its buffer to the
+ * flattened rope, and mutate the donor extensible string into a dependent
+ * string referencing its original buffer.
+ *
+ * (The term "extensible" does not imply that we ever 'realloc' the buffer.
+ * Extensible strings may have dependent strings pointing into them, and the
+ * JSAPI hands out pointers to linear strings' buffers, so resizing with
+ * 'realloc' is generally not possible.)
+ *
+ * - To avoid allocating small char arrays, short strings can be stored inline
+ * in the string header (JSInlineString). These come in two flavours:
+ * JSThinInlineString, which is the same size as JSString; and
+ * JSFatInlineString, which has a larger header and so can fit more chars.
+ *
+ * - To avoid O(n) string equality comparisons, strings can be canonicalized
+ *   to "atoms" (JSAtom) such that there is a single atom with a given
+ *   (length, chars).
+ *
+ * - To avoid copying all strings created through the JSAPI, an "external"
+ * string (JSExternalString) can be created whose chars are managed by the
+ * JSAPI client.
+ *
+ * - To avoid using two bytes per character for every string, string
+ * characters are stored as Latin1 instead of TwoByte if all characters are
+ * representable in Latin1.
+ *
+ * - To avoid slow conversions from strings to integer indexes, we cache 16-bit
+ *   unsigned indexes on strings representing such numbers.
+ *
+ * Although all strings share the same basic memory layout, we can conceptually
+ * arrange them into a hierarchy of operations/invariants and represent this
+ * hierarchy in C++ with classes:
+ *
+ * C++ type operations+fields / invariants+properties
+ * ========================== =========================================
+ * JSString (abstract) get(Latin1|TwoByte)CharsZ, get(Latin1|TwoByte)Chars, length / -
+ * | \
+ * | JSRope leftChild, rightChild / -
+ * |
+ * JSLinearString (abstract) latin1Chars, twoByteChars / -
+ * |
+ * +-- JSDependentString base / -
+ * |
+ * +-- JSExternalString - / char array memory managed by embedding
+ * |
+ * +-- JSExtensibleString tracks total buffer capacity (including current text)
+ * |
+ * +-- JSInlineString (abstract) - / chars stored in header
+ * | |
+ * | +-- JSThinInlineString - / header is normal
+ * | |
+ * | +-- JSFatInlineString - / header is fat
+ * |
+ * JSAtom (abstract) - / string equality === pointer equality
+ * | |
+ * | +-- js::NormalAtom - JSLinearString + atom hash code
+ * | |
+ * | +-- js::FatInlineAtom - JSFatInlineString + atom hash code
+ * |
+ * js::PropertyName - / chars don't contain an index (uint32_t)
+ *
+ * Classes marked with (abstract) above are not literally C++ Abstract Base
+ * Classes (since there are no virtual functions, pure or not, in this
+ * hierarchy), but have the same meaning: there are no strings with this type as
+ * its most-derived type.
+ *
+ * Atoms can additionally be permanent, i.e. unable to be collected, and can
+ * be combined with other string types to create additional most-derived types
+ * that satisfy the invariants of more than one of the abovementioned
+ * most-derived types. Furthermore, each atom stores a hash number (based on its
+ * chars). This hash number is used as the key in the atoms table and when the
+ * atom is used as a key in a JS Map/Set.
+ *
+ * Derived string types can be queried from ancestor types via isX() and
+ * retrieved with asX() debug-only-checked casts.
+ *
+ * The ensureX() operations mutate 'this' in place so that its type becomes at
+ * least X (e.g., ensureLinear will change a JSRope into a JSLinearString). A
+ * short usage sketch follows this comment.
+ */
+// clang-format on
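+
+/*
+ * A minimal usage sketch of the hierarchy above (rooting and error checks
+ * omitted; the helpers are the ones declared in this module):
+ *
+ *   JSString* s = js::ConcatStrings<js::CanGC>(cx, left, right,
+ *                                              js::gc::Heap::Default);
+ *   // For sufficiently long inputs |s| is a JSRope; no chars are copied yet.
+ *
+ *   JSLinearString* linear = s->ensureLinear(cx);
+ *   // Requesting linear chars flattens the rope, possibly reusing an
+ *   // extensible leftmost buffer as described above.
+ *
+ *   JSLinearString* sub =
+ *       js::NewDependentString(cx, linear, 1, 3, js::gc::Heap::Default);
+ *   // Long enough substrings share the base's buffer; short ones may get
+ *   // inline or static strings instead.
+ *
+ *   JSAtom* atom = js::AtomizeChars(cx, chars, len);
+ *   // Atoms are canonical, so equality checks become pointer comparisons.
+ */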
+
+class JSString : public js::gc::CellWithLengthAndFlags {
+ protected:
+ static const size_t NUM_INLINE_CHARS_LATIN1 =
+ 2 * sizeof(void*) / sizeof(JS::Latin1Char);
+ static const size_t NUM_INLINE_CHARS_TWO_BYTE =
+ 2 * sizeof(void*) / sizeof(char16_t);
+
+ public:
+ // String length and flags are stored in the cell header.
+ MOZ_ALWAYS_INLINE
+ size_t length() const { return headerLengthField(); }
+ MOZ_ALWAYS_INLINE
+ uint32_t flags() const { return headerFlagsField(); }
+
+ protected:
+ /* Fields only apply to string types commented on the right. */
+ struct Data {
+ // Note: 32-bit length and flags fields are inherited from
+ // CellWithLengthAndFlags.
+
+ union {
+ union {
+ /* JS(Fat)InlineString */
+ JS::Latin1Char inlineStorageLatin1[NUM_INLINE_CHARS_LATIN1];
+ char16_t inlineStorageTwoByte[NUM_INLINE_CHARS_TWO_BYTE];
+ };
+ struct {
+ union {
+ const JS::Latin1Char* nonInlineCharsLatin1; /* JSLinearString, except
+ JS(Fat)InlineString */
+ const char16_t* nonInlineCharsTwoByte; /* JSLinearString, except
+ JS(Fat)InlineString */
+ JSString* left; /* JSRope */
+ JSRope* parent; /* Used in flattening */
+ } u2;
+ union {
+ JSLinearString* base; /* JSDependentString */
+ JSString* right; /* JSRope */
+ size_t capacity; /* JSLinearString (extensible) */
+ const JSExternalStringCallbacks*
+ externalCallbacks; /* JSExternalString */
+ } u3;
+ } s;
+ };
+ } d;
+
+ public:
+ /* Flags exposed only for jits */
+
+ /*
+ * Flag Encoding
+ *
+ * The first word of a JSString stores flags, index, and (on some
+ * platforms) the length. The flags store both the string's type and its
+ * character encoding.
+ *
+ * If LATIN1_CHARS_BIT is set, the string's characters are stored as Latin1
+ * instead of TwoByte. This flag can also be set for ropes, if both the
+ * left and right nodes are Latin1. Flattening will result in a Latin1
+ * string in this case.
+ *
+ * The other flags store the string's type. Instead of using a dense index
+ * to represent the most-derived type, string types are encoded to allow
+ * single-op tests for hot queries (isRope, isDependent, isAtom) which, in
+ * view of subtyping, would require slower (isX() || isY() || isZ()).
+ *
+ * The string type encoding can be summarized as follows. The "instance
+ * encoding" entry for a type specifies the flag bits used to create a
+ * string instance of that type. Abstract types have no instances and thus
+ * have no such entry. The "subtype predicate" entry for a type specifies
+ * the predicate used to query whether a JSString instance is subtype
+ * (reflexively) of that type.
+ *
+ * String Instance Subtype
+ * type encoding predicate
+ * -----------------------------------------
+ * Rope 000000 000 xxxx0x xxx
+ * Linear - xxxx1x xxx
+ * Dependent 000110 000 xxx1xx xxx
+ * External 100010 000 100010 xxx
+ * Extensible 010010 000 010010 xxx
+ * Inline 001010 000 xx1xxx xxx
+ * FatInline 011010 000 x11xxx xxx
+ * NormalAtom 000011 000 xxxxx1 xxx
+ * PermanentAtom 100011 000 1xxxx1 xxx
+ * InlineAtom - xx1xx1 xxx
+ * FatInlineAtom - x11xx1 xxx
+ *
+ * Bits 0..2 are reserved for use by the GC (see
+ * gc::CellFlagBitsReservedForGC). In particular, bit 0 is currently used for
+ * FORWARD_BIT for forwarded nursery cells. The other 2 bits are currently
+ * unused.
+ *
+ * Note that the first 4 flag bits 3..6 (from right to left in the previous
+ * table) have the following meaning and can be used for some hot queries:
+ *
+ * Bit 3: IsAtom (Atom, PermanentAtom)
+ * Bit 4: IsLinear
+ * Bit 5: IsDependent
+ * Bit 6: IsInline (Inline, FatInline)
+ *
+ * If INDEX_VALUE_BIT is set, bits 16 and up will also hold an integer index.
+ */
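+  /*
+   * Worked example: a Latin1 dependent string is initialized with
+   * INIT_DEPENDENT_FLAGS | LATIN1_CHARS_BIT, i.e. LINEAR_BIT | DEPENDENT_BIT
+   * | LATIN1_CHARS_BIT. The hot queries then reduce to single-bit tests:
+   * isRope() is !(flags() & LINEAR_BIT) and isDependent() is
+   * (flags() & DEPENDENT_BIT), with no chain of subtype checks.
+   */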
+
+ // The low bits of flag word are reserved by GC.
+ static_assert(js::gc::CellFlagBitsReservedForGC <= 3,
+ "JSString::flags must reserve enough bits for Cell");
+
+ static const uint32_t ATOM_BIT = js::Bit(3);
+ static const uint32_t LINEAR_BIT = js::Bit(4);
+ static const uint32_t DEPENDENT_BIT = js::Bit(5);
+ static const uint32_t INLINE_CHARS_BIT = js::Bit(6);
+
+ static const uint32_t EXTENSIBLE_FLAGS = LINEAR_BIT | js::Bit(7);
+ static const uint32_t EXTERNAL_FLAGS = LINEAR_BIT | js::Bit(8);
+
+ static const uint32_t FAT_INLINE_MASK = INLINE_CHARS_BIT | js::Bit(7);
+
+ /* Initial flags for various types of strings. */
+ static const uint32_t INIT_THIN_INLINE_FLAGS = LINEAR_BIT | INLINE_CHARS_BIT;
+ static const uint32_t INIT_FAT_INLINE_FLAGS = LINEAR_BIT | FAT_INLINE_MASK;
+ static const uint32_t INIT_ROPE_FLAGS = 0;
+ static const uint32_t INIT_LINEAR_FLAGS = LINEAR_BIT;
+ static const uint32_t INIT_DEPENDENT_FLAGS = LINEAR_BIT | DEPENDENT_BIT;
+
+ static const uint32_t TYPE_FLAGS_MASK = js::BitMask(9) - js::BitMask(3);
+ static_assert((TYPE_FLAGS_MASK & js::gc::HeaderWord::RESERVED_MASK) == 0,
+ "GC reserved bits must not be used for Strings");
+
+ static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
+
+ // Whether this atom's characters store an uint32 index value less than or
+ // equal to MAX_ARRAY_INDEX. Not used for non-atomized strings.
+ // See JSLinearString::isIndex.
+ static const uint32_t ATOM_IS_INDEX_BIT = js::Bit(10);
+
+ static const uint32_t INDEX_VALUE_BIT = js::Bit(11);
+ static const uint32_t INDEX_VALUE_SHIFT = 16;
+
+ // NON_DEDUP_BIT is used in string deduplication during tenuring.
+ static const uint32_t NON_DEDUP_BIT = js::Bit(12);
+
+ // If IN_STRING_TO_ATOM_CACHE is set, this string had an entry in the
+ // StringToAtomCache at some point. Note that GC can purge the cache without
+ // clearing this bit.
+ static const uint32_t IN_STRING_TO_ATOM_CACHE = js::Bit(13);
+
+ // Flags used during rope flattening that indicate what action to perform when
+ // returning to the rope's parent rope.
+ static const uint32_t FLATTEN_VISIT_RIGHT = js::Bit(14);
+ static const uint32_t FLATTEN_FINISH_NODE = js::Bit(15);
+ static const uint32_t FLATTEN_MASK =
+ FLATTEN_VISIT_RIGHT | FLATTEN_FINISH_NODE;
+
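+  // Note: PINNED_ATOM_BIT shares a bit with FLATTEN_FINISH_NODE. This is
+  // safe because the flattening flags are only used on ropes, which are
+  // never atoms.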
+ static const uint32_t PINNED_ATOM_BIT = js::Bit(15);
+ static const uint32_t PERMANENT_ATOM_MASK =
+ ATOM_BIT | PINNED_ATOM_BIT | js::Bit(8);
+
+ static const uint32_t MAX_LENGTH = JS::MaxStringLength;
+
+ static const JS::Latin1Char MAX_LATIN1_CHAR = 0xff;
+
+ /*
+ * Helper function to validate that a string of a given length is
+ * representable by a JSString. An allocation overflow is reported if false
+ * is returned.
+ */
+ static inline bool validateLength(JSContext* maybecx, size_t length);
+
+ template <js::AllowGC allowGC>
+ static inline bool validateLengthInternal(JSContext* maybecx, size_t length);
+
+ static constexpr size_t offsetOfFlags() { return offsetOfHeaderFlags(); }
+ static constexpr size_t offsetOfLength() { return offsetOfHeaderLength(); }
+
+ bool sameLengthAndFlags(const JSString& other) const {
+ return length() == other.length() && flags() == other.flags();
+ }
+
+ static void staticAsserts() {
+ static_assert(JSString::MAX_LENGTH < UINT32_MAX,
+ "Length must fit in 32 bits");
+ static_assert(
+ sizeof(JSString) == (offsetof(JSString, d.inlineStorageLatin1) +
+ NUM_INLINE_CHARS_LATIN1 * sizeof(char)),
+ "Inline Latin1 chars must fit in a JSString");
+ static_assert(
+ sizeof(JSString) == (offsetof(JSString, d.inlineStorageTwoByte) +
+ NUM_INLINE_CHARS_TWO_BYTE * sizeof(char16_t)),
+ "Inline char16_t chars must fit in a JSString");
+
+ /* Ensure js::shadow::String has the same layout. */
+ using JS::shadow::String;
+ static_assert(
+ JSString::offsetOfRawHeaderFlagsField() == offsetof(String, flags_),
+ "shadow::String flags offset must match JSString");
+#if JS_BITS_PER_WORD == 32
+ static_assert(JSString::offsetOfLength() == offsetof(String, length_),
+ "shadow::String length offset must match JSString");
+#endif
+ static_assert(offsetof(JSString, d.s.u2.nonInlineCharsLatin1) ==
+ offsetof(String, nonInlineCharsLatin1),
+ "shadow::String nonInlineChars offset must match JSString");
+ static_assert(offsetof(JSString, d.s.u2.nonInlineCharsTwoByte) ==
+ offsetof(String, nonInlineCharsTwoByte),
+ "shadow::String nonInlineChars offset must match JSString");
+ static_assert(
+ offsetof(JSString, d.s.u3.externalCallbacks) ==
+ offsetof(String, externalCallbacks),
+ "shadow::String externalCallbacks offset must match JSString");
+ static_assert(offsetof(JSString, d.inlineStorageLatin1) ==
+ offsetof(String, inlineStorageLatin1),
+ "shadow::String inlineStorage offset must match JSString");
+ static_assert(offsetof(JSString, d.inlineStorageTwoByte) ==
+ offsetof(String, inlineStorageTwoByte),
+ "shadow::String inlineStorage offset must match JSString");
+ static_assert(ATOM_BIT == String::ATOM_BIT,
+ "shadow::String::ATOM_BIT must match JSString::ATOM_BIT");
+ static_assert(LINEAR_BIT == String::LINEAR_BIT,
+ "shadow::String::LINEAR_BIT must match JSString::LINEAR_BIT");
+ static_assert(INLINE_CHARS_BIT == String::INLINE_CHARS_BIT,
+ "shadow::String::INLINE_CHARS_BIT must match "
+ "JSString::INLINE_CHARS_BIT");
+ static_assert(LATIN1_CHARS_BIT == String::LATIN1_CHARS_BIT,
+ "shadow::String::LATIN1_CHARS_BIT must match "
+ "JSString::LATIN1_CHARS_BIT");
+ static_assert(
+ TYPE_FLAGS_MASK == String::TYPE_FLAGS_MASK,
+ "shadow::String::TYPE_FLAGS_MASK must match JSString::TYPE_FLAGS_MASK");
+ static_assert(
+ EXTERNAL_FLAGS == String::EXTERNAL_FLAGS,
+ "shadow::String::EXTERNAL_FLAGS must match JSString::EXTERNAL_FLAGS");
+ }
+
+ /* Avoid silly compile errors in JSRope::flatten */
+ friend class JSRope;
+
+ friend class js::gc::RelocationOverlay;
+
+ protected:
+ template <typename CharT>
+ MOZ_ALWAYS_INLINE void setNonInlineChars(const CharT* chars);
+
+ template <typename CharT>
+ static MOZ_ALWAYS_INLINE void checkStringCharsArena(const CharT* chars) {
+#ifdef MOZ_DEBUG
+ js::AssertJSStringBufferInCorrectArena(chars);
+#endif
+ }
+
+ // Get correct non-inline chars enum arm for given type
+ template <typename CharT>
+ MOZ_ALWAYS_INLINE const CharT* nonInlineCharsRaw() const;
+
+ public:
+ MOZ_ALWAYS_INLINE
+ bool empty() const { return length() == 0; }
+
+ inline bool getChar(JSContext* cx, size_t index, char16_t* code);
+
+ /* Strings have either Latin1 or TwoByte chars. */
+ bool hasLatin1Chars() const { return flags() & LATIN1_CHARS_BIT; }
+ bool hasTwoByteChars() const { return !(flags() & LATIN1_CHARS_BIT); }
+
+ /* Strings might contain cached indexes. */
+ bool hasIndexValue() const { return flags() & INDEX_VALUE_BIT; }
+ uint32_t getIndexValue() const {
+ MOZ_ASSERT(hasIndexValue());
+ MOZ_ASSERT(isLinear());
+ return flags() >> INDEX_VALUE_SHIFT;
+ }
+
+ inline size_t allocSize() const;
+
+ /* Fallible conversions to more-derived string types. */
+
+ inline JSLinearString* ensureLinear(JSContext* cx);
+
+ /* Type query and debug-checked casts */
+
+ MOZ_ALWAYS_INLINE
+ bool isRope() const { return !(flags() & LINEAR_BIT); }
+
+ MOZ_ALWAYS_INLINE
+ JSRope& asRope() const {
+ MOZ_ASSERT(isRope());
+ return *(JSRope*)this;
+ }
+
+ MOZ_ALWAYS_INLINE
+ bool isLinear() const { return flags() & LINEAR_BIT; }
+
+ MOZ_ALWAYS_INLINE
+ JSLinearString& asLinear() const {
+ MOZ_ASSERT(JSString::isLinear());
+ return *(JSLinearString*)this;
+ }
+
+ MOZ_ALWAYS_INLINE
+ bool isDependent() const { return flags() & DEPENDENT_BIT; }
+
+ MOZ_ALWAYS_INLINE
+ JSDependentString& asDependent() const {
+ MOZ_ASSERT(isDependent());
+ return *(JSDependentString*)this;
+ }
+
+ MOZ_ALWAYS_INLINE
+ bool isExtensible() const {
+ return (flags() & TYPE_FLAGS_MASK) == EXTENSIBLE_FLAGS;
+ }
+
+ MOZ_ALWAYS_INLINE
+ JSExtensibleString& asExtensible() const {
+ MOZ_ASSERT(isExtensible());
+ return *(JSExtensibleString*)this;
+ }
+
+ MOZ_ALWAYS_INLINE
+ bool isInline() const { return flags() & INLINE_CHARS_BIT; }
+
+ MOZ_ALWAYS_INLINE
+ JSInlineString& asInline() const {
+ MOZ_ASSERT(isInline());
+ return *(JSInlineString*)this;
+ }
+
+ MOZ_ALWAYS_INLINE
+ bool isFatInline() const {
+ return (flags() & FAT_INLINE_MASK) == FAT_INLINE_MASK;
+ }
+
+ /* For hot code, prefer other type queries. */
+ bool isExternal() const {
+ return (flags() & TYPE_FLAGS_MASK) == EXTERNAL_FLAGS;
+ }
+
+ MOZ_ALWAYS_INLINE
+ JSExternalString& asExternal() const {
+ MOZ_ASSERT(isExternal());
+ return *(JSExternalString*)this;
+ }
+
+ MOZ_ALWAYS_INLINE
+ bool isAtom() const { return flags() & ATOM_BIT; }
+
+ MOZ_ALWAYS_INLINE
+ bool isPermanentAtom() const {
+ return (flags() & PERMANENT_ATOM_MASK) == PERMANENT_ATOM_MASK;
+ }
+
+ MOZ_ALWAYS_INLINE
+ JSAtom& asAtom() const {
+ MOZ_ASSERT(isAtom());
+ return *(JSAtom*)this;
+ }
+
+ MOZ_ALWAYS_INLINE
+ void setNonDeduplicatable() { setFlagBit(NON_DEDUP_BIT); }
+
+ MOZ_ALWAYS_INLINE
+ void clearNonDeduplicatable() { clearFlagBit(NON_DEDUP_BIT); }
+
+ MOZ_ALWAYS_INLINE
+ bool isDeduplicatable() { return !(flags() & NON_DEDUP_BIT); }
+
+ void setInStringToAtomCache() {
+ MOZ_ASSERT(!isAtom());
+ setFlagBit(IN_STRING_TO_ATOM_CACHE);
+ }
+ bool inStringToAtomCache() const { return flags() & IN_STRING_TO_ATOM_CACHE; }
+
+ // Fills |array| with various strings that represent the different string
+ // kinds and character encodings.
+ static bool fillWithRepresentatives(JSContext* cx,
+ JS::Handle<js::ArrayObject*> array);
+
+ /* Only called by the GC for dependent strings. */
+
+ inline bool hasBase() const { return isDependent(); }
+
+ inline JSLinearString* base() const;
+
+  // The base may have been forwarded by the GC, in which case it is a
+  // relocation overlay. This returns the relocation overlay if the base has
+  // been forwarded, and the actual base otherwise.
+ inline JSLinearString* nurseryBaseOrRelocOverlay() const;
+
+ inline bool canOwnDependentChars() const;
+
+ inline void setBase(JSLinearString* newBase);
+
+ void traceBase(JSTracer* trc);
+
+ /* Only called by the GC for strings with the AllocKind::STRING kind. */
+
+ inline void finalize(JS::GCContext* gcx);
+
+ /* Gets the number of bytes that the chars take on the heap. */
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
+
+ bool ownsMallocedChars() const {
+ return isLinear() && !isInline() && !isDependent() && !isExternal();
+ }
+
+  /* Encode as many of the string's scalar values as can fit as UTF-8
+   * into the caller-provided buffer, replacing unpaired surrogates
+   * with the REPLACEMENT CHARACTER.
+ *
+ * Returns the number of code units read and the number of code units
+ * written.
+ *
+ * The semantics of this method match the semantics of
+ * TextEncoder.encodeInto().
+ *
+   * This function doesn't modify the representation -- rope, linear,
+   * flat, atom, etc. -- of this string. If this string is a rope,
+   * it also doesn't modify the representation of its left or right
+   * halves, nor of their halves, and so on recursively.
+ *
+ * Returns mozilla::Nothing on OOM.
+ */
+ mozilla::Maybe<std::tuple<size_t, size_t>> encodeUTF8Partial(
+ const JS::AutoRequireNoGC& nogc, mozilla::Span<char> buffer) const;
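+  /*
+   * A minimal usage sketch (buffer size and variable names are
+   * illustrative):
+   *
+   *   JS::AutoCheckCannotGC nogc;
+   *   char buf[64];
+   *   if (auto res = str->encodeUTF8Partial(nogc, mozilla::Span(buf))) {
+   *     auto [unitsRead, bytesWritten] = *res;
+   *     // Consume |bytesWritten| bytes of UTF-8; continue encoding from
+   *     // code unit |unitsRead| if the string wasn't fully consumed.
+   *   } else {
+   *     // OOM.
+   *   }
+   */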
+
+ private:
+ // To help avoid writing Spectre-unsafe code, we only allow MacroAssembler
+ // to call the method below.
+ friend class js::jit::MacroAssembler;
+ static size_t offsetOfNonInlineChars() {
+ static_assert(
+ offsetof(JSString, d.s.u2.nonInlineCharsTwoByte) ==
+ offsetof(JSString, d.s.u2.nonInlineCharsLatin1),
+ "nonInlineCharsTwoByte and nonInlineCharsLatin1 must have same offset");
+ return offsetof(JSString, d.s.u2.nonInlineCharsTwoByte);
+ }
+
+ public:
+ static const JS::TraceKind TraceKind = JS::TraceKind::String;
+
+ JS::Zone* zone() const {
+ if (isTenured()) {
+ // Allow permanent atoms to be accessed across zones and runtimes.
+ if (isPermanentAtom()) {
+ return zoneFromAnyThread();
+ }
+ return asTenured().zone();
+ }
+ return nurseryZone();
+ }
+
+ void setLengthAndFlags(uint32_t len, uint32_t flags) {
+ setHeaderLengthAndFlags(len, flags);
+ }
+ void setFlagBit(uint32_t flag) { setHeaderFlagBit(flag); }
+ void clearFlagBit(uint32_t flag) { clearHeaderFlagBit(flag); }
+
+ void fixupAfterMovingGC() {}
+
+ js::gc::AllocKind getAllocKind() const {
+ using js::gc::AllocKind;
+ AllocKind kind;
+ if (isAtom()) {
+ if (isFatInline()) {
+ kind = AllocKind::FAT_INLINE_ATOM;
+ } else {
+ kind = AllocKind::ATOM;
+ }
+ } else if (isFatInline()) {
+ kind = AllocKind::FAT_INLINE_STRING;
+ } else if (isExternal()) {
+ kind = AllocKind::EXTERNAL_STRING;
+ } else {
+ kind = AllocKind::STRING;
+ }
+ MOZ_ASSERT_IF(isTenured(), kind == asTenured().getAllocKind());
+ return kind;
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+ void dump(); // Debugger-friendly stderr dump.
+ void dump(js::GenericPrinter& out);
+ void dumpNoNewline(js::GenericPrinter& out);
+ void dumpCharsNoNewline(js::GenericPrinter& out);
+ void dumpRepresentation(js::GenericPrinter& out, int indent) const;
+ void dumpRepresentationHeader(js::GenericPrinter& out,
+ const char* subclass) const;
+ void dumpCharsNoQuote(js::GenericPrinter& out);
+
+ template <typename CharT>
+ static void dumpChars(const CharT* s, size_t len, js::GenericPrinter& out);
+
+ template <typename CharT>
+ static void dumpCharsNoQuote(const CharT* s, size_t len,
+ js::GenericPrinter& out);
+
+ bool equals(const char* s);
+#endif
+
+ void traceChildren(JSTracer* trc);
+
+ // Override base class implementation to tell GC about permanent atoms.
+ bool isPermanentAndMayBeShared() const { return isPermanentAtom(); }
+
+ static void addCellAddressToStoreBuffer(js::gc::StoreBuffer* buffer,
+ js::gc::Cell** cellp) {
+ buffer->putCell(reinterpret_cast<JSString**>(cellp));
+ }
+
+ static void removeCellAddressFromStoreBuffer(js::gc::StoreBuffer* buffer,
+ js::gc::Cell** cellp) {
+ buffer->unputCell(reinterpret_cast<JSString**>(cellp));
+ }
+
+ private:
+ JSString(const JSString& other) = delete;
+ void operator=(const JSString& other) = delete;
+
+ protected:
+ JSString() = default;
+};
+
+class JSRope : public JSString {
+ friend class js::gc::CellAllocator;
+
+ template <typename CharT>
+ js::UniquePtr<CharT[], JS::FreePolicy> copyCharsInternal(
+ JSContext* cx, arena_id_t destArenaId) const;
+
+ enum UsingBarrier : bool { NoBarrier = false, WithIncrementalBarrier = true };
+
+ friend class JSString;
+ JSLinearString* flatten(JSContext* maybecx);
+
+ JSLinearString* flattenInternal();
+ template <UsingBarrier usingBarrier>
+ JSLinearString* flattenInternal();
+
+ template <UsingBarrier usingBarrier, typename CharT>
+ static JSLinearString* flattenInternal(JSRope* root);
+
+ template <UsingBarrier usingBarrier>
+ static void ropeBarrierDuringFlattening(JSRope* rope);
+
+ JSRope(JSString* left, JSString* right, size_t length);
+
+ public:
+ template <js::AllowGC allowGC>
+ static inline JSRope* new_(
+ JSContext* cx,
+ typename js::MaybeRooted<JSString*, allowGC>::HandleType left,
+ typename js::MaybeRooted<JSString*, allowGC>::HandleType right,
+ size_t length, js::gc::Heap = js::gc::Heap::Default);
+
+ js::UniquePtr<JS::Latin1Char[], JS::FreePolicy> copyLatin1Chars(
+ JSContext* maybecx, arena_id_t destArenaId) const;
+ JS::UniqueTwoByteChars copyTwoByteChars(JSContext* maybecx,
+ arena_id_t destArenaId) const;
+
+ template <typename CharT>
+ js::UniquePtr<CharT[], JS::FreePolicy> copyChars(
+ JSContext* maybecx, arena_id_t destArenaId) const;
+
+  // Hash function specific to ropes that avoids allocating a temporary
+  // linear string. There are still allocations internally, so it's
+  // technically fallible.
+  //
+  // Returns the same value as if this were a linear string being hashed.
+  [[nodiscard]] bool hash(uint32_t* outHash) const;
+
+ // The process of flattening a rope temporarily overwrites the left pointer of
+ // interior nodes in the rope DAG with the parent pointer.
+ bool isBeingFlattened() const { return flags() & FLATTEN_MASK; }
+
+ JSString* leftChild() const {
+ MOZ_ASSERT(isRope());
+ MOZ_ASSERT(!isBeingFlattened()); // Flattening overwrites this field.
+ return d.s.u2.left;
+ }
+
+ JSString* rightChild() const {
+ MOZ_ASSERT(isRope());
+ return d.s.u3.right;
+ }
+
+ void traceChildren(JSTracer* trc);
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+ void dumpRepresentation(js::GenericPrinter& out, int indent) const;
+#endif
+
+ private:
+ // To help avoid writing Spectre-unsafe code, we only allow MacroAssembler
+ // to call the methods below.
+ friend class js::jit::MacroAssembler;
+
+ static size_t offsetOfLeft() { return offsetof(JSRope, d.s.u2.left); }
+ static size_t offsetOfRight() { return offsetof(JSRope, d.s.u3.right); }
+};
+
+static_assert(sizeof(JSRope) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
+/*
+ * There are optimized entry points for some string allocation functions.
+ *
+ * The meaning of suffix:
+ * * "MaybeDeflate": for char16_t variant, characters can fit Latin1
+ * * "DontDeflate": for char16_t variant, characters don't fit Latin1
+ * * "NonStatic": characters don't match StaticStrings
+ * * "ValidLength": length fits JSString::MAX_LENGTH
+ */
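+
+/*
+ * For example, for char16_t characters,
+ * NewStringCopyNDontDeflateNonStaticValidLength (StringType.cpp) combines
+ * three of these assumptions: the characters don't fit Latin1, they don't
+ * match any static string, and the length has already been validated against
+ * JSString::MAX_LENGTH.
+ */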
+
+class JSLinearString : public JSString {
+ friend class JSString;
+ friend class JS::AutoStableStringChars;
+ friend class js::gc::TenuringTracer;
+ friend class js::gc::CellAllocator;
+
+ /* Vacuous and therefore unimplemented. */
+ JSLinearString* ensureLinear(JSContext* cx) = delete;
+ bool isLinear() const = delete;
+ JSLinearString& asLinear() const = delete;
+
+ JSLinearString(const char16_t* chars, size_t length);
+ JSLinearString(const JS::Latin1Char* chars, size_t length);
+
+ protected:
+ // Used to construct subclasses that do a full initialization themselves.
+ JSLinearString() = default;
+
+ /* Returns void pointer to latin1/twoByte chars, for finalizers. */
+ MOZ_ALWAYS_INLINE
+ void* nonInlineCharsRaw() const {
+ MOZ_ASSERT(!isInline());
+ static_assert(
+ offsetof(JSLinearString, d.s.u2.nonInlineCharsTwoByte) ==
+ offsetof(JSLinearString, d.s.u2.nonInlineCharsLatin1),
+ "nonInlineCharsTwoByte and nonInlineCharsLatin1 must have same offset");
+ return (void*)d.s.u2.nonInlineCharsTwoByte;
+ }
+
+ MOZ_ALWAYS_INLINE const JS::Latin1Char* rawLatin1Chars() const;
+ MOZ_ALWAYS_INLINE const char16_t* rawTwoByteChars() const;
+
+ public:
+ template <js::AllowGC allowGC, typename CharT>
+ static inline JSLinearString* new_(
+ JSContext* cx, js::UniquePtr<CharT[], JS::FreePolicy> chars,
+ size_t length, js::gc::Heap heap);
+
+ template <js::AllowGC allowGC, typename CharT>
+ static inline JSLinearString* newValidLength(
+ JSContext* cx, js::UniquePtr<CharT[], JS::FreePolicy> chars,
+ size_t length, js::gc::Heap heap);
+
+ // Convert a plain linear string to an extensible string. For testing. The
+ // caller must ensure that it is a plain or extensible string already, and
+ // that `capacity` is adequate.
+ JSExtensibleString& makeExtensible(size_t capacity);
+
+ template <typename CharT>
+ MOZ_ALWAYS_INLINE const CharT* nonInlineChars(
+ const JS::AutoRequireNoGC& nogc) const;
+
+ MOZ_ALWAYS_INLINE
+ const JS::Latin1Char* nonInlineLatin1Chars(
+ const JS::AutoRequireNoGC& nogc) const {
+ MOZ_ASSERT(!isInline());
+ MOZ_ASSERT(hasLatin1Chars());
+ return d.s.u2.nonInlineCharsLatin1;
+ }
+
+ MOZ_ALWAYS_INLINE
+ const char16_t* nonInlineTwoByteChars(const JS::AutoRequireNoGC& nogc) const {
+ MOZ_ASSERT(!isInline());
+ MOZ_ASSERT(hasTwoByteChars());
+ return d.s.u2.nonInlineCharsTwoByte;
+ }
+
+ template <typename CharT>
+ MOZ_ALWAYS_INLINE const CharT* chars(const JS::AutoRequireNoGC& nogc) const;
+
+ MOZ_ALWAYS_INLINE
+ const JS::Latin1Char* latin1Chars(const JS::AutoRequireNoGC& nogc) const {
+ return rawLatin1Chars();
+ }
+
+ MOZ_ALWAYS_INLINE
+ const char16_t* twoByteChars(const JS::AutoRequireNoGC& nogc) const {
+ return rawTwoByteChars();
+ }
+
+ mozilla::Range<const JS::Latin1Char> latin1Range(
+ const JS::AutoRequireNoGC& nogc) const {
+ MOZ_ASSERT(JSString::isLinear());
+ return mozilla::Range<const JS::Latin1Char>(latin1Chars(nogc), length());
+ }
+
+ mozilla::Range<const char16_t> twoByteRange(
+ const JS::AutoRequireNoGC& nogc) const {
+ MOZ_ASSERT(JSString::isLinear());
+ return mozilla::Range<const char16_t>(twoByteChars(nogc), length());
+ }
+
+ MOZ_ALWAYS_INLINE
+ char16_t latin1OrTwoByteChar(size_t index) const {
+ MOZ_ASSERT(JSString::isLinear());
+ MOZ_ASSERT(index < length());
+ JS::AutoCheckCannotGC nogc;
+ return hasLatin1Chars() ? latin1Chars(nogc)[index]
+ : twoByteChars(nogc)[index];
+ }
+
+ bool isIndexSlow(uint32_t* indexp) const {
+ MOZ_ASSERT(JSString::isLinear());
+ size_t len = length();
+ if (len == 0 || len > js::UINT32_CHAR_BUFFER_LENGTH) {
+ return false;
+ }
+ JS::AutoCheckCannotGC nogc;
+ if (hasLatin1Chars()) {
+ const JS::Latin1Char* s = latin1Chars(nogc);
+ return mozilla::IsAsciiDigit(*s) &&
+ js::CheckStringIsIndex(s, len, indexp);
+ }
+ const char16_t* s = twoByteChars(nogc);
+ return mozilla::IsAsciiDigit(*s) && js::CheckStringIsIndex(s, len, indexp);
+ }
+
+ // Returns true if this string's characters store an unsigned 32-bit integer
+ // value less than or equal to MAX_ARRAY_INDEX, initializing *indexp to that
+ // value if so. Leading '0' isn't allowed except 0 itself.
+ // (Thus if calling isIndex returns true, js::IndexToString(cx, *indexp) will
+ // be a string equal to this string.)
+ inline bool isIndex(uint32_t* indexp) const;
+
+ void maybeInitializeIndexValue(uint32_t index, bool allowAtom = false) {
+ MOZ_ASSERT(JSString::isLinear());
+ MOZ_ASSERT_IF(hasIndexValue(), getIndexValue() == index);
+ MOZ_ASSERT_IF(!allowAtom, !isAtom());
+
+ if (hasIndexValue() || index > UINT16_MAX) {
+ return;
+ }
+
+ mozilla::DebugOnly<uint32_t> containedIndex;
+ MOZ_ASSERT(isIndexSlow(&containedIndex));
+ MOZ_ASSERT(index == containedIndex);
+
+ setFlagBit((index << INDEX_VALUE_SHIFT) | INDEX_VALUE_BIT);
+ MOZ_ASSERT(getIndexValue() == index);
+ }
+
+ /*
+ * Returns a property name represented by this string, or null on failure.
+ * You must verify that this is not an index per isIndex before calling
+ * this method.
+ */
+ inline js::PropertyName* toPropertyName(JSContext* cx);
+
+ inline void finalize(JS::GCContext* gcx);
+ inline size_t allocSize() const;
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+ void dumpRepresentationChars(js::GenericPrinter& out, int indent) const;
+ void dumpRepresentation(js::GenericPrinter& out, int indent) const;
+#endif
+
+ // Make a partially-initialized string safe for finalization.
+ inline void disownCharsBecauseError();
+};
+
+static_assert(sizeof(JSLinearString) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
+class JSDependentString : public JSLinearString {
+ friend class JSString;
+ friend class js::gc::CellAllocator;
+
+ JSDependentString(JSLinearString* base, size_t start, size_t length);
+
+ // For JIT string allocation.
+ JSDependentString() = default;
+
+ /* Vacuous and therefore unimplemented. */
+ bool isDependent() const = delete;
+ JSDependentString& asDependent() const = delete;
+
+ /* The offset of this string's chars in base->chars(). */
+ MOZ_ALWAYS_INLINE size_t baseOffset() const {
+ MOZ_ASSERT(JSString::isDependent());
+ JS::AutoCheckCannotGC nogc;
+ size_t offset;
+ if (hasTwoByteChars()) {
+ offset = twoByteChars(nogc) - base()->twoByteChars(nogc);
+ } else {
+ offset = latin1Chars(nogc) - base()->latin1Chars(nogc);
+ }
+ MOZ_ASSERT(offset < base()->length());
+ return offset;
+ }
+
+ public:
+ // This may return an inline string if the chars fit rather than a dependent
+ // string.
+ static inline JSLinearString* new_(JSContext* cx, JSLinearString* base,
+ size_t start, size_t length,
+ js::gc::Heap heap);
+
+ template <typename T>
+ void relocateNonInlineChars(T chars, size_t offset) {
+ setNonInlineChars(chars + offset);
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+ void dumpRepresentation(js::GenericPrinter& out, int indent) const;
+#endif
+
+ private:
+ // To help avoid writing Spectre-unsafe code, we only allow MacroAssembler
+ // to call the method below.
+ friend class js::jit::MacroAssembler;
+
+ inline static size_t offsetOfBase() {
+ return offsetof(JSDependentString, d.s.u3.base);
+ }
+};
+
+static_assert(sizeof(JSDependentString) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
+class JSExtensibleString : public JSLinearString {
+ /* Vacuous and therefore unimplemented. */
+ bool isExtensible() const = delete;
+ JSExtensibleString& asExtensible() const = delete;
+
+ public:
+ MOZ_ALWAYS_INLINE
+ size_t capacity() const {
+ MOZ_ASSERT(JSString::isExtensible());
+ return d.s.u3.capacity;
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+ void dumpRepresentation(js::GenericPrinter& out, int indent) const;
+#endif
+};
+
+static_assert(sizeof(JSExtensibleString) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
+class JSInlineString : public JSLinearString {
+ public:
+ MOZ_ALWAYS_INLINE
+ const JS::Latin1Char* latin1Chars(const JS::AutoRequireNoGC& nogc) const {
+ MOZ_ASSERT(JSString::isInline());
+ MOZ_ASSERT(hasLatin1Chars());
+ return d.inlineStorageLatin1;
+ }
+
+ MOZ_ALWAYS_INLINE
+ const char16_t* twoByteChars(const JS::AutoRequireNoGC& nogc) const {
+ MOZ_ASSERT(JSString::isInline());
+ MOZ_ASSERT(hasTwoByteChars());
+ return d.inlineStorageTwoByte;
+ }
+
+ template <typename CharT>
+ static bool lengthFits(size_t length);
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+ void dumpRepresentation(js::GenericPrinter& out, int indent) const;
+#endif
+
+ private:
+ // To help avoid writing Spectre-unsafe code, we only allow MacroAssembler
+ // to call the method below.
+ friend class js::jit::MacroAssembler;
+ static size_t offsetOfInlineStorage() {
+ return offsetof(JSInlineString, d.inlineStorageTwoByte);
+ }
+};
+
+static_assert(sizeof(JSInlineString) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
+/*
+ * On 32-bit platforms, JSThinInlineString can store 8 Latin1 characters or 4
+ * TwoByte characters inline. On 64-bit platforms, these numbers are 16 and 8,
+ * respectively: the limits are NUM_INLINE_CHARS_LATIN1 and
+ * NUM_INLINE_CHARS_TWO_BYTE, i.e. 2 * sizeof(void*) bytes of inline storage
+ * divided by the character size.
+ */
+class JSThinInlineString : public JSInlineString {
+ friend class js::gc::CellAllocator;
+
+ // The constructors return a mutable pointer to the data, because the first
+ // thing any creator will do is copy in the string value. This also
+ // conveniently allows doing overload resolution on CharT.
+ explicit JSThinInlineString(size_t length, JS::Latin1Char** chars);
+ explicit JSThinInlineString(size_t length, char16_t** chars);
+
+ // For JIT string allocation.
+ JSThinInlineString() = default;
+
+ public:
+ static const size_t MAX_LENGTH_LATIN1 = NUM_INLINE_CHARS_LATIN1;
+ static const size_t MAX_LENGTH_TWO_BYTE = NUM_INLINE_CHARS_TWO_BYTE;
+
+ template <js::AllowGC allowGC>
+ static inline JSThinInlineString* new_(JSContext* cx, js::gc::Heap heap);
+
+ template <typename CharT>
+ static bool lengthFits(size_t length);
+};
+
+static_assert(sizeof(JSThinInlineString) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
+/*
+ * On both 32-bit and 64-bit platforms, MAX_LENGTH_TWO_BYTE is 12 and
+ * MAX_LENGTH_LATIN1 is 24. This is deliberate, in order to minimize potential
+ * performance differences between 32-bit and 64-bit platforms.
+ *
+ * There are still some differences due to NUM_INLINE_CHARS_* being different.
+ * E.g. TwoByte strings of length 5--8 will be JSFatInlineStrings on 32-bit
+ * platforms and JSThinInlineStrings on 64-bit platforms. But the more
+ * significant transition from inline strings to non-inline strings occurs at
+ * length 12 (for TwoByte strings) and 24 (for Latin1 strings) on both
+ * 32-bit and 64-bit platforms.
+ */
+class JSFatInlineString : public JSInlineString {
+ friend class js::gc::CellAllocator;
+
+ static const size_t INLINE_EXTENSION_CHARS_LATIN1 =
+ 24 - NUM_INLINE_CHARS_LATIN1;
+ static const size_t INLINE_EXTENSION_CHARS_TWO_BYTE =
+ 12 - NUM_INLINE_CHARS_TWO_BYTE;
+
+ // The constructors return a mutable pointer to the data, because the first
+ // thing any creator will do is copy in the string value. This also
+ // conveniently allows doing overload resolution on CharT.
+ explicit JSFatInlineString(size_t length, JS::Latin1Char** chars);
+ explicit JSFatInlineString(size_t length, char16_t** chars);
+
+ // For JIT string allocation.
+ JSFatInlineString() = default;
+
+ protected: /* to fool clang into not warning this is unused */
+ union {
+ char inlineStorageExtensionLatin1[INLINE_EXTENSION_CHARS_LATIN1];
+ char16_t inlineStorageExtensionTwoByte[INLINE_EXTENSION_CHARS_TWO_BYTE];
+ };
+
+ public:
+ template <js::AllowGC allowGC>
+ static inline JSFatInlineString* new_(JSContext* cx, js::gc::Heap heap);
+
+ static const size_t MAX_LENGTH_LATIN1 =
+ JSString::NUM_INLINE_CHARS_LATIN1 + INLINE_EXTENSION_CHARS_LATIN1;
+
+ static const size_t MAX_LENGTH_TWO_BYTE =
+ JSString::NUM_INLINE_CHARS_TWO_BYTE + INLINE_EXTENSION_CHARS_TWO_BYTE;
+
+ template <typename CharT>
+ static bool lengthFits(size_t length);
+
+ // Only called by the GC for strings with the AllocKind::FAT_INLINE_STRING
+ // kind.
+ MOZ_ALWAYS_INLINE void finalize(JS::GCContext* gcx);
+};
+
+static_assert(sizeof(JSFatInlineString) % js::gc::CellAlignBytes == 0,
+ "fat inline strings shouldn't waste space up to the next cell "
+ "boundary");
+
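+// For example (illustrative, using the limits above): on 64-bit platforms a
+// TwoByte string of length 10 exceeds JSThinInlineString's limit of 8 but
+// fits JSFatInlineString's limit of 12; at length 13 it can no longer be
+// stored inline at all.
+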
+class JSExternalString : public JSLinearString {
+ friend class js::gc::CellAllocator;
+
+ JSExternalString(const char16_t* chars, size_t length,
+ const JSExternalStringCallbacks* callbacks);
+
+ /* Vacuous and therefore unimplemented. */
+ bool isExternal() const = delete;
+ JSExternalString& asExternal() const = delete;
+
+ public:
+ static inline JSExternalString* new_(
+ JSContext* cx, const char16_t* chars, size_t length,
+ const JSExternalStringCallbacks* callbacks);
+
+ const JSExternalStringCallbacks* callbacks() const {
+ MOZ_ASSERT(JSString::isExternal());
+ return d.s.u3.externalCallbacks;
+ }
+
+ // External chars are never allocated inline or in the nursery, so we can
+ // safely expose this without requiring an AutoCheckCannotGC argument.
+ const char16_t* twoByteChars() const { return rawTwoByteChars(); }
+
+ // Only called by the GC for strings with the AllocKind::EXTERNAL_STRING
+ // kind.
+ inline void finalize(JS::GCContext* gcx);
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+ void dumpRepresentation(js::GenericPrinter& out, int indent) const;
+#endif
+};
+
+static_assert(sizeof(JSExternalString) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
+class JSAtom : public JSLinearString {
+ /* Vacuous and therefore unimplemented. */
+ bool isAtom() const = delete;
+ JSAtom& asAtom() const = delete;
+
+ public:
+ template <typename CharT>
+ static inline JSAtom* newValidLength(
+ JSContext* cx, js::UniquePtr<CharT[], JS::FreePolicy> chars,
+ size_t length, js::HashNumber hash);
+
+ /* Returns the PropertyName for this. isIndex() must be false. */
+ inline js::PropertyName* asPropertyName();
+
+ MOZ_ALWAYS_INLINE
+ bool isPermanent() const { return JSString::isPermanentAtom(); }
+
+ MOZ_ALWAYS_INLINE
+ void makePermanent() {
+ MOZ_ASSERT(JSString::isAtom());
+ setFlagBit(PERMANENT_ATOM_MASK);
+ }
+
+ MOZ_ALWAYS_INLINE bool isIndex() const {
+ MOZ_ASSERT(JSString::isAtom());
+ mozilla::DebugOnly<uint32_t> index;
+ MOZ_ASSERT(!!(flags() & ATOM_IS_INDEX_BIT) == isIndexSlow(&index));
+ return flags() & ATOM_IS_INDEX_BIT;
+ }
+ MOZ_ALWAYS_INLINE bool isIndex(uint32_t* index) const {
+ MOZ_ASSERT(JSString::isAtom());
+ if (!isIndex()) {
+ return false;
+ }
+ *index = hasIndexValue() ? getIndexValue() : getIndexSlow();
+ return true;
+ }
+
+ uint32_t getIndexSlow() const;
+
+ void setIsIndex(uint32_t index) {
+ MOZ_ASSERT(JSString::isAtom());
+ setFlagBit(ATOM_IS_INDEX_BIT);
+ maybeInitializeIndexValue(index, /* allowAtom = */ true);
+ }
+
+ MOZ_ALWAYS_INLINE bool isPinned() const { return flags() & PINNED_ATOM_BIT; }
+
+ void setPinned() {
+ MOZ_ASSERT(!isPinned());
+ setFlagBit(PINNED_ATOM_BIT);
+ }
+
+ inline js::HashNumber hash() const;
+ inline void initHash(js::HashNumber hash);
+
+#if defined(DEBUG) || defined(JS_JITSPEW) || defined(JS_CACHEIR_SPEW)
+ void dump(js::GenericPrinter& out);
+ void dump();
+#endif
+};
+
+static_assert(sizeof(JSAtom) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
+namespace js {
+
+class NormalAtom : public JSAtom {
+ friend class gc::CellAllocator;
+
+ protected:
+ HashNumber hash_;
+
+ // Inline atoms, mimicking JSThinInlineString constructors.
+ explicit NormalAtom(size_t length, JS::Latin1Char** chars,
+ js::HashNumber hash);
+ explicit NormalAtom(size_t length, char16_t** chars, js::HashNumber hash);
+
+ // Out of line atoms, mimicking JSLinearString constructors.
+ NormalAtom(const char16_t* chars, size_t length, js::HashNumber hash);
+ NormalAtom(const JS::Latin1Char* chars, size_t length, js::HashNumber hash);
+
+ public:
+ HashNumber hash() const { return hash_; }
+ void initHash(HashNumber hash) { hash_ = hash; }
+
+ static constexpr size_t offsetOfHash() { return offsetof(NormalAtom, hash_); }
+};
+
+static_assert(sizeof(NormalAtom) == sizeof(JSString) + sizeof(uint64_t),
+ "NormalAtom must have size of a string + HashNumber, "
+ "aligned to gc::CellAlignBytes");
+
+class FatInlineAtom : public JSAtom {
+ friend class gc::CellAllocator;
+
+ protected: // Silence Clang unused-field warning.
+ char inlineStorage_[sizeof(JSFatInlineString) - sizeof(JSString)];
+ HashNumber hash_;
+
+ // Mimicking JSFatInlineString constructors.
+ explicit FatInlineAtom(size_t length, JS::Latin1Char** chars,
+ js::HashNumber hash);
+ explicit FatInlineAtom(size_t length, char16_t** chars, js::HashNumber hash);
+
+ public:
+ HashNumber hash() const { return hash_; }
+ void initHash(HashNumber hash) { hash_ = hash; }
+
+ inline void finalize(JS::GCContext* gcx);
+
+ static constexpr size_t offsetOfHash() {
+ return offsetof(FatInlineAtom, hash_);
+ }
+};
+
+static_assert(
+ sizeof(FatInlineAtom) == sizeof(JSFatInlineString) + sizeof(uint64_t),
+ "FatInlineAtom must have size of a fat inline string + HashNumber, "
+ "aligned to gc::CellAlignBytes");
+
+// When an algorithm does not need a string represented as a single linear
+// array of characters, this range utility may be used to traverse the string
+// as a sequence of linear arrays of characters. This avoids flattening ropes.
+template <size_t Size = 16>
+class StringSegmentRange {
+ // If malloc() shows up in any profiles from this vector, we can add a new
+ // StackAllocPolicy which stashes a reusable freed-at-gc buffer in the cx.
+ using StackVector = JS::GCVector<JSString*, Size>;
+ Rooted<StackVector> stack;
+ Rooted<JSLinearString*> cur;
+
+ bool settle(JSString* str) {
+ while (str->isRope()) {
+ JSRope& rope = str->asRope();
+ if (!stack.append(rope.rightChild())) {
+ return false;
+ }
+ str = rope.leftChild();
+ }
+ cur = &str->asLinear();
+ return true;
+ }
+
+ public:
+ explicit StringSegmentRange(JSContext* cx)
+ : stack(cx, StackVector(cx)), cur(cx) {}
+
+ [[nodiscard]] bool init(JSString* str) {
+ MOZ_ASSERT(stack.empty());
+ return settle(str);
+ }
+
+ bool empty() const { return cur == nullptr; }
+
+ JSLinearString* front() const {
+ MOZ_ASSERT(!cur->isRope());
+ return cur;
+ }
+
+ [[nodiscard]] bool popFront() {
+ MOZ_ASSERT(!empty());
+ if (stack.empty()) {
+ cur = nullptr;
+ return true;
+ }
+ return settle(stack.popCopy());
+ }
+};
+
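+// A minimal usage sketch (illustrative only): visit each linear segment of a
+// possibly-ropey string without flattening it.
+//
+//   StringSegmentRange<> r(cx);
+//   if (!r.init(str)) {
+//     return false;  // OOM while growing the segment stack
+//   }
+//   while (!r.empty()) {
+//     JSLinearString* seg = r.front();
+//     // ... process seg's characters ...
+//     if (!r.popFront()) {
+//       return false;
+//     }
+//   }
+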
+} // namespace js
+
+inline js::HashNumber JSAtom::hash() const {
+ if (isFatInline()) {
+ return static_cast<const js::FatInlineAtom*>(this)->hash();
+ }
+ return static_cast<const js::NormalAtom*>(this)->hash();
+}
+
+inline void JSAtom::initHash(js::HashNumber hash) {
+ if (isFatInline()) {
+ return static_cast<js::FatInlineAtom*>(this)->initHash(hash);
+ }
+ return static_cast<js::NormalAtom*>(this)->initHash(hash);
+}
+
+namespace js {
+
+/*
+ * Represents an atomized string which does not contain an index (that is, an
+ * unsigned 32-bit value). Thus for any PropertyName propname,
+ * ToString(ToUint32(propname)) never equals propname.
+ *
+ * To more concretely illustrate the utility of PropertyName, consider that it
+ * is used to partition, in a type-safe manner, the ways to refer to a
+ * property, as follows:
+ *
+ * - uint32_t indexes,
+ * - PropertyName strings which don't encode uint32_t indexes, and
+ * - jsspecial special properties (non-ES5 properties like object-valued
+ * jsids, JSID_EMPTY, JSID_VOID, and maybe in the future Harmony-proposed
+ * private names).
+ */
+class PropertyName : public JSAtom {
+ private:
+ /* Vacuous and therefore unimplemented. */
+ PropertyName* asPropertyName() = delete;
+};
+
+static_assert(sizeof(PropertyName) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
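+// Concretely (an illustrative example): the atom "42" encodes the uint32_t
+// index 42, since ToString(ToUint32("42")) == "42", so it is not a
+// PropertyName; for the atom "foo", ToString(ToUint32("foo")) == "0", which
+// differs from "foo", so "foo" is a PropertyName.
+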
+static MOZ_ALWAYS_INLINE jsid NameToId(PropertyName* name) {
+ return JS::PropertyKey::NonIntAtom(name);
+}
+
+using PropertyNameVector = JS::GCVector<PropertyName*>;
+
+template <typename CharT>
+void CopyChars(CharT* dest, const JSLinearString& str);
+
+static inline UniqueChars StringToNewUTF8CharsZ(JSContext* cx, JSString& str) {
+ JS::AutoCheckCannotGC nogc;
+
+ JSLinearString* linear = str.ensureLinear(cx);
+ if (!linear) {
+ return nullptr;
+ }
+
+ return UniqueChars(
+ linear->hasLatin1Chars()
+ ? JS::CharsToNewUTF8CharsZ(cx, linear->latin1Range(nogc)).c_str()
+ : JS::CharsToNewUTF8CharsZ(cx, linear->twoByteRange(nogc)).c_str());
+}
+
+/**
+ * Allocate a string with the given contents. If |allowGC == CanGC|, this may
+ * trigger a GC.
+ */
+template <js::AllowGC allowGC, typename CharT>
+extern JSLinearString* NewString(JSContext* cx,
+ UniquePtr<CharT[], JS::FreePolicy> chars,
+ size_t length,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+/* Like NewString, but doesn't try to deflate to Latin1. */
+template <js::AllowGC allowGC, typename CharT>
+extern JSLinearString* NewStringDontDeflate(
+ JSContext* cx, UniquePtr<CharT[], JS::FreePolicy> chars, size_t length,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+extern JSLinearString* NewDependentString(
+ JSContext* cx, JSString* base, size_t start, size_t length,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+/* Take ownership of an array of Latin1Chars. */
+extern JSLinearString* NewLatin1StringZ(
+ JSContext* cx, UniqueChars chars,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+/* Copy a counted string and GC-allocate a descriptor for it. */
+template <js::AllowGC allowGC, typename CharT>
+extern JSLinearString* NewStringCopyN(
+ JSContext* cx, const CharT* s, size_t n,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+template <js::AllowGC allowGC>
+inline JSLinearString* NewStringCopyN(
+ JSContext* cx, const char* s, size_t n,
+ js::gc::Heap heap = js::gc::Heap::Default) {
+ return NewStringCopyN<allowGC>(cx, reinterpret_cast<const Latin1Char*>(s), n,
+ heap);
+}
+
+template <typename CharT>
+extern JSAtom* NewAtomCopyNMaybeDeflateValidLength(JSContext* cx,
+ const CharT* s, size_t n,
+ js::HashNumber hash);
+
+template <typename CharT>
+extern JSAtom* NewAtomCopyNDontDeflateValidLength(JSContext* cx, const CharT* s,
+ size_t n,
+ js::HashNumber hash);
+
+/* Copy a counted string and GC-allocate a descriptor for it. */
+template <js::AllowGC allowGC, typename CharT>
+inline JSLinearString* NewStringCopy(
+ JSContext* cx, mozilla::Span<const CharT> s,
+ js::gc::Heap heap = js::gc::Heap::Default) {
+ return NewStringCopyN<allowGC>(cx, s.data(), s.size(), heap);
+}
+
+/* Copy a counted string and GC-allocate a descriptor for it. */
+template <js::AllowGC allowGC, typename CharT>
+inline JSLinearString* NewStringCopy(
+ JSContext* cx, std::basic_string_view<CharT> s,
+ js::gc::Heap heap = js::gc::Heap::Default) {
+ return NewStringCopyN<allowGC>(cx, s.data(), s.size(), heap);
+}
+
+/* Like NewStringCopyN, but doesn't try to deflate to Latin1. */
+template <js::AllowGC allowGC, typename CharT>
+extern JSLinearString* NewStringCopyNDontDeflate(
+ JSContext* cx, const CharT* s, size_t n,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+template <js::AllowGC allowGC, typename CharT>
+extern JSLinearString* NewStringCopyNDontDeflateNonStaticValidLength(
+ JSContext* cx, const CharT* s, size_t n,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+/* Copy a C string and GC-allocate a descriptor for it. */
+template <js::AllowGC allowGC>
+inline JSLinearString* NewStringCopyZ(
+ JSContext* cx, const char16_t* s,
+ js::gc::Heap heap = js::gc::Heap::Default) {
+ return NewStringCopyN<allowGC>(cx, s, js_strlen(s), heap);
+}
+
+template <js::AllowGC allowGC>
+inline JSLinearString* NewStringCopyZ(
+ JSContext* cx, const char* s, js::gc::Heap heap = js::gc::Heap::Default) {
+ return NewStringCopyN<allowGC>(cx, s, strlen(s), heap);
+}
+
+extern JSLinearString* NewStringCopyUTF8N(
+ JSContext* cx, const JS::UTF8Chars utf8,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+inline JSLinearString* NewStringCopyUTF8Z(
+ JSContext* cx, const JS::ConstUTF8CharsZ utf8,
+ js::gc::Heap heap = js::gc::Heap::Default) {
+ return NewStringCopyUTF8N(
+ cx, JS::UTF8Chars(utf8.c_str(), strlen(utf8.c_str())), heap);
+}
+
+JSString* NewMaybeExternalString(JSContext* cx, const char16_t* s, size_t n,
+ const JSExternalStringCallbacks* callbacks,
+ bool* allocatedExternal,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+static_assert(sizeof(HashNumber) == 4);
+
+template <AllowGC allowGC>
+extern JSString* ConcatStrings(
+ JSContext* cx, typename MaybeRooted<JSString*, allowGC>::HandleType left,
+ typename MaybeRooted<JSString*, allowGC>::HandleType right,
+ js::gc::Heap heap = js::gc::Heap::Default);
+
+/*
+ * Test if strings are equal. The caller can call the function even if str1
+ * or str2 are not GC-allocated things.
+ */
+extern bool EqualStrings(JSContext* cx, JSString* str1, JSString* str2,
+ bool* result);
+
+/* Use the infallible method instead! */
+extern bool EqualStrings(JSContext* cx, JSLinearString* str1,
+ JSLinearString* str2, bool* result) = delete;
+
+/* EqualStrings is infallible on linear strings. */
+extern bool EqualStrings(const JSLinearString* str1,
+ const JSLinearString* str2);
+
+/**
+ * Compare two strings that are known to be the same length.
+ * Exposed for the JITs; for ordinary uses, EqualStrings() is more sensible.
+ *
+ * Precondition: str1->length() == str2->length().
+ */
+extern bool EqualChars(const JSLinearString* str1, const JSLinearString* str2);
+
+/*
+ * Return less than, equal to, or greater than zero depending on whether
+ * `s1[0..len1]` is less than, equal to, or greater than `s2`.
+ */
+extern int32_t CompareChars(const char16_t* s1, size_t len1,
+ JSLinearString* s2);
+
+/*
+ * Compare two strings, like CompareChars, but store the result in `*result`.
+ * This flattens the strings and therefore can fail.
+ */
+extern bool CompareStrings(JSContext* cx, JSString* str1, JSString* str2,
+ int32_t* result);
+
+/*
+ * Compare two strings, like CompareChars.
+ */
+extern int32_t CompareStrings(const JSLinearString* str1,
+ const JSLinearString* str2);
+
+/**
+ * Return true if the string contains only ASCII characters.
+ */
+extern bool StringIsAscii(JSLinearString* str);
+
+/*
+ * Return true if the string matches the given sequence of ASCII bytes.
+ */
+extern bool StringEqualsAscii(JSLinearString* str, const char* asciiBytes);
+/*
+ * Return true if the string matches the given sequence of ASCII
+ * bytes. The sequence of ASCII bytes must have length "length". The
+ * length should not include the trailing null, if any.
+ */
+extern bool StringEqualsAscii(JSLinearString* str, const char* asciiBytes,
+ size_t length);
+
+template <size_t N>
+bool StringEqualsLiteral(JSLinearString* str, const char (&asciiBytes)[N]) {
+ MOZ_ASSERT(asciiBytes[N - 1] == '\0');
+ return StringEqualsAscii(str, asciiBytes, N - 1);
+}
+
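+// For example (illustrative), StringEqualsLiteral(str, "length") compares
+// str against the six characters of "length", with the count computed at
+// compile time from the array size rather than by calling strlen.
+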
+extern int StringFindPattern(JSLinearString* text, JSLinearString* pat,
+ size_t start);
+
+/**
+ * Return true if the string contains a pattern at |start|.
+ *
+ * Precondition: `text` is long enough that this might be true;
+ * that is, it has at least `start + pat->length()` characters.
+ */
+extern bool HasSubstringAt(JSLinearString* text, JSLinearString* pat,
+ size_t start);
+
+/*
+ * Computes |str|'s substring for the range [beginInt, beginInt + lengthInt).
+ * Negative, overlarge, swapped, etc. |beginInt| and |lengthInt| are forbidden
+ * and constitute API misuse.
+ */
+JSString* SubstringKernel(JSContext* cx, HandleString str, int32_t beginInt,
+ int32_t lengthInt);
+
+inline js::HashNumber HashStringChars(JSLinearString* str) {
+ JS::AutoCheckCannotGC nogc;
+ size_t len = str->length();
+ return str->hasLatin1Chars()
+ ? mozilla::HashString(str->latin1Chars(nogc), len)
+ : mozilla::HashString(str->twoByteChars(nogc), len);
+}
+
+/*** Conversions ************************************************************/
+
+/*
+ * Convert a string to a printable C string.
+ *
+ * Asserts if the input contains any non-ASCII characters.
+ */
+UniqueChars EncodeAscii(JSContext* cx, JSString* str);
+
+/*
+ * Convert a string to a printable C string.
+ */
+UniqueChars EncodeLatin1(JSContext* cx, JSString* str);
+
+enum class IdToPrintableBehavior : bool {
+ /*
+ * Request the printable representation of an identifier.
+ */
+ IdIsIdentifier,
+
+ /*
+ * Request the printable representation of a property key.
+ */
+ IdIsPropertyKey
+};
+
+/*
+ * Convert a jsid to a printable C string encoded in UTF-8.
+ */
+extern UniqueChars IdToPrintableUTF8(JSContext* cx, HandleId id,
+ IdToPrintableBehavior behavior);
+
+/*
+ * Convert a non-string value to a string, returning null after reporting an
+ * error, otherwise returning a new string reference.
+ */
+template <AllowGC allowGC>
+extern JSString* ToStringSlow(
+ JSContext* cx, typename MaybeRooted<Value, allowGC>::HandleType arg);
+
+/*
+ * Convert the given value to a string. This method includes an inline
+ * fast-path for the case where the value is already a string; if the value is
+ * known not to be a string, use ToStringSlow instead.
+ */
+template <AllowGC allowGC>
+static MOZ_ALWAYS_INLINE JSString* ToString(JSContext* cx, JS::HandleValue v) {
+ if (v.isString()) {
+ return v.toString();
+ }
+ return ToStringSlow<allowGC>(cx, v);
+}
+
+/*
+ * This function implements E-262-3 section 9.8, toString. Convert the given
+ * value to a string of characters appended to the given buffer. On error, the
+ * passed buffer may have partial results appended.
+ */
+inline bool ValueToStringBuffer(JSContext* cx, const Value& v,
+ StringBuffer& sb);
+
+} /* namespace js */
+
+MOZ_ALWAYS_INLINE bool JSString::getChar(JSContext* cx, size_t index,
+ char16_t* code) {
+ MOZ_ASSERT(index < length());
+
+ /*
+ * Optimization for one level deep ropes.
+ * This is common for the following pattern:
+ *
+ * while() {
+ * text = text.substr(0, x) + "bla" + text.substr(x)
+   *     text.charCodeAt(x + 1)
+ * }
+ *
+ * Note: keep this in sync with MacroAssembler::loadStringChar and
+ * CanAttachStringChar.
+ */
+ JSString* str;
+ if (isRope()) {
+ JSRope* rope = &asRope();
+ if (uint32_t(index) < rope->leftChild()->length()) {
+ str = rope->leftChild();
+ } else {
+ str = rope->rightChild();
+ index -= rope->leftChild()->length();
+ }
+ } else {
+ str = this;
+ }
+
+ if (!str->ensureLinear(cx)) {
+ return false;
+ }
+
+ *code = str->asLinear().latin1OrTwoByteChar(index);
+ return true;
+}
+
+MOZ_ALWAYS_INLINE JSLinearString* JSString::ensureLinear(JSContext* cx) {
+ return isLinear() ? &asLinear() : asRope().flatten(cx);
+}
+
+inline JSLinearString* JSString::base() const {
+ MOZ_ASSERT(hasBase());
+ MOZ_ASSERT(!d.s.u3.base->isInline());
+ return d.s.u3.base;
+}
+
+inline JSLinearString* JSString::nurseryBaseOrRelocOverlay() const {
+ MOZ_ASSERT(hasBase());
+ return d.s.u3.base;
+}
+
+inline bool JSString::canOwnDependentChars() const {
+ // A string that could own the malloced chars used by another (dependent)
+ // string. It will not have a base and must be linear and non-inline.
+ return isLinear() && !isInline() && !hasBase();
+}
+
+inline void JSString::setBase(JSLinearString* newBase) {
+ MOZ_ASSERT(hasBase());
+ MOZ_ASSERT(!newBase->isInline());
+ d.s.u3.base = newBase;
+}
+
+template <>
+MOZ_ALWAYS_INLINE const char16_t* JSLinearString::nonInlineChars(
+ const JS::AutoRequireNoGC& nogc) const {
+ return nonInlineTwoByteChars(nogc);
+}
+
+template <>
+MOZ_ALWAYS_INLINE const JS::Latin1Char* JSLinearString::nonInlineChars(
+ const JS::AutoRequireNoGC& nogc) const {
+ return nonInlineLatin1Chars(nogc);
+}
+
+template <>
+MOZ_ALWAYS_INLINE const char16_t* JSLinearString::chars(
+ const JS::AutoRequireNoGC& nogc) const {
+ return rawTwoByteChars();
+}
+
+template <>
+MOZ_ALWAYS_INLINE const JS::Latin1Char* JSLinearString::chars(
+ const JS::AutoRequireNoGC& nogc) const {
+ return rawLatin1Chars();
+}
+
+template <>
+MOZ_ALWAYS_INLINE js::UniquePtr<JS::Latin1Char[], JS::FreePolicy>
+JSRope::copyChars<JS::Latin1Char>(JSContext* maybecx,
+ arena_id_t destArenaId) const {
+ return copyLatin1Chars(maybecx, destArenaId);
+}
+
+template <>
+MOZ_ALWAYS_INLINE JS::UniqueTwoByteChars JSRope::copyChars<char16_t>(
+ JSContext* maybecx, arena_id_t destArenaId) const {
+ return copyTwoByteChars(maybecx, destArenaId);
+}
+
+template <>
+MOZ_ALWAYS_INLINE bool JSThinInlineString::lengthFits<JS::Latin1Char>(
+ size_t length) {
+ return length <= MAX_LENGTH_LATIN1;
+}
+
+template <>
+MOZ_ALWAYS_INLINE bool JSThinInlineString::lengthFits<char16_t>(size_t length) {
+ return length <= MAX_LENGTH_TWO_BYTE;
+}
+
+template <>
+MOZ_ALWAYS_INLINE bool JSFatInlineString::lengthFits<JS::Latin1Char>(
+ size_t length) {
+ static_assert(
+ (INLINE_EXTENSION_CHARS_LATIN1 * sizeof(char)) % js::gc::CellAlignBytes ==
+ 0,
+ "fat inline strings' Latin1 characters don't exactly "
+ "fill subsequent cells and thus are wasteful");
+  static_assert(MAX_LENGTH_LATIN1 ==
+                    (sizeof(JSFatInlineString) -
+                     offsetof(JSFatInlineString, d.inlineStorageLatin1)) /
+                        sizeof(char),
+                "MAX_LENGTH_LATIN1 must equal the inline Latin1 storage "
+                "count");
+
+ return length <= MAX_LENGTH_LATIN1;
+}
+
+template <>
+MOZ_ALWAYS_INLINE bool JSFatInlineString::lengthFits<char16_t>(size_t length) {
+ static_assert((INLINE_EXTENSION_CHARS_TWO_BYTE * sizeof(char16_t)) %
+ js::gc::CellAlignBytes ==
+ 0,
+ "fat inline strings' char16_t characters don't exactly "
+ "fill subsequent cells and thus are wasteful");
+  static_assert(MAX_LENGTH_TWO_BYTE ==
+                    (sizeof(JSFatInlineString) -
+                     offsetof(JSFatInlineString, d.inlineStorageTwoByte)) /
+                        sizeof(char16_t),
+                "MAX_LENGTH_TWO_BYTE must equal the inline char16_t storage "
+                "count");
+
+ return length <= MAX_LENGTH_TWO_BYTE;
+}
+
+template <>
+MOZ_ALWAYS_INLINE bool JSInlineString::lengthFits<JS::Latin1Char>(
+ size_t length) {
+ // If it fits in a fat inline string, it fits in any inline string.
+ return JSFatInlineString::lengthFits<JS::Latin1Char>(length);
+}
+
+template <>
+MOZ_ALWAYS_INLINE bool JSInlineString::lengthFits<char16_t>(size_t length) {
+ // If it fits in a fat inline string, it fits in any inline string.
+ return JSFatInlineString::lengthFits<char16_t>(length);
+}
+
+template <>
+MOZ_ALWAYS_INLINE void JSString::setNonInlineChars(const char16_t* chars) {
+ // Check that the new buffer is located in the StringBufferArena
+ checkStringCharsArena(chars);
+ d.s.u2.nonInlineCharsTwoByte = chars;
+}
+
+template <>
+MOZ_ALWAYS_INLINE void JSString::setNonInlineChars(
+ const JS::Latin1Char* chars) {
+ // Check that the new buffer is located in the StringBufferArena
+ checkStringCharsArena(chars);
+ d.s.u2.nonInlineCharsLatin1 = chars;
+}
+
+MOZ_ALWAYS_INLINE const JS::Latin1Char* JSLinearString::rawLatin1Chars() const {
+ MOZ_ASSERT(JSString::isLinear());
+ MOZ_ASSERT(hasLatin1Chars());
+ return isInline() ? d.inlineStorageLatin1 : d.s.u2.nonInlineCharsLatin1;
+}
+
+MOZ_ALWAYS_INLINE const char16_t* JSLinearString::rawTwoByteChars() const {
+ MOZ_ASSERT(JSString::isLinear());
+ MOZ_ASSERT(hasTwoByteChars());
+ return isInline() ? d.inlineStorageTwoByte : d.s.u2.nonInlineCharsTwoByte;
+}
+
+inline js::PropertyName* JSAtom::asPropertyName() {
+ MOZ_ASSERT(!isIndex());
+ return static_cast<js::PropertyName*>(this);
+}
+
+inline bool JSLinearString::isIndex(uint32_t* indexp) const {
+ MOZ_ASSERT(JSString::isLinear());
+
+ if (isAtom()) {
+ return asAtom().isIndex(indexp);
+ }
+
+ if (JSString::hasIndexValue()) {
+ *indexp = getIndexValue();
+ return true;
+ }
+
+ return isIndexSlow(indexp);
+}
+
+inline size_t JSLinearString::allocSize() const {
+ MOZ_ASSERT(ownsMallocedChars());
+
+ size_t charSize =
+ hasLatin1Chars() ? sizeof(JS::Latin1Char) : sizeof(char16_t);
+ size_t count = isExtensible() ? asExtensible().capacity() : length();
+ return count * charSize;
+}
+
+inline size_t JSString::allocSize() const {
+ return ownsMallocedChars() ? asLinear().allocSize() : 0;
+}
+
+namespace js {
+namespace gc {
+template <>
+inline JSString* Cell::as<JSString>() {
+ MOZ_ASSERT(is<JSString>());
+ return reinterpret_cast<JSString*>(this);
+}
+
+template <>
+inline JSString* TenuredCell::as<JSString>() {
+ MOZ_ASSERT(is<JSString>());
+ return reinterpret_cast<JSString*>(this);
+}
+
+// StringRelocationOverlay assists with updating the chars pointers of
+// dependent strings when their base strings are deduplicated. It stores:
+// - nursery chars of a root base (root base is a non-dependent base), or
+// - nursery base of a dependent string
+// StringRelocationOverlay exploits the fact that the 3rd word of a JSString's
+// RelocationOverlay is not utilized and can be used to store extra information.
+class StringRelocationOverlay : public RelocationOverlay {
+ union {
+ // nursery chars of a root base
+ const JS::Latin1Char* nurseryCharsLatin1;
+ const char16_t* nurseryCharsTwoByte;
+
+    // The nursery base of a dependent string: either still the base itself,
+    // or, if the base has already been forwarded, a StringRelocationOverlay.
+ JSLinearString* nurseryBaseOrRelocOverlay;
+ };
+
+ public:
+ explicit StringRelocationOverlay(Cell* dst) : RelocationOverlay(dst) {
+ static_assert(sizeof(JSString) >= sizeof(StringRelocationOverlay));
+ }
+
+ static const StringRelocationOverlay* fromCell(const Cell* cell) {
+ return static_cast<const StringRelocationOverlay*>(cell);
+ }
+
+ static StringRelocationOverlay* fromCell(Cell* cell) {
+ return static_cast<StringRelocationOverlay*>(cell);
+ }
+
+ void setNext(StringRelocationOverlay* next) {
+ MOZ_ASSERT(isForwarded());
+ next_ = next;
+ }
+
+ StringRelocationOverlay* next() const {
+ MOZ_ASSERT(isForwarded());
+ return (StringRelocationOverlay*)next_;
+ }
+
+ template <typename CharT>
+ MOZ_ALWAYS_INLINE const CharT* savedNurseryChars() const;
+
+  MOZ_ALWAYS_INLINE const JS::Latin1Char* savedNurseryCharsLatin1() const {
+ return nurseryCharsLatin1;
+ }
+
+  MOZ_ALWAYS_INLINE const char16_t* savedNurseryCharsTwoByte() const {
+ return nurseryCharsTwoByte;
+ }
+
+ JSLinearString* savedNurseryBaseOrRelocOverlay() const {
+ return nurseryBaseOrRelocOverlay;
+ }
+
+ // Transform a nursery string to a StringRelocationOverlay that is forwarded
+ // to a tenured string.
+ inline static StringRelocationOverlay* forwardCell(JSString* src, Cell* dst) {
+ MOZ_ASSERT(!src->isForwarded());
+ MOZ_ASSERT(!dst->isForwarded());
+
+ JS::AutoCheckCannotGC nogc;
+ StringRelocationOverlay* overlay;
+
+ // Initialize the overlay, and remember the nursery base string if there is
+ // one, or nursery non-inlined chars if it can be the root base of other
+ // strings.
+ //
+    // The non-inline chars of a tenured dependent string should point into
+    // the tenured root base's chars at the appropriate offset. For example,
+    // a dependent string may start at the 3rd char of its root base. During
+    // tenuring, the offsets of dependent strings can be computed from the
+    // nursery non-inline chars remembered in overlays.
+ if (src->hasBase()) {
+ auto nurseryBaseOrRelocOverlay = src->nurseryBaseOrRelocOverlay();
+ overlay = new (src) StringRelocationOverlay(dst);
+ overlay->nurseryBaseOrRelocOverlay = nurseryBaseOrRelocOverlay;
+ } else if (src->canOwnDependentChars()) {
+ if (src->hasTwoByteChars()) {
+ auto nurseryCharsTwoByte = src->asLinear().twoByteChars(nogc);
+ overlay = new (src) StringRelocationOverlay(dst);
+ overlay->nurseryCharsTwoByte = nurseryCharsTwoByte;
+ } else {
+ auto nurseryCharsLatin1 = src->asLinear().latin1Chars(nogc);
+ overlay = new (src) StringRelocationOverlay(dst);
+ overlay->nurseryCharsLatin1 = nurseryCharsLatin1;
+ }
+ } else {
+ overlay = new (src) StringRelocationOverlay(dst);
+ }
+
+ return overlay;
+ }
+};
+
+template <>
+MOZ_ALWAYS_INLINE const JS::Latin1Char*
+StringRelocationOverlay::savedNurseryChars() const {
+ return savedNurseryCharsLatin1();
+}
+
+template <>
+MOZ_ALWAYS_INLINE const char16_t* StringRelocationOverlay::savedNurseryChars()
+ const {
+ return savedNurseryCharsTwoByte();
+}
+
+} // namespace gc
+} // namespace js
+
+#endif /* vm_StringType_h */
diff --git a/js/src/vm/StructuredClone.cpp b/js/src/vm/StructuredClone.cpp
new file mode 100644
index 0000000000..7eafc89113
--- /dev/null
+++ b/js/src/vm/StructuredClone.cpp
@@ -0,0 +1,4123 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file implements the structured data algorithms of
+ * https://html.spec.whatwg.org/multipage/structured-data.html
+ *
+ * The spec is in two parts:
+ *
+ * - StructuredSerialize examines a JS value and produces a graph of Records.
+ * - StructuredDeserialize walks the Records and produces a new JS value.
+ *
+ * The differences between our implementation and the spec are minor:
+ *
+ * - We call the two phases "write" and "read".
+ * - Our algorithms use an explicit work stack, rather than recursion.
+ * - Serialized data is a flat array of bytes, not a (possibly cyclic) graph
+ * of "Records".
+ * - As a consequence, we handle non-treelike object graphs differently.
+ * We serialize objects that appear in multiple places in the input as
+ * backreferences, using sequential integer indexes.
+ * See `JSStructuredCloneReader::allObjs`, our take on the "memory" map
+ * in the spec's StructuredDeserialize.
+ */
+
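+// An illustrative example of the backreference scheme (not from the spec):
+// cloning the result of
+//
+//   var a = {};
+//   var v = [a, a];
+//
+// writes the array record, then an object record for `a` (which is assigned
+// the next sequential index in the "memory" map), and then a backreference
+// to that index for the second element rather than a second copy of `a`.
+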
+#include "js/StructuredClone.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h"
+
+#include <algorithm>
+#include <memory>
+#include <utility>
+
+#include "jsdate.h"
+
+#include "builtin/DataViewObject.h"
+#include "builtin/MapObject.h"
+#include "js/Array.h" // JS::GetArrayLength, JS::IsArrayObject
+#include "js/ArrayBuffer.h" // JS::{ArrayBufferHasData,DetachArrayBuffer,IsArrayBufferObject,New{,Mapped}ArrayBufferWithContents,ReleaseMappedArrayBufferContents}
+#include "js/Date.h"
+#include "js/experimental/TypedData.h" // JS_NewDataView, JS_New{{Ui,I}nt{8,16,32},Float{32,64},Uint8Clamped,Big{Ui,I}nt64}ArrayWithBuffer
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/GCHashTable.h"
+#include "js/Object.h" // JS::GetBuiltinClass
+#include "js/PropertyAndElement.h" // JS_GetElement
+#include "js/RegExpFlags.h" // JS::RegExpFlag, JS::RegExpFlags
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/SharedArrayBuffer.h" // JS::IsSharedArrayBufferObject
+#include "js/Wrapper.h"
+#include "util/DifferentialTesting.h"
+#include "vm/BigIntType.h"
+#include "vm/ErrorObject.h"
+#include "vm/JSContext.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/RegExpObject.h"
+#include "vm/SavedFrame.h"
+#include "vm/SharedArrayObject.h"
+#include "vm/TypedArrayObject.h"
+#include "wasm/WasmJS.h"
+
+#include "vm/ArrayObject-inl.h"
+#include "vm/Compartment-inl.h"
+#include "vm/ErrorObject-inl.h"
+#include "vm/InlineCharBuffer-inl.h"
+#include "vm/JSContext-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/ObjectOperations-inl.h"
+#include "vm/Realm-inl.h"
+
+using namespace js;
+
+using JS::CanonicalizeNaN;
+using JS::GetBuiltinClass;
+using JS::RegExpFlag;
+using JS::RegExpFlags;
+using JS::RootedValueVector;
+using mozilla::AssertedCast;
+using mozilla::BitwiseCast;
+using mozilla::Maybe;
+using mozilla::NativeEndian;
+using mozilla::NumbersAreIdentical;
+
+// When you make updates here, make sure you consider whether you need to bump
+// the value of JS_STRUCTURED_CLONE_VERSION in js/public/StructuredClone.h. You
+// will likely need to increment the version if anything at all changes in the
+// serialization format.
+//
+// Note that SCTAG_END_OF_KEYS is written into the serialized form and should
+// have a stable ID; it need not be at the end of the list and should not be
+// used for sizing data structures.
+
+enum StructuredDataType : uint32_t {
+ // Structured data types provided by the engine
+ SCTAG_FLOAT_MAX = 0xFFF00000,
+ SCTAG_HEADER = 0xFFF10000,
+ SCTAG_NULL = 0xFFFF0000,
+ SCTAG_UNDEFINED,
+ SCTAG_BOOLEAN,
+ SCTAG_INT32,
+ SCTAG_STRING,
+ SCTAG_DATE_OBJECT,
+ SCTAG_REGEXP_OBJECT,
+ SCTAG_ARRAY_OBJECT,
+ SCTAG_OBJECT_OBJECT,
+ SCTAG_ARRAY_BUFFER_OBJECT_V2, // Old version, for backwards compatibility.
+ SCTAG_BOOLEAN_OBJECT,
+ SCTAG_STRING_OBJECT,
+ SCTAG_NUMBER_OBJECT,
+ SCTAG_BACK_REFERENCE_OBJECT,
+ SCTAG_DO_NOT_USE_1, // Required for backwards compatibility
+ SCTAG_DO_NOT_USE_2, // Required for backwards compatibility
+ SCTAG_TYPED_ARRAY_OBJECT_V2, // Old version, for backwards compatibility.
+ SCTAG_MAP_OBJECT,
+ SCTAG_SET_OBJECT,
+ SCTAG_END_OF_KEYS,
+ SCTAG_DO_NOT_USE_3, // Required for backwards compatibility
+ SCTAG_DATA_VIEW_OBJECT_V2, // Old version, for backwards compatibility.
+ SCTAG_SAVED_FRAME_OBJECT,
+
+ // No new tags before principals.
+ SCTAG_JSPRINCIPALS,
+ SCTAG_NULL_JSPRINCIPALS,
+ SCTAG_RECONSTRUCTED_SAVED_FRAME_PRINCIPALS_IS_SYSTEM,
+ SCTAG_RECONSTRUCTED_SAVED_FRAME_PRINCIPALS_IS_NOT_SYSTEM,
+
+ SCTAG_SHARED_ARRAY_BUFFER_OBJECT,
+ SCTAG_SHARED_WASM_MEMORY_OBJECT,
+
+ SCTAG_BIGINT,
+ SCTAG_BIGINT_OBJECT,
+
+ SCTAG_ARRAY_BUFFER_OBJECT,
+ SCTAG_TYPED_ARRAY_OBJECT,
+ SCTAG_DATA_VIEW_OBJECT,
+
+ SCTAG_ERROR_OBJECT,
+
+ SCTAG_TYPED_ARRAY_V1_MIN = 0xFFFF0100,
+ SCTAG_TYPED_ARRAY_V1_INT8 = SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Int8,
+ SCTAG_TYPED_ARRAY_V1_UINT8 = SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Uint8,
+ SCTAG_TYPED_ARRAY_V1_INT16 = SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Int16,
+ SCTAG_TYPED_ARRAY_V1_UINT16 = SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Uint16,
+ SCTAG_TYPED_ARRAY_V1_INT32 = SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Int32,
+ SCTAG_TYPED_ARRAY_V1_UINT32 = SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Uint32,
+ SCTAG_TYPED_ARRAY_V1_FLOAT32 = SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Float32,
+ SCTAG_TYPED_ARRAY_V1_FLOAT64 = SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Float64,
+ SCTAG_TYPED_ARRAY_V1_UINT8_CLAMPED =
+ SCTAG_TYPED_ARRAY_V1_MIN + Scalar::Uint8Clamped,
+ // BigInt64 and BigUint64 are not supported in the v1 format.
+ SCTAG_TYPED_ARRAY_V1_MAX = SCTAG_TYPED_ARRAY_V1_UINT8_CLAMPED,
+
+ // Define a separate range of numbers for Transferable-only tags, since
+ // they are not used for persistent clone buffers and therefore do not
+ // require bumping JS_STRUCTURED_CLONE_VERSION.
+ SCTAG_TRANSFER_MAP_HEADER = 0xFFFF0200,
+ SCTAG_TRANSFER_MAP_PENDING_ENTRY,
+ SCTAG_TRANSFER_MAP_ARRAY_BUFFER,
+ SCTAG_TRANSFER_MAP_STORED_ARRAY_BUFFER,
+ SCTAG_TRANSFER_MAP_END_OF_BUILTIN_TYPES,
+
+ SCTAG_END_OF_BUILTIN_TYPES
+};
+
+/*
+ * Format of transfer map:
+ * <SCTAG_TRANSFER_MAP_HEADER, TransferableMapHeader(UNREAD|TRANSFERRED)>
+ * numTransferables (64 bits)
+ * array of:
+ * <SCTAG_TRANSFER_MAP_*, TransferableOwnership>
+ * pointer (64 bits)
+ *     extraData (64 bits), e.g. byte length for ArrayBuffers
+ */
+
+// Data associated with an SCTAG_TRANSFER_MAP_HEADER that tells whether the
+// contents have been read out yet or not.
+enum TransferableMapHeader { SCTAG_TM_UNREAD = 0, SCTAG_TM_TRANSFERRED };
+
+static inline uint64_t PairToUInt64(uint32_t tag, uint32_t data) {
+ return uint64_t(data) | (uint64_t(tag) << 32);
+}
+
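+// For example (illustrative): with SCTAG_BOOLEAN == 0xFFFF0002, the pair
+// (SCTAG_BOOLEAN, 1) packs to 0xFFFF000200000001, i.e. the tag in the high
+// 32 bits and the data in the low 32 bits.
+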
+namespace js {
+
+template <typename T, typename AllocPolicy>
+struct BufferIterator {
+ using BufferList = mozilla::BufferList<AllocPolicy>;
+
+ explicit BufferIterator(const BufferList& buffer)
+ : mBuffer(buffer), mIter(buffer.Iter()) {
+ static_assert(8 % sizeof(T) == 0);
+ }
+
+ explicit BufferIterator(const JSStructuredCloneData& data)
+ : mBuffer(data.bufList_), mIter(data.Start()) {}
+
+ BufferIterator& operator=(const BufferIterator& other) {
+ MOZ_ASSERT(&mBuffer == &other.mBuffer);
+ mIter = other.mIter;
+ return *this;
+ }
+
+ [[nodiscard]] bool advance(size_t size = sizeof(T)) {
+ return mIter.AdvanceAcrossSegments(mBuffer, size);
+ }
+
+ BufferIterator operator++(int) {
+ BufferIterator ret = *this;
+ if (!advance(sizeof(T))) {
+ MOZ_ASSERT(false, "Failed to read StructuredCloneData. Data incomplete");
+ }
+ return ret;
+ }
+
+ BufferIterator& operator+=(size_t size) {
+ if (!advance(size)) {
+ MOZ_ASSERT(false, "Failed to read StructuredCloneData. Data incomplete");
+ }
+ return *this;
+ }
+
+ size_t operator-(const BufferIterator& other) const {
+ MOZ_ASSERT(&mBuffer == &other.mBuffer);
+ return mBuffer.RangeLength(other.mIter, mIter);
+ }
+
+ bool operator==(const BufferIterator& other) const {
+ return mBuffer.Start() == other.mBuffer.Start() && mIter == other.mIter;
+ }
+ bool operator!=(const BufferIterator& other) const {
+ return !(*this == other);
+ }
+
+ bool done() const { return mIter.Done(); }
+
+ [[nodiscard]] bool readBytes(char* outData, size_t size) {
+ return mBuffer.ReadBytes(mIter, outData, size);
+ }
+
+ void write(const T& data) {
+ MOZ_ASSERT(mIter.HasRoomFor(sizeof(T)));
+ *reinterpret_cast<T*>(mIter.Data()) = data;
+ }
+
+ T peek() const {
+ MOZ_ASSERT(mIter.HasRoomFor(sizeof(T)));
+ return *reinterpret_cast<T*>(mIter.Data());
+ }
+
+ bool canPeek() const { return mIter.HasRoomFor(sizeof(T)); }
+
+ const BufferList& mBuffer;
+ typename BufferList::IterImpl mIter;
+};
+
+SharedArrayRawBufferRefs& SharedArrayRawBufferRefs::operator=(
+ SharedArrayRawBufferRefs&& other) {
+ takeOwnership(std::move(other));
+ return *this;
+}
+
+SharedArrayRawBufferRefs::~SharedArrayRawBufferRefs() { releaseAll(); }
+
+bool SharedArrayRawBufferRefs::acquire(JSContext* cx,
+ SharedArrayRawBuffer* rawbuf) {
+ if (!refs_.append(rawbuf)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!rawbuf->addReference()) {
+ refs_.popBack();
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_SAB_REFCNT_OFLO);
+ return false;
+ }
+
+ return true;
+}
+
+bool SharedArrayRawBufferRefs::acquireAll(
+ JSContext* cx, const SharedArrayRawBufferRefs& that) {
+ if (!refs_.reserve(refs_.length() + that.refs_.length())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ for (auto ref : that.refs_) {
+ if (!ref->addReference()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_SAB_REFCNT_OFLO);
+ return false;
+ }
+ MOZ_ALWAYS_TRUE(refs_.append(ref));
+ }
+
+ return true;
+}
+
+void SharedArrayRawBufferRefs::takeOwnership(SharedArrayRawBufferRefs&& other) {
+ MOZ_ASSERT(refs_.empty());
+ refs_ = std::move(other.refs_);
+}
+
+void SharedArrayRawBufferRefs::releaseAll() {
+ for (auto ref : refs_) {
+ ref->dropReference();
+ }
+ refs_.clear();
+}
+
+// SCOutput provides an interface to write raw data -- e.g. uint64_ts,
+// doubles, arrays of bytes -- into a structured clone data output stream. It
+// also knows how to free any transferable data within that stream.
+//
+// Note that it contains a full JSStructuredCloneData object, which holds the
+// callbacks necessary to read/write/transfer/free the data. For the purpose of
+// this class, only the freeTransfer callback is relevant; the rest of the
+// callbacks are used by the higher-level JSStructuredCloneWriter interface.
+struct SCOutput {
+ public:
+ using Iter = BufferIterator<uint64_t, SystemAllocPolicy>;
+
+ SCOutput(JSContext* cx, JS::StructuredCloneScope scope);
+
+ JSContext* context() const { return cx; }
+ JS::StructuredCloneScope scope() const { return buf.scope(); }
+ void sameProcessScopeRequired() { buf.sameProcessScopeRequired(); }
+
+ [[nodiscard]] bool write(uint64_t u);
+ [[nodiscard]] bool writePair(uint32_t tag, uint32_t data);
+ [[nodiscard]] bool writeDouble(double d);
+ [[nodiscard]] bool writeBytes(const void* p, size_t nbytes);
+ [[nodiscard]] bool writeChars(const Latin1Char* p, size_t nchars);
+ [[nodiscard]] bool writeChars(const char16_t* p, size_t nchars);
+
+ template <class T>
+ [[nodiscard]] bool writeArray(const T* p, size_t nelems);
+
+ void setCallbacks(const JSStructuredCloneCallbacks* callbacks, void* closure,
+ OwnTransferablePolicy policy) {
+ buf.setCallbacks(callbacks, closure, policy);
+ }
+ void extractBuffer(JSStructuredCloneData* data) { *data = std::move(buf); }
+
+ uint64_t tell() const { return buf.Size(); }
+ uint64_t count() const { return buf.Size() / sizeof(uint64_t); }
+ Iter iter() { return Iter(buf); }
+
+ size_t offset(Iter dest) { return dest - iter(); }
+
+ JSContext* cx;
+ JSStructuredCloneData buf;
+};
+
+class SCInput {
+ public:
+ using BufferIterator = js::BufferIterator<uint64_t, SystemAllocPolicy>;
+
+ SCInput(JSContext* cx, const JSStructuredCloneData& data);
+
+ JSContext* context() const { return cx; }
+
+ static void getPtr(uint64_t data, void** ptr);
+ static void getPair(uint64_t data, uint32_t* tagp, uint32_t* datap);
+
+ [[nodiscard]] bool read(uint64_t* p);
+ [[nodiscard]] bool readPair(uint32_t* tagp, uint32_t* datap);
+ [[nodiscard]] bool readDouble(double* p);
+ [[nodiscard]] bool readBytes(void* p, size_t nbytes);
+ [[nodiscard]] bool readChars(Latin1Char* p, size_t nchars);
+ [[nodiscard]] bool readChars(char16_t* p, size_t nchars);
+ [[nodiscard]] bool readPtr(void**);
+
+ [[nodiscard]] bool get(uint64_t* p);
+ [[nodiscard]] bool getPair(uint32_t* tagp, uint32_t* datap);
+
+ const BufferIterator& tell() const { return point; }
+ void seekTo(const BufferIterator& pos) { point = pos; }
+ [[nodiscard]] bool seekBy(size_t pos) {
+ if (!point.advance(pos)) {
+ reportTruncated();
+ return false;
+ }
+ return true;
+ }
+
+ template <class T>
+ [[nodiscard]] bool readArray(T* p, size_t nelems);
+
+ bool reportTruncated() {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "truncated");
+ return false;
+ }
+
+ private:
+ void staticAssertions() {
+ static_assert(sizeof(char16_t) == 2);
+ static_assert(sizeof(uint32_t) == 4);
+ }
+
+ JSContext* cx;
+ BufferIterator point;
+};
+
+} // namespace js
+
+struct JSStructuredCloneReader {
+ public:
+ explicit JSStructuredCloneReader(SCInput& in, JS::StructuredCloneScope scope,
+ const JS::CloneDataPolicy& cloneDataPolicy,
+ const JSStructuredCloneCallbacks* cb,
+ void* cbClosure)
+ : in(in),
+ allowedScope(scope),
+ cloneDataPolicy(cloneDataPolicy),
+ objs(in.context()),
+ objState(in.context(), in.context()),
+ allObjs(in.context()),
+ numItemsRead(0),
+ callbacks(cb),
+ closure(cbClosure) {
+ // Avoid the need to bounds check by keeping a never-matching element at the
+ // base of the `objState` stack. This append() will always succeed because
+ // the objState vector has a nonzero MinInlineCapacity.
+ MOZ_ALWAYS_TRUE(objState.append(std::make_pair(nullptr, true)));
+ }
+
+ SCInput& input() { return in; }
+ bool read(MutableHandleValue vp, size_t nbytes);
+
+ private:
+ JSContext* context() { return in.context(); }
+
+ bool readHeader();
+ bool readTransferMap();
+
+ [[nodiscard]] bool readUint32(uint32_t* num);
+
+ template <typename CharT>
+ JSString* readStringImpl(uint32_t nchars, gc::Heap heap);
+ JSString* readString(uint32_t data, gc::Heap heap = gc::Heap::Default);
+
+ BigInt* readBigInt(uint32_t data);
+
+ [[nodiscard]] bool readTypedArray(uint32_t arrayType, uint64_t nelems,
+ MutableHandleValue vp, bool v1Read = false);
+
+ [[nodiscard]] bool readDataView(uint64_t byteLength, MutableHandleValue vp);
+
+ [[nodiscard]] bool readArrayBuffer(StructuredDataType type, uint32_t data,
+ MutableHandleValue vp);
+ [[nodiscard]] bool readV1ArrayBuffer(uint32_t arrayType, uint32_t nelems,
+ MutableHandleValue vp);
+
+ [[nodiscard]] bool readSharedArrayBuffer(MutableHandleValue vp);
+
+ [[nodiscard]] bool readSharedWasmMemory(uint32_t nbytes,
+ MutableHandleValue vp);
+
+ // A serialized SavedFrame contains primitive values in a header followed by
+ // an optional parent frame that is read recursively.
+ [[nodiscard]] JSObject* readSavedFrameHeader(uint32_t principalsTag);
+ [[nodiscard]] bool readSavedFrameFields(Handle<SavedFrame*> frameObj,
+ HandleValue parent, bool* state);
+
+ // A serialized Error contains primitive values in a header followed by
+ // 'cause', 'errors', and 'stack' fields that are read recursively.
+ [[nodiscard]] JSObject* readErrorHeader(uint32_t type);
+ [[nodiscard]] bool readErrorFields(Handle<ErrorObject*> errorObj,
+ HandleValue cause, bool* state);
+
+ [[nodiscard]] bool readMapField(Handle<MapObject*> mapObj, HandleValue key);
+
+ [[nodiscard]] bool readObjectField(HandleObject obj, HandleValue key);
+
+ [[nodiscard]] bool startRead(MutableHandleValue vp,
+ gc::Heap strHeap = gc::Heap::Default);
+
+ SCInput& in;
+
+ // The widest scope that the caller will accept, where
+ // SameProcess is the widest (it can store anything it wants)
+ // and DifferentProcess is the narrowest (it cannot contain pointers and must
+  // be valid cross-process).
+ JS::StructuredCloneScope allowedScope;
+
+ const JS::CloneDataPolicy cloneDataPolicy;
+
+ // Stack of objects with properties remaining to be read.
+ RootedValueVector objs;
+
+ // Maintain a stack of state values for the `objs` stack. Since this is only
+ // needed for a very small subset of objects (those with a known set of
+ // object children), the state information is stored as a stack of
+  // <object, state> pairs, where the object determines which element of the
+  // `objs` stack it corresponds to. So when reading from the `objs` stack,
+ // the state will be retrieved only if the top object on `objState` matches
+ // the top object of `objs`.
+ //
+ // Currently, the only state needed is a boolean indicating whether the fields
+ // have been read yet.
+ Rooted<GCVector<std::pair<HeapPtr<JSObject*>, bool>, 8>> objState;
+
+ // Array of all objects read during this deserialization, for resolving
+ // backreferences.
+ //
+ // For backreferences to work correctly, objects must be added to this
+ // array in exactly the order expected by the version of the Writer that
+ // created the serialized data, even across years and format versions. This
+ // is usually no problem, since both algorithms do a single linear pass
+ // over the serialized data. There is one hitch; see readTypedArray.
+ //
+ // The values in this vector are objects, except it can temporarily have
+ // one `undefined` placeholder value (the readTypedArray hack).
+ RootedValueVector allObjs;
+
+ size_t numItemsRead;
+
+  // The user-defined callbacks that will be used for cloning.
+ const JSStructuredCloneCallbacks* callbacks;
+
+ // Any value passed to JS_ReadStructuredClone.
+ void* closure;
+
+ friend bool JS_ReadString(JSStructuredCloneReader* r,
+ JS::MutableHandleString str);
+ friend bool JS_ReadTypedArray(JSStructuredCloneReader* r,
+ MutableHandleValue vp);
+
+ // Provide a way to detect whether any of the clone data is never used. When
+ // "tail" data (currently, this is only stored data for Transferred
+ // ArrayBuffers in the DifferentProcess scope) is read, record the first and
+ // last positions. At the end of deserialization, make sure there's nothing
+ // between the end of the main data and the beginning of the tail, nor after
+ // the end of the tail.
+ mozilla::Maybe<SCInput::BufferIterator> tailStartPos;
+ mozilla::Maybe<SCInput::BufferIterator> tailEndPos;
+};
+
+struct JSStructuredCloneWriter {
+ public:
+ explicit JSStructuredCloneWriter(JSContext* cx,
+ JS::StructuredCloneScope scope,
+ const JS::CloneDataPolicy& cloneDataPolicy,
+ const JSStructuredCloneCallbacks* cb,
+ void* cbClosure, const Value& tVal)
+ : out(cx, scope),
+ callbacks(cb),
+ closure(cbClosure),
+ objs(cx),
+ counts(cx),
+ objectEntries(cx),
+ otherEntries(cx),
+ memory(cx),
+ transferable(cx, tVal),
+ transferableObjects(cx, TransferableObjectsList(cx)),
+ cloneDataPolicy(cloneDataPolicy) {
+ out.setCallbacks(cb, cbClosure,
+ OwnTransferablePolicy::OwnsTransferablesIfAny);
+ }
+
+ bool init() {
+ return parseTransferable() && writeHeader() && writeTransferMap();
+ }
+
+ bool write(HandleValue v);
+
+ SCOutput& output() { return out; }
+
+ void extractBuffer(JSStructuredCloneData* newData) {
+ out.extractBuffer(newData);
+ }
+
+ private:
+ JSStructuredCloneWriter() = delete;
+ JSStructuredCloneWriter(const JSStructuredCloneWriter&) = delete;
+
+ JSContext* context() { return out.context(); }
+
+ bool writeHeader();
+ bool writeTransferMap();
+
+ bool writeString(uint32_t tag, JSString* str);
+ bool writeBigInt(uint32_t tag, BigInt* bi);
+ bool writeArrayBuffer(HandleObject obj);
+ bool writeTypedArray(HandleObject obj);
+ bool writeDataView(HandleObject obj);
+ bool writeSharedArrayBuffer(HandleObject obj);
+ bool writeSharedWasmMemory(HandleObject obj);
+ bool startObject(HandleObject obj, bool* backref);
+ bool writePrimitive(HandleValue v);
+ bool startWrite(HandleValue v);
+ bool traverseObject(HandleObject obj, ESClass cls);
+ bool traverseMap(HandleObject obj);
+ bool traverseSet(HandleObject obj);
+ bool traverseSavedFrame(HandleObject obj);
+ bool traverseError(HandleObject obj);
+
+ template <typename... Args>
+ bool reportDataCloneError(uint32_t errorId, Args&&... aArgs);
+
+ bool parseTransferable();
+ bool transferOwnership();
+
+ inline void checkStack();
+
+ SCOutput out;
+
+  // The user-defined callbacks that will be used to signal cloning, in some
+  // cases.
+ const JSStructuredCloneCallbacks* callbacks;
+
+ // Any value passed to the callbacks.
+ void* closure;
+
+ // Vector of objects with properties remaining to be written.
+ //
+ // NB: These can span multiple compartments, so the compartment must be
+ // entered before any manipulation is performed.
+ RootedValueVector objs;
+
+ // counts[i] is the number of entries of objs[i] remaining to be written.
+  // counts.length() == objs.length(), and sum(counts) equals the total
+  // number of entries remaining in objectEntries and otherEntries.
+ Vector<size_t> counts;
+
+ // For JSObject: Property IDs as value
+ RootedIdVector objectEntries;
+
+ // For Map: Key followed by value
+ // For Set: Key
+ // For SavedFrame: parent SavedFrame
+ // For Error: cause, errors, stack
+ RootedValueVector otherEntries;
+
+ // The "memory" list described in the HTML5 internal structured cloning
+  // algorithm. memory is a superset of objs; items are never removed from
+  // memory until a serialization operation is finished.
+ using CloneMemory = GCHashMap<JSObject*, uint32_t,
+ StableCellHasher<JSObject*>, SystemAllocPolicy>;
+ Rooted<CloneMemory> memory;
+
+ // Set of transferable objects
+ RootedValue transferable;
+ using TransferableObjectsList = GCVector<JSObject*>;
+ Rooted<TransferableObjectsList> transferableObjects;
+
+ const JS::CloneDataPolicy cloneDataPolicy;
+
+ friend bool JS_WriteString(JSStructuredCloneWriter* w, HandleString str);
+ friend bool JS_WriteTypedArray(JSStructuredCloneWriter* w, HandleValue v);
+ friend bool JS_ObjectNotWritten(JSStructuredCloneWriter* w, HandleObject obj);
+};
+
+JS_PUBLIC_API uint64_t js::GetSCOffset(JSStructuredCloneWriter* writer) {
+ MOZ_ASSERT(writer);
+ return writer->output().count() * sizeof(uint64_t);
+}
+
+static_assert(SCTAG_END_OF_BUILTIN_TYPES <= JS_SCTAG_USER_MIN);
+static_assert(JS_SCTAG_USER_MIN <= JS_SCTAG_USER_MAX);
+static_assert(Scalar::Int8 == 0);
+
+template <typename... Args>
+static void ReportDataCloneError(JSContext* cx,
+ const JSStructuredCloneCallbacks* callbacks,
+ uint32_t errorId, void* closure,
+ Args&&... aArgs) {
+ unsigned errorNumber;
+ switch (errorId) {
+ case JS_SCERR_DUP_TRANSFERABLE:
+ errorNumber = JSMSG_SC_DUP_TRANSFERABLE;
+ break;
+
+ case JS_SCERR_TRANSFERABLE:
+ errorNumber = JSMSG_SC_NOT_TRANSFERABLE;
+ break;
+
+ case JS_SCERR_UNSUPPORTED_TYPE:
+ errorNumber = JSMSG_SC_UNSUPPORTED_TYPE;
+ break;
+
+ case JS_SCERR_SHMEM_TRANSFERABLE:
+ errorNumber = JSMSG_SC_SHMEM_TRANSFERABLE;
+ break;
+
+ case JS_SCERR_TYPED_ARRAY_DETACHED:
+ errorNumber = JSMSG_TYPED_ARRAY_DETACHED;
+ break;
+
+ case JS_SCERR_WASM_NO_TRANSFER:
+ errorNumber = JSMSG_WASM_NO_TRANSFER;
+ break;
+
+ case JS_SCERR_NOT_CLONABLE:
+ errorNumber = JSMSG_SC_NOT_CLONABLE;
+ break;
+
+ case JS_SCERR_NOT_CLONABLE_WITH_COOP_COEP:
+ errorNumber = JSMSG_SC_NOT_CLONABLE_WITH_COOP_COEP;
+ break;
+
+ default:
+ MOZ_CRASH("Unkown errorId");
+ break;
+ }
+
+ if (callbacks && callbacks->reportError) {
+ MOZ_RELEASE_ASSERT(!cx->isExceptionPending());
+
+ JSErrorReport report;
+ report.errorNumber = errorNumber;
+    // Get the JS error message if possible and propagate it through the
+    // callback.
+ if (JS_ExpandErrorArgumentsASCII(cx, GetErrorMessage, errorNumber, &report,
+ std::forward<Args>(aArgs)...) &&
+ report.message()) {
+ callbacks->reportError(cx, errorId, closure, report.message().c_str());
+ } else {
+ ReportOutOfMemory(cx);
+
+ callbacks->reportError(cx, errorId, closure, "");
+ }
+
+ return;
+ }
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, errorNumber,
+ std::forward<Args>(aArgs)...);
+}
+
+bool WriteStructuredClone(JSContext* cx, HandleValue v,
+ JSStructuredCloneData* bufp,
+ JS::StructuredCloneScope scope,
+ const JS::CloneDataPolicy& cloneDataPolicy,
+ const JSStructuredCloneCallbacks* cb, void* cbClosure,
+ const Value& transferable) {
+ JSStructuredCloneWriter w(cx, scope, cloneDataPolicy, cb, cbClosure,
+ transferable);
+ if (!w.init()) {
+ return false;
+ }
+ if (!w.write(v)) {
+ return false;
+ }
+ w.extractBuffer(bufp);
+ return true;
+}
+
+bool ReadStructuredClone(JSContext* cx, const JSStructuredCloneData& data,
+ JS::StructuredCloneScope scope, MutableHandleValue vp,
+ const JS::CloneDataPolicy& cloneDataPolicy,
+ const JSStructuredCloneCallbacks* cb,
+ void* cbClosure) {
+ if (data.Size() % 8) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "misaligned");
+ return false;
+ }
+ SCInput in(cx, data);
+ JSStructuredCloneReader r(in, scope, cloneDataPolicy, cb, cbClosure);
+ return r.read(vp, data.Size());
+}
+
+static bool StructuredCloneHasTransferObjects(
+ const JSStructuredCloneData& data) {
+ if (data.Size() < sizeof(uint64_t)) {
+ return false;
+ }
+
+ uint64_t u;
+ BufferIterator<uint64_t, SystemAllocPolicy> iter(data);
+ MOZ_ALWAYS_TRUE(iter.readBytes(reinterpret_cast<char*>(&u), sizeof(u)));
+ uint32_t tag = uint32_t(u >> 32);
+ return (tag == SCTAG_TRANSFER_MAP_HEADER);
+}
+
+namespace js {
+
+SCInput::SCInput(JSContext* cx, const JSStructuredCloneData& data)
+ : cx(cx), point(data) {
+ static_assert(JSStructuredCloneData::BufferList::kSegmentAlignment % 8 == 0,
+ "structured clone buffer reads should be aligned");
+ MOZ_ASSERT(data.Size() % 8 == 0);
+}
+
+bool SCInput::read(uint64_t* p) {
+ if (!point.canPeek()) {
+ *p = 0; // initialize to shut GCC up
+ return reportTruncated();
+ }
+ *p = NativeEndian::swapFromLittleEndian(point.peek());
+ MOZ_ALWAYS_TRUE(point.advance());
+ return true;
+}
+
+bool SCInput::readPair(uint32_t* tagp, uint32_t* datap) {
+ uint64_t u;
+ bool ok = read(&u);
+ if (ok) {
+ *tagp = uint32_t(u >> 32);
+ *datap = uint32_t(u);
+ }
+ return ok;
+}
+
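+// Like read(), but leave the read position unchanged.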
+bool SCInput::get(uint64_t* p) {
+ if (!point.canPeek()) {
+ return reportTruncated();
+ }
+ *p = NativeEndian::swapFromLittleEndian(point.peek());
+ return true;
+}
+
+bool SCInput::getPair(uint32_t* tagp, uint32_t* datap) {
+ uint64_t u = 0;
+ if (!get(&u)) {
+ return false;
+ }
+
+ *tagp = uint32_t(u >> 32);
+ *datap = uint32_t(u);
+ return true;
+}
+
+void SCInput::getPair(uint64_t data, uint32_t* tagp, uint32_t* datap) {
+ uint64_t u = NativeEndian::swapFromLittleEndian(data);
+ *tagp = uint32_t(u >> 32);
+ *datap = uint32_t(u);
+}
+
+bool SCInput::readDouble(double* p) {
+ uint64_t u;
+ if (!read(&u)) {
+ return false;
+ }
+ *p = CanonicalizeNaN(mozilla::BitwiseCast<double>(u));
+ return true;
+}
+
+template <typename T>
+static void swapFromLittleEndianInPlace(T* ptr, size_t nelems) {
+ if (nelems > 0) {
+ NativeEndian::swapFromLittleEndianInPlace(ptr, nelems);
+ }
+}
+
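+// Byte-sized elements need no swapping.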
+template <>
+void swapFromLittleEndianInPlace(uint8_t* ptr, size_t nelems) {}
+
+// Data is packed into an integral number of uint64_t words. Compute the
+// padding required to finish off the final word.
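+// For example, 3 uint16_t elements occupy 6 bytes, so ComputePadding(3, 2)
+// returns 2 to reach the next 8-byte boundary.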
+static size_t ComputePadding(size_t nelems, size_t elemSize) {
+ // We want the total length mod 8, where the total length is
+ // nelems * elemSize, but that product might overflow. So reduce nelems
+ // mod 8 first; doing so does not change the final mod-8 result.
+ size_t leftoverLength = (nelems % sizeof(uint64_t)) * elemSize;
+ return (-leftoverLength) & (sizeof(uint64_t) - 1);
+}
+
+template <class T>
+bool SCInput::readArray(T* p, size_t nelems) {
+ if (!nelems) {
+ return true;
+ }
+
+ static_assert(sizeof(uint64_t) % sizeof(T) == 0);
+
+ // Fail if nelems is so huge that computing the full size will overflow.
+ mozilla::CheckedInt<size_t> size =
+ mozilla::CheckedInt<size_t>(nelems) * sizeof(T);
+ if (!size.isValid()) {
+ return reportTruncated();
+ }
+
+ if (!point.readBytes(reinterpret_cast<char*>(p), size.value())) {
+ // To avoid any way in which uninitialized data could escape, zero the array
+ // if filling it failed.
+ std::uninitialized_fill_n(p, nelems, 0);
+ return false;
+ }
+
+ swapFromLittleEndianInPlace(p, nelems);
+
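+ // Skip the zero padding the writer appended to reach an 8-byte boundary.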
+ point += ComputePadding(nelems, sizeof(T));
+
+ return true;
+}
+
+bool SCInput::readBytes(void* p, size_t nbytes) {
+ return readArray((uint8_t*)p, nbytes);
+}
+
+bool SCInput::readChars(Latin1Char* p, size_t nchars) {
+ static_assert(sizeof(Latin1Char) == sizeof(uint8_t),
+ "Latin1Char must fit in 1 byte");
+ return readBytes(p, nchars);
+}
+
+bool SCInput::readChars(char16_t* p, size_t nchars) {
+ MOZ_ASSERT(sizeof(char16_t) == sizeof(uint16_t));
+ return readArray((uint16_t*)p, nchars);
+}
+
+void SCInput::getPtr(uint64_t data, void** ptr) {
+ *ptr = reinterpret_cast<void*>(NativeEndian::swapFromLittleEndian(data));
+}
+
+bool SCInput::readPtr(void** p) {
+ uint64_t u;
+ if (!read(&u)) {
+ return false;
+ }
+ *p = reinterpret_cast<void*>(u);
+ return true;
+}
+
+SCOutput::SCOutput(JSContext* cx, JS::StructuredCloneScope scope)
+ : cx(cx), buf(scope) {}
+
+bool SCOutput::write(uint64_t u) {
+ uint64_t v = NativeEndian::swapToLittleEndian(u);
+ if (!buf.AppendBytes(reinterpret_cast<char*>(&v), sizeof(u))) {
+ ReportOutOfMemory(context());
+ return false;
+ }
+ return true;
+}
+
+bool SCOutput::writePair(uint32_t tag, uint32_t data) {
+ // As it happens, the tag word appears after the data word in the output.
+ // This is because exponents occupy the last 2 bytes of doubles on the
+ // little-endian platforms we care most about.
+ //
+ // For example, TrueValue() is written using writePair(SCTAG_BOOLEAN, 1).
+ // PairToUInt64 produces the number 0xFFFF000200000001.
+ // That is written out as the bytes 01 00 00 00 02 00 FF FF.
+ return write(PairToUInt64(tag, data));
+}
+
+static inline double ReinterpretPairAsDouble(uint32_t tag, uint32_t data) {
+ return BitwiseCast<double>(PairToUInt64(tag, data));
+}
+
+bool SCOutput::writeDouble(double d) {
+ return write(BitwiseCast<uint64_t>(CanonicalizeNaN(d)));
+}
+
+template <class T>
+bool SCOutput::writeArray(const T* p, size_t nelems) {
+ static_assert(8 % sizeof(T) == 0);
+ static_assert(sizeof(uint64_t) % sizeof(T) == 0);
+
+ if (nelems == 0) {
+ return true;
+ }
+
+ for (size_t i = 0; i < nelems; i++) {
+ T value = NativeEndian::swapToLittleEndian(p[i]);
+ if (!buf.AppendBytes(reinterpret_cast<char*>(&value), sizeof(value))) {
+ return false;
+ }
+ }
+
+ // Zero-pad to an 8-byte boundary.
+ size_t padbytes = ComputePadding(nelems, sizeof(T));
+ char zeroes[sizeof(uint64_t)] = {0};
+ if (!buf.AppendBytes(zeroes, padbytes)) {
+ return false;
+ }
+
+ return true;
+}
+
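+// Specialization for bytes: no per-element byte swap is needed, so the whole
+// array can be appended in one call.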
+template <>
+bool SCOutput::writeArray<uint8_t>(const uint8_t* p, size_t nelems) {
+ if (nelems == 0) {
+ return true;
+ }
+
+ if (!buf.AppendBytes(reinterpret_cast<const char*>(p), nelems)) {
+ return false;
+ }
+
+ // Zero-pad to an 8-byte boundary.
+ size_t padbytes = ComputePadding(nelems, 1);
+ char zeroes[sizeof(uint64_t)] = {0};
+ if (!buf.AppendBytes(zeroes, padbytes)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool SCOutput::writeBytes(const void* p, size_t nbytes) {
+ return writeArray((const uint8_t*)p, nbytes);
+}
+
+bool SCOutput::writeChars(const char16_t* p, size_t nchars) {
+ static_assert(sizeof(char16_t) == sizeof(uint16_t),
+ "required so that treating char16_t[] memory as uint16_t[] "
+ "memory is permissible");
+ return writeArray((const uint16_t*)p, nchars);
+}
+
+bool SCOutput::writeChars(const Latin1Char* p, size_t nchars) {
+ static_assert(sizeof(Latin1Char) == sizeof(uint8_t),
+ "Latin1Char must fit in 1 byte");
+ return writeBytes(p, nchars);
+}
+
+} // namespace js
+
+JSStructuredCloneData::~JSStructuredCloneData() { discardTransferables(); }
+
+// If the buffer contains Transferables, free them. Note that custom
+// Transferables are freed through the
+// JSStructuredCloneCallbacks::freeTransfer() hook.
+void JSStructuredCloneData::discardTransferables() {
+ if (!Size()) {
+ return;
+ }
+
+ if (ownTransferables_ != OwnTransferablePolicy::OwnsTransferablesIfAny) {
+ return;
+ }
+
+ // DifferentProcess clones cannot contain pointers, so nothing needs to be
+ // released.
+ if (scope() == JS::StructuredCloneScope::DifferentProcess) {
+ return;
+ }
+
+ FreeTransferStructuredCloneOp freeTransfer = nullptr;
+ if (callbacks_) {
+ freeTransfer = callbacks_->freeTransfer;
+ }
+
+ auto point = BufferIterator<uint64_t, SystemAllocPolicy>(*this);
+ if (point.done()) {
+ return; // Empty buffer
+ }
+
+ uint32_t tag, data;
+ MOZ_RELEASE_ASSERT(point.canPeek());
+ SCInput::getPair(point.peek(), &tag, &data);
+ MOZ_ALWAYS_TRUE(point.advance());
+
+ if (tag == SCTAG_HEADER) {
+ if (point.done()) {
+ return;
+ }
+
+ MOZ_RELEASE_ASSERT(point.canPeek());
+ SCInput::getPair(point.peek(), &tag, &data);
+ MOZ_ALWAYS_TRUE(point.advance());
+ }
+
+ if (tag != SCTAG_TRANSFER_MAP_HEADER) {
+ return;
+ }
+
+ if (TransferableMapHeader(data) == SCTAG_TM_TRANSFERRED) {
+ return;
+ }
+
+ // freeTransfer should not GC
+ JS::AutoSuppressGCAnalysis nogc;
+
+ if (point.done()) {
+ return;
+ }
+
+ MOZ_RELEASE_ASSERT(point.canPeek());
+ uint64_t numTransferables = NativeEndian::swapFromLittleEndian(point.peek());
+ MOZ_ALWAYS_TRUE(point.advance());
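+ // Each transfer map entry is three 64-bit words: a (tag, ownership) pair,
+ // a pointer to the transferred content, and extra data.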
+ while (numTransferables--) {
+ if (!point.canPeek()) {
+ return;
+ }
+
+ uint32_t ownership;
+ SCInput::getPair(point.peek(), &tag, &ownership);
+ MOZ_ALWAYS_TRUE(point.advance());
+ MOZ_ASSERT(tag >= SCTAG_TRANSFER_MAP_PENDING_ENTRY);
+ if (!point.canPeek()) {
+ return;
+ }
+
+ void* content;
+ SCInput::getPtr(point.peek(), &content);
+ MOZ_ALWAYS_TRUE(point.advance());
+ if (!point.canPeek()) {
+ return;
+ }
+
+ uint64_t extraData = NativeEndian::swapFromLittleEndian(point.peek());
+ MOZ_ALWAYS_TRUE(point.advance());
+
+ if (ownership < JS::SCTAG_TMO_FIRST_OWNED) {
+ continue;
+ }
+
+ if (ownership == JS::SCTAG_TMO_ALLOC_DATA) {
+ js_free(content);
+ } else if (ownership == JS::SCTAG_TMO_MAPPED_DATA) {
+ JS::ReleaseMappedArrayBufferContents(content, extraData);
+ } else if (freeTransfer) {
+ freeTransfer(tag, JS::TransferableOwnership(ownership), content,
+ extraData, closure_);
+ } else {
+ MOZ_ASSERT(false, "unknown ownership");
+ }
+ }
+}
+
+static_assert(JSString::MAX_LENGTH < UINT32_MAX);
+
+bool JSStructuredCloneWriter::parseTransferable() {
+ // NOTE: The transferables set is tested for non-emptiness at various
+ // junctures in structured cloning, so this set must be initialized
+ // by this method in all non-error cases.
+ MOZ_ASSERT(transferableObjects.empty(),
+ "parseTransferable called with stale data");
+
+ if (transferable.isNull() || transferable.isUndefined()) {
+ return true;
+ }
+
+ if (!transferable.isObject()) {
+ return reportDataCloneError(JS_SCERR_TRANSFERABLE);
+ }
+
+ JSContext* cx = context();
+ RootedObject array(cx, &transferable.toObject());
+ bool isArray;
+ if (!JS::IsArrayObject(cx, array, &isArray)) {
+ return false;
+ }
+ if (!isArray) {
+ return reportDataCloneError(JS_SCERR_TRANSFERABLE);
+ }
+
+ uint32_t length;
+ if (!JS::GetArrayLength(cx, array, &length)) {
+ return false;
+ }
+
+ // Initialize the set for the provided array's length.
+ if (!transferableObjects.reserve(length)) {
+ return false;
+ }
+
+ if (length == 0) {
+ return true;
+ }
+
+ RootedValue v(context());
+ RootedObject tObj(context());
+
+ for (uint32_t i = 0; i < length; ++i) {
+ if (!CheckForInterrupt(cx)) {
+ return false;
+ }
+
+ if (!JS_GetElement(cx, array, i, &v)) {
+ return false;
+ }
+
+ if (!v.isObject()) {
+ return reportDataCloneError(JS_SCERR_TRANSFERABLE);
+ }
+ tObj = &v.toObject();
+
+ RootedObject unwrappedObj(cx, CheckedUnwrapStatic(tObj));
+ if (!unwrappedObj) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ // Shared memory cannot be transferred because it is not possible (nor
+ // desirable) to detach the memory in agents that already hold a
+ // reference to it.
+
+ if (unwrappedObj->is<SharedArrayBufferObject>()) {
+ return reportDataCloneError(JS_SCERR_SHMEM_TRANSFERABLE);
+ }
+
+ else if (unwrappedObj->is<WasmMemoryObject>()) {
+ if (unwrappedObj->as<WasmMemoryObject>().isShared()) {
+ return reportDataCloneError(JS_SCERR_SHMEM_TRANSFERABLE);
+ }
+ }
+
+ // External array buffers might become transferable in the future, but
+ // that is not currently implemented.
+
+ else if (unwrappedObj->is<ArrayBufferObject>()) {
+ if (unwrappedObj->as<ArrayBufferObject>().isExternal()) {
+ return reportDataCloneError(JS_SCERR_TRANSFERABLE);
+ }
+ }
+
+ else {
+ if (!out.buf.callbacks_ || !out.buf.callbacks_->canTransfer) {
+ return reportDataCloneError(JS_SCERR_TRANSFERABLE);
+ }
+
+ JSAutoRealm ar(cx, unwrappedObj);
+ bool sameProcessScopeRequired = false;
+ if (!out.buf.callbacks_->canTransfer(
+ cx, unwrappedObj, &sameProcessScopeRequired, out.buf.closure_)) {
+ return reportDataCloneError(JS_SCERR_TRANSFERABLE);
+ }
+
+ if (sameProcessScopeRequired) {
+ output().sameProcessScopeRequired();
+ }
+ }
+
+ // No duplicates allowed
+ if (std::find(transferableObjects.begin(), transferableObjects.end(),
+ tObj) != transferableObjects.end()) {
+ return reportDataCloneError(JS_SCERR_DUP_TRANSFERABLE);
+ }
+
+ if (!transferableObjects.append(tObj)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename... Args>
+bool JSStructuredCloneWriter::reportDataCloneError(uint32_t errorId,
+ Args&&... aArgs) {
+ ReportDataCloneError(context(), out.buf.callbacks_, errorId, out.buf.closure_,
+ std::forward<Args>(aArgs)...);
+ return false;
+}
+
+bool JSStructuredCloneWriter::writeString(uint32_t tag, JSString* str) {
+ JSLinearString* linear = str->ensureLinear(context());
+ if (!linear) {
+ return false;
+ }
+
+#ifdef FUZZING_JS_FUZZILLI
+ if (js::SupportDifferentialTesting()) {
+ // TODO we could always output a twoByteChar string
+ return true;
+ }
+#endif
+
+ static_assert(JSString::MAX_LENGTH <= INT32_MAX,
+ "String length must fit in 31 bits");
+
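+ // Pack the length into the low 31 bits and a Latin1-vs-two-byte flag into
+ // the high bit.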
+ uint32_t length = linear->length();
+ uint32_t lengthAndEncoding =
+ length | (uint32_t(linear->hasLatin1Chars()) << 31);
+ if (!out.writePair(tag, lengthAndEncoding)) {
+ return false;
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ return linear->hasLatin1Chars()
+ ? out.writeChars(linear->latin1Chars(nogc), length)
+ : out.writeChars(linear->twoByteChars(nogc), length);
+}
+
+bool JSStructuredCloneWriter::writeBigInt(uint32_t tag, BigInt* bi) {
+ bool signBit = bi->isNegative();
+ size_t length = bi->digitLength();
+ // The length must fit in 31 bits to leave room for a sign bit.
+ if (length > size_t(INT32_MAX)) {
+ return false;
+ }
+ uint32_t lengthAndSign = length | (static_cast<uint32_t>(signBit) << 31);
+
+ if (!out.writePair(tag, lengthAndSign)) {
+ return false;
+ }
+ return out.writeArray(bi->digits().data(), length);
+}
+
+inline void JSStructuredCloneWriter::checkStack() {
+#ifdef DEBUG
+ // To avoid making serialization O(n^2), limit stack checking to 10 entries.
+ const size_t MAX = 10;
+
+ size_t limit = std::min(counts.length(), MAX);
+ MOZ_ASSERT(objs.length() == counts.length());
+ size_t total = 0;
+ for (size_t i = 0; i < limit; i++) {
+ MOZ_ASSERT(total + counts[i] >= total);
+ total += counts[i];
+ }
+ if (counts.length() <= MAX) {
+ MOZ_ASSERT(total == objectEntries.length() + otherEntries.length());
+ } else {
+ MOZ_ASSERT(total <= objectEntries.length() + otherEntries.length());
+ }
+
+ size_t j = objs.length();
+ for (size_t i = 0; i < limit; i++) {
+ --j;
+ MOZ_ASSERT(memory.has(&objs[j].toObject()));
+ }
+#endif
+}
+
+/*
+ * Write out a typed array. Note that post-v1 structured clone buffers do not
+ * perform endianness conversion on stored data, so multibyte typed arrays
+ * cannot be deserialized into a different endianness machine. Endianness
+ * conversion would prevent sharing ArrayBuffers: if you have Int8Array and
+ * Int16Array views of the same ArrayBuffer, should the data bytes be
+ * byte-swapped when writing or not? The Int8Array requires them to not be
+ * swapped; the Int16Array requires that they are.
+ */
+bool JSStructuredCloneWriter::writeTypedArray(HandleObject obj) {
+ Rooted<TypedArrayObject*> tarr(context(),
+ obj->maybeUnwrapAs<TypedArrayObject>());
+ JSAutoRealm ar(context(), tarr);
+
+#ifdef FUZZING_JS_FUZZILLI
+ if (js::SupportDifferentialTesting() && !tarr->hasBuffer()) {
+ // fake oom because differential testing will fail
+ fprintf(stderr, "[unhandlable oom]");
+ _exit(-1);
+ return false;
+ }
+#endif
+
+ if (!TypedArrayObject::ensureHasBuffer(context(), tarr)) {
+ return false;
+ }
+
+ if (!out.writePair(SCTAG_TYPED_ARRAY_OBJECT, uint32_t(tarr->type()))) {
+ return false;
+ }
+
+ uint64_t nelems = tarr->length();
+ if (!out.write(nelems)) {
+ return false;
+ }
+
+ // Write out the ArrayBuffer tag and contents
+ RootedValue val(context(), tarr->bufferValue());
+ if (!startWrite(val)) {
+ return false;
+ }
+
+ uint64_t byteOffset = tarr->byteOffset();
+ return out.write(byteOffset);
+}
+
+bool JSStructuredCloneWriter::writeDataView(HandleObject obj) {
+ Rooted<DataViewObject*> view(context(), obj->maybeUnwrapAs<DataViewObject>());
+ JSAutoRealm ar(context(), view);
+
+ if (!out.writePair(SCTAG_DATA_VIEW_OBJECT, 0)) {
+ return false;
+ }
+
+ uint64_t byteLength = view->byteLength();
+ if (!out.write(byteLength)) {
+ return false;
+ }
+
+ // Write out the ArrayBuffer tag and contents
+ RootedValue val(context(), view->bufferValue());
+ if (!startWrite(val)) {
+ return false;
+ }
+
+ uint64_t byteOffset = view->byteOffset();
+ return out.write(byteOffset);
+}
+
+bool JSStructuredCloneWriter::writeArrayBuffer(HandleObject obj) {
+ Rooted<ArrayBufferObject*> buffer(context(),
+ obj->maybeUnwrapAs<ArrayBufferObject>());
+ JSAutoRealm ar(context(), buffer);
+
+ if (!out.writePair(SCTAG_ARRAY_BUFFER_OBJECT, 0)) {
+ return false;
+ }
+
+ uint64_t byteLength = buffer->byteLength();
+ if (!out.write(byteLength)) {
+ return false;
+ }
+
+ return out.writeBytes(buffer->dataPointer(), byteLength);
+}
+
+bool JSStructuredCloneWriter::writeSharedArrayBuffer(HandleObject obj) {
+ MOZ_ASSERT(obj->canUnwrapAs<SharedArrayBufferObject>());
+
+ if (!cloneDataPolicy.areSharedMemoryObjectsAllowed()) {
+ auto error = context()->realm()->creationOptions().getCoopAndCoepEnabled()
+ ? JS_SCERR_NOT_CLONABLE_WITH_COOP_COEP
+ : JS_SCERR_NOT_CLONABLE;
+ reportDataCloneError(error, "SharedArrayBuffer");
+ return false;
+ }
+
+ output().sameProcessScopeRequired();
+
+ // We must not transmit SAB pointers (including for WebAssembly.Memory)
+ // cross-process. The cloneDataPolicy should have guarded against this;
+ // since it did not, throw with a very explicit message.
+
+ if (output().scope() > JS::StructuredCloneScope::SameProcess) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_SHMEM_POLICY);
+ return false;
+ }
+
+ Rooted<SharedArrayBufferObject*> sharedArrayBuffer(
+ context(), obj->maybeUnwrapAs<SharedArrayBufferObject>());
+ SharedArrayRawBuffer* rawbuf = sharedArrayBuffer->rawBufferObject();
+
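+ // Record a reference to the raw buffer so it stays alive as long as the
+ // clone data does.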
+ if (!out.buf.refsHeld_.acquire(context(), rawbuf)) {
+ return false;
+ }
+
+ // We must serialize the length so that the buffer object arrives in the
+ // receiver with the same length, and not with the length read from the
+ // rawbuf; that length can be different, and it can change at any time.
+
+ intptr_t p = reinterpret_cast<intptr_t>(rawbuf);
+ uint64_t byteLength = sharedArrayBuffer->byteLength();
+ if (!(out.writePair(SCTAG_SHARED_ARRAY_BUFFER_OBJECT,
+ static_cast<uint32_t>(sizeof(p))) &&
+ out.writeBytes(&byteLength, sizeof(byteLength)) &&
+ out.writeBytes(&p, sizeof(p)))) {
+ return false;
+ }
+
+ if (callbacks && callbacks->sabCloned &&
+ !callbacks->sabCloned(context(), /*receiving=*/false, closure)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool JSStructuredCloneWriter::writeSharedWasmMemory(HandleObject obj) {
+ MOZ_ASSERT(obj->canUnwrapAs<WasmMemoryObject>());
+
+ // Check the policy here so that we can report a sane error.
+ if (!cloneDataPolicy.areSharedMemoryObjectsAllowed()) {
+ auto error = context()->realm()->creationOptions().getCoopAndCoepEnabled()
+ ? JS_SCERR_NOT_CLONABLE_WITH_COOP_COEP
+ : JS_SCERR_NOT_CLONABLE;
+ reportDataCloneError(error, "WebAssembly.Memory");
+ return false;
+ }
+
+ // If this changes, we might need to change what we write.
+ MOZ_ASSERT(WasmMemoryObject::RESERVED_SLOTS == 3);
+
+ Rooted<WasmMemoryObject*> memoryObj(context(),
+ &obj->unwrapAs<WasmMemoryObject>());
+ Rooted<SharedArrayBufferObject*> sab(
+ context(), &memoryObj->buffer().as<SharedArrayBufferObject>());
+
+ return out.writePair(SCTAG_SHARED_WASM_MEMORY_OBJECT, 0) &&
+ out.writePair(SCTAG_BOOLEAN, memoryObj->isHuge()) &&
+ writeSharedArrayBuffer(sab);
+}
+
+bool JSStructuredCloneWriter::startObject(HandleObject obj, bool* backref) {
+ // Handle cycles in the object graph.
+ CloneMemory::AddPtr p = memory.lookupForAdd(obj);
+ if ((*backref = p.found())) {
+ return out.writePair(SCTAG_BACK_REFERENCE_OBJECT, p->value());
+ }
+ if (!memory.add(p, obj, memory.count())) {
+ ReportOutOfMemory(context());
+ return false;
+ }
+
+ if (memory.count() == UINT32_MAX) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_NEED_DIET, "object graph to serialize");
+ return false;
+ }
+
+ return true;
+}
+
+static bool TryAppendNativeProperties(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector entries,
+ size_t* properties, bool* optimized) {
+ *optimized = false;
+
+ if (!obj->is<NativeObject>()) {
+ return true;
+ }
+
+ Handle<NativeObject*> nobj = obj.as<NativeObject>();
+ if (nobj->isIndexed() || nobj->is<TypedArrayObject>() ||
+ nobj->getClass()->getNewEnumerate() || nobj->getClass()->getEnumerate()) {
+ return true;
+ }
+
+ *optimized = true;
+
+ size_t count = 0;
+ // We iterate from the last to the first property, so the property names
+ // are already in reverse order.
+ for (ShapePropertyIter<NoGC> iter(nobj->shape()); !iter.done(); iter++) {
+ jsid id = iter->key();
+
+ // Ignore symbols and non-enumerable properties.
+ if (!iter->enumerable() || id.isSymbol()) {
+ continue;
+ }
+
+ MOZ_ASSERT(id.isString());
+ if (!entries.append(id)) {
+ return false;
+ }
+
+ count++;
+ }
+
+ // Add dense element ids in reverse order.
+ for (uint32_t i = nobj->getDenseInitializedLength(); i > 0; --i) {
+ if (nobj->getDenseElement(i - 1).isMagic(JS_ELEMENTS_HOLE)) {
+ continue;
+ }
+
+ if (!entries.append(PropertyKey::Int(i - 1))) {
+ return false;
+ }
+
+ count++;
+ }
+
+ *properties = count;
+ return true;
+}
+
+// Objects are written as a "preorder" traversal of the object graph: object
+// "headers" (the class tag and any data needed for initial construction) are
+// visited first, then the children are recursed through (where children are
+// properties, Set or Map entries, etc.). So for example
+//
+// obj1 = { key1: { key1.1: val1.1, key1.2: val1.2 }, key2: {} }
+//
+// would be stored as:
+//
+// <Object tag for obj1>
+// <key1 data>
+// <Object tag for key1's value>
+// <key1.1 data>
+// <val1.1 data>
+// <key1.2 data>
+// <val1.2 data>
+// <end-of-children marker for key1's value>
+// <key2 data>
+// <Object tag for key2's value>
+// <end-of-children marker for key2's value>
+// <end-of-children marker for obj1>
+//
+// This nests nicely (ie, an entire recursive value starts with its tag and
+// ends with its end-of-children marker) and so it can be presented indented.
+// But see traverseMap below for how this looks different for Maps.
+bool JSStructuredCloneWriter::traverseObject(HandleObject obj, ESClass cls) {
+ size_t count;
+ bool optimized = false;
+ if (!js::SupportDifferentialTesting()) {
+ if (!TryAppendNativeProperties(context(), obj, &objectEntries, &count,
+ &optimized)) {
+ return false;
+ }
+ }
+
+ if (!optimized) {
+ // Get enumerable property ids and put them in reverse order so that they
+ // will come off the stack in forward order.
+ RootedIdVector properties(context());
+ if (!GetPropertyKeys(context(), obj, JSITER_OWNONLY, &properties)) {
+ return false;
+ }
+
+ for (size_t i = properties.length(); i > 0; --i) {
+ jsid id = properties[i - 1];
+
+ MOZ_ASSERT(id.isString() || id.isInt());
+ if (!objectEntries.append(id)) {
+ return false;
+ }
+ }
+
+ count = properties.length();
+ }
+
+ // Push obj and count to the stack.
+ if (!objs.append(ObjectValue(*obj)) || !counts.append(count)) {
+ return false;
+ }
+
+ checkStack();
+
+#ifdef DEBUG
+ ESClass cls2;
+ if (!GetBuiltinClass(context(), obj, &cls2)) {
+ return false;
+ }
+ MOZ_ASSERT(cls2 == cls);
+#endif
+
+ // Write the header for obj.
+ if (cls == ESClass::Array) {
+ uint32_t length = 0;
+ if (!JS::GetArrayLength(context(), obj, &length)) {
+ return false;
+ }
+
+ return out.writePair(SCTAG_ARRAY_OBJECT,
+ NativeEndian::swapToLittleEndian(length));
+ }
+
+ return out.writePair(SCTAG_OBJECT_OBJECT, 0);
+}
+
+// Use the same basic setup as for traverseObject, but now keys can themselves
+// be complex objects. Keys and values are visited first via startWrite(), then
+// the key's children (if any) are handled, then the value's children.
+//
+// m = new Map();
+// m.set(key1 = ..., value1 = ...)
+//
+// where key1 and value1 are both objects, would be stored as:
+//
+// <Map tag>
+// <key1 class tag>
+// <value1 class tag>
+// ...key1 fields...
+// <end-of-children marker for key1>
+// ...value1 fields...
+// <end-of-children marker for value1>
+// <end-of-children marker for Map>
+//
+// Notice how the end-of-children marker for key1 is sandwiched between the
+// value1 beginning and end.
+bool JSStructuredCloneWriter::traverseMap(HandleObject obj) {
+ Rooted<GCVector<Value>> newEntries(context(), GCVector<Value>(context()));
+ {
+ // If there is no wrapper, the compartment munging is a no-op.
+ RootedObject unwrapped(context(), obj->maybeUnwrapAs<MapObject>());
+ MOZ_ASSERT(unwrapped);
+ JSAutoRealm ar(context(), unwrapped);
+ if (!MapObject::getKeysAndValuesInterleaved(unwrapped, &newEntries)) {
+ return false;
+ }
+ }
+ if (!context()->compartment()->wrap(context(), &newEntries)) {
+ return false;
+ }
+
+ for (size_t i = newEntries.length(); i > 0; --i) {
+ if (!otherEntries.append(newEntries[i - 1])) {
+ return false;
+ }
+ }
+
+ // Push obj and count to the stack.
+ if (!objs.append(ObjectValue(*obj)) || !counts.append(newEntries.length())) {
+ return false;
+ }
+
+ checkStack();
+
+ // Write the header for obj.
+ return out.writePair(SCTAG_MAP_OBJECT, 0);
+}
+
+// Similar to traverseMap, only there is a single value instead of a key and
+// value, and thus no interleaving is possible: a value will be fully emitted
+// before the next value is begun.
+bool JSStructuredCloneWriter::traverseSet(HandleObject obj) {
+ Rooted<GCVector<Value>> keys(context(), GCVector<Value>(context()));
+ {
+ // If there is no wrapper, the compartment munging is a no-op.
+ RootedObject unwrapped(context(), obj->maybeUnwrapAs<SetObject>());
+ MOZ_ASSERT(unwrapped);
+ JSAutoRealm ar(context(), unwrapped);
+ if (!SetObject::keys(context(), unwrapped, &keys)) {
+ return false;
+ }
+ }
+ if (!context()->compartment()->wrap(context(), &keys)) {
+ return false;
+ }
+
+ for (size_t i = keys.length(); i > 0; --i) {
+ if (!otherEntries.append(keys[i - 1])) {
+ return false;
+ }
+ }
+
+ // Push obj and count to the stack.
+ if (!objs.append(ObjectValue(*obj)) || !counts.append(keys.length())) {
+ return false;
+ }
+
+ checkStack();
+
+ // Write the header for obj.
+ return out.writePair(SCTAG_SET_OBJECT, 0);
+}
+
+bool JSStructuredCloneWriter::traverseSavedFrame(HandleObject obj) {
+ Rooted<SavedFrame*> savedFrame(context(), obj->maybeUnwrapAs<SavedFrame>());
+ MOZ_ASSERT(savedFrame);
+
+ RootedObject parent(context(), savedFrame->getParent());
+ if (!context()->compartment()->wrap(context(), &parent)) {
+ return false;
+ }
+
+ if (!objs.append(ObjectValue(*obj)) ||
+ !otherEntries.append(parent ? ObjectValue(*parent) : NullValue()) ||
+ !counts.append(1)) {
+ return false;
+ }
+
+ checkStack();
+
+ // Write the SavedFrame tag and the SavedFrame's principals.
+
+ if (savedFrame->getPrincipals() ==
+ &ReconstructedSavedFramePrincipals::IsSystem) {
+ if (!out.writePair(SCTAG_SAVED_FRAME_OBJECT,
+ SCTAG_RECONSTRUCTED_SAVED_FRAME_PRINCIPALS_IS_SYSTEM)) {
+ return false;
+ }
+ } else if (savedFrame->getPrincipals() ==
+ &ReconstructedSavedFramePrincipals::IsNotSystem) {
+ if (!out.writePair(
+ SCTAG_SAVED_FRAME_OBJECT,
+ SCTAG_RECONSTRUCTED_SAVED_FRAME_PRINCIPALS_IS_NOT_SYSTEM)) {
+ return false;
+ }
+ } else {
+ if (auto principals = savedFrame->getPrincipals()) {
+ if (!out.writePair(SCTAG_SAVED_FRAME_OBJECT, SCTAG_JSPRINCIPALS) ||
+ !principals->write(context(), this)) {
+ return false;
+ }
+ } else {
+ if (!out.writePair(SCTAG_SAVED_FRAME_OBJECT, SCTAG_NULL_JSPRINCIPALS)) {
+ return false;
+ }
+ }
+ }
+
+ // Write the SavedFrame's reserved slots, except for the parent, which is
+ // queued on objs for further traversal.
+
+ RootedValue val(context());
+
+ val = BooleanValue(savedFrame->getMutedErrors());
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ context()->markAtom(savedFrame->getSource());
+ val = StringValue(savedFrame->getSource());
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ val = NumberValue(savedFrame->getLine());
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ val = NumberValue(savedFrame->getColumn());
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ auto name = savedFrame->getFunctionDisplayName();
+ if (name) {
+ context()->markAtom(name);
+ }
+ val = name ? StringValue(name) : NullValue();
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ auto cause = savedFrame->getAsyncCause();
+ if (cause) {
+ context()->markAtom(cause);
+ }
+ val = cause ? StringValue(cause) : NullValue();
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ return true;
+}
+
+// https://html.spec.whatwg.org/multipage/structured-data.html#structuredserializeinternal
+// 2.7.3 StructuredSerializeInternal ( value, forStorage [ , memory ] )
+//
+// Step 17. Otherwise, if value has an [[ErrorData]] internal slot and
+// value is not a platform object, then:
+//
+// Note: This contains custom extensions for handling non-standard properties.
+bool JSStructuredCloneWriter::traverseError(HandleObject obj) {
+ JSContext* cx = context();
+
+ // 1. Let name be ? Get(value, "name").
+ RootedValue name(cx);
+ if (!GetProperty(cx, obj, obj, cx->names().name, &name)) {
+ return false;
+ }
+
+ // 2. If name is not one of "Error", "EvalError", "RangeError",
+ // "ReferenceError", "SyntaxError", "TypeError", or "URIError",
+ // (not yet specified: or "AggregateError")
+ // then set name to "Error".
+ JSExnType type = JSEXN_ERR;
+ if (name.isString()) {
+ JSLinearString* linear = name.toString()->ensureLinear(cx);
+ if (!linear) {
+ return false;
+ }
+
+ if (EqualStrings(linear, cx->names().Error)) {
+ type = JSEXN_ERR;
+ } else if (EqualStrings(linear, cx->names().EvalError)) {
+ type = JSEXN_EVALERR;
+ } else if (EqualStrings(linear, cx->names().RangeError)) {
+ type = JSEXN_RANGEERR;
+ } else if (EqualStrings(linear, cx->names().ReferenceError)) {
+ type = JSEXN_REFERENCEERR;
+ } else if (EqualStrings(linear, cx->names().SyntaxError)) {
+ type = JSEXN_SYNTAXERR;
+ } else if (EqualStrings(linear, cx->names().TypeError)) {
+ type = JSEXN_TYPEERR;
+ } else if (EqualStrings(linear, cx->names().URIError)) {
+ type = JSEXN_URIERR;
+ } else if (EqualStrings(linear, cx->names().AggregateError)) {
+ type = JSEXN_AGGREGATEERR;
+ }
+ }
+
+ // 3. Let valueMessageDesc be ? value.[[GetOwnProperty]]("message").
+ RootedId messageId(cx, NameToId(cx->names().message));
+ Rooted<Maybe<PropertyDescriptor>> messageDesc(cx);
+ if (!GetOwnPropertyDescriptor(cx, obj, messageId, &messageDesc)) {
+ return false;
+ }
+
+ // 4. Let message be undefined if IsDataDescriptor(valueMessageDesc) is false,
+ // and ? ToString(valueMessageDesc.[[Value]]) otherwise.
+ RootedString message(cx);
+ if (messageDesc.isSome() && messageDesc->isDataDescriptor()) {
+ RootedValue messageVal(cx, messageDesc->value());
+ message = ToString<CanGC>(cx, messageVal);
+ if (!message) {
+ return false;
+ }
+ }
+
+ // 5. Set serialized to { [[Type]]: "Error", [[Name]]: name, [[Message]]:
+ // message }.
+
+ if (!objs.append(ObjectValue(*obj))) {
+ return false;
+ }
+
+ Rooted<ErrorObject*> unwrapped(cx, obj->maybeUnwrapAs<ErrorObject>());
+ MOZ_ASSERT(unwrapped);
+
+ // Non-standard: Serialize |stack|.
+ // The Error stack property is saved as SavedFrames.
+ RootedValue stack(cx, NullValue());
+ RootedObject stackObj(cx, unwrapped->stack());
+ if (stackObj && stackObj->canUnwrapAs<SavedFrame>()) {
+ stack.setObject(*stackObj);
+ if (!cx->compartment()->wrap(cx, &stack)) {
+ return false;
+ }
+ }
+ if (!otherEntries.append(stack)) {
+ return false;
+ }
+
+ // Serialize |errors|
+ if (type == JSEXN_AGGREGATEERR) {
+ RootedValue errors(cx);
+ if (!GetProperty(cx, obj, obj, cx->names().errors, &errors)) {
+ return false;
+ }
+ if (!otherEntries.append(errors)) {
+ return false;
+ }
+ } else {
+ if (!otherEntries.append(NullValue())) {
+ return false;
+ }
+ }
+
+ // Non-standard: Serialize |cause|. Because this property
+ // might be missing we also write "hasCause" later.
+ RootedId causeId(cx, NameToId(cx->names().cause));
+ Rooted<Maybe<PropertyDescriptor>> causeDesc(cx);
+ if (!GetOwnPropertyDescriptor(cx, obj, causeId, &causeDesc)) {
+ return false;
+ }
+
+ Rooted<Maybe<Value>> cause(cx);
+ if (causeDesc.isSome() && causeDesc->isDataDescriptor()) {
+ cause = mozilla::Some(causeDesc->value());
+ }
+ if (!cx->compartment()->wrap(cx, &cause)) {
+ return false;
+ }
+ if (!otherEntries.append(cause.get().valueOr(NullValue()))) {
+ return false;
+ }
+
+ // |cause| + |errors| + |stack|, pushed in reverse order
+ if (!counts.append(3)) {
+ return false;
+ }
+
+ checkStack();
+
+ if (!out.writePair(SCTAG_ERROR_OBJECT, type)) {
+ return false;
+ }
+
+ RootedValue val(cx, message ? StringValue(message) : NullValue());
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ // hasCause
+ val = BooleanValue(cause.isSome());
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ // Non-standard: Also serialize fileName, lineNumber and columnNumber.
+ {
+ JSAutoRealm ar(cx, unwrapped);
+ val = StringValue(unwrapped->fileName(cx));
+ }
+ if (!cx->compartment()->wrap(cx, &val) || !writePrimitive(val)) {
+ return false;
+ }
+
+ val = Int32Value(unwrapped->lineNumber());
+ if (!writePrimitive(val)) {
+ return false;
+ }
+
+ val = Int32Value(unwrapped->columnNumber());
+ return writePrimitive(val);
+}
+
+bool JSStructuredCloneWriter::writePrimitive(HandleValue v) {
+ MOZ_ASSERT(v.isPrimitive());
+ context()->check(v);
+
+ if (v.isString()) {
+ return writeString(SCTAG_STRING, v.toString());
+ } else if (v.isInt32()) {
+ if (js::SupportDifferentialTesting()) {
+ return out.writeDouble(v.toInt32());
+ }
+ return out.writePair(SCTAG_INT32, v.toInt32());
+ } else if (v.isDouble()) {
+ return out.writeDouble(v.toDouble());
+ } else if (v.isBoolean()) {
+ return out.writePair(SCTAG_BOOLEAN, v.toBoolean());
+ } else if (v.isNull()) {
+ return out.writePair(SCTAG_NULL, 0);
+ } else if (v.isUndefined()) {
+ return out.writePair(SCTAG_UNDEFINED, 0);
+ } else if (v.isBigInt()) {
+ return writeBigInt(SCTAG_BIGINT, v.toBigInt());
+ }
+
+ return reportDataCloneError(JS_SCERR_UNSUPPORTED_TYPE);
+}
+
+bool JSStructuredCloneWriter::startWrite(HandleValue v) {
+ context()->check(v);
+
+ if (v.isPrimitive()) {
+ return writePrimitive(v);
+ }
+
+ if (!v.isObject()) {
+ return reportDataCloneError(JS_SCERR_UNSUPPORTED_TYPE);
+ }
+
+ RootedObject obj(context(), &v.toObject());
+
+ bool backref;
+ if (!startObject(obj, &backref)) {
+ return false;
+ }
+ if (backref) {
+ return true;
+ }
+
+ ESClass cls;
+ if (!GetBuiltinClass(context(), obj, &cls)) {
+ return false;
+ }
+
+ switch (cls) {
+ case ESClass::Object:
+ case ESClass::Array:
+ return traverseObject(obj, cls);
+ case ESClass::Number: {
+ RootedValue unboxed(context());
+ if (!Unbox(context(), obj, &unboxed)) {
+ return false;
+ }
+ return out.writePair(SCTAG_NUMBER_OBJECT, 0) &&
+ out.writeDouble(unboxed.toNumber());
+ }
+ case ESClass::String: {
+ RootedValue unboxed(context());
+ if (!Unbox(context(), obj, &unboxed)) {
+ return false;
+ }
+ return writeString(SCTAG_STRING_OBJECT, unboxed.toString());
+ }
+ case ESClass::Boolean: {
+ RootedValue unboxed(context());
+ if (!Unbox(context(), obj, &unboxed)) {
+ return false;
+ }
+ return out.writePair(SCTAG_BOOLEAN_OBJECT, unboxed.toBoolean());
+ }
+ case ESClass::RegExp: {
+ RegExpShared* re = RegExpToShared(context(), obj);
+ if (!re) {
+ return false;
+ }
+ return out.writePair(SCTAG_REGEXP_OBJECT, re->getFlags().value()) &&
+ writeString(SCTAG_STRING, re->getSource());
+ }
+ case ESClass::ArrayBuffer: {
+ if (JS::IsArrayBufferObject(obj) && JS::ArrayBufferHasData(obj)) {
+ return writeArrayBuffer(obj);
+ }
+ break;
+ }
+ case ESClass::SharedArrayBuffer:
+ if (JS::IsSharedArrayBufferObject(obj)) {
+ return writeSharedArrayBuffer(obj);
+ }
+ break;
+ case ESClass::Date: {
+ RootedValue unboxed(context());
+ if (!Unbox(context(), obj, &unboxed)) {
+ return false;
+ }
+ return out.writePair(SCTAG_DATE_OBJECT, 0) &&
+ out.writeDouble(unboxed.toNumber());
+ }
+ case ESClass::Set:
+ return traverseSet(obj);
+ case ESClass::Map:
+ return traverseMap(obj);
+ case ESClass::Error:
+ return traverseError(obj);
+ case ESClass::BigInt: {
+ RootedValue unboxed(context());
+ if (!Unbox(context(), obj, &unboxed)) {
+ return false;
+ }
+ return writeBigInt(SCTAG_BIGINT_OBJECT, unboxed.toBigInt());
+ }
+ case ESClass::Promise:
+ case ESClass::MapIterator:
+ case ESClass::SetIterator:
+ case ESClass::Arguments:
+ case ESClass::Function:
+ break;
+
+#ifdef ENABLE_RECORD_TUPLE
+ case ESClass::Record:
+ case ESClass::Tuple:
+ MOZ_CRASH("Record and Tuple are not supported");
+#endif
+
+ case ESClass::Other: {
+ if (obj->canUnwrapAs<TypedArrayObject>()) {
+ return writeTypedArray(obj);
+ }
+ if (obj->canUnwrapAs<DataViewObject>()) {
+ return writeDataView(obj);
+ }
+ if (wasm::IsSharedWasmMemoryObject(obj)) {
+ return writeSharedWasmMemory(obj);
+ }
+ if (obj->canUnwrapAs<SavedFrame>()) {
+ return traverseSavedFrame(obj);
+ }
+ break;
+ }
+ }
+
+ if (out.buf.callbacks_ && out.buf.callbacks_->write) {
+ bool sameProcessScopeRequired = false;
+ if (!out.buf.callbacks_->write(context(), this, obj,
+ &sameProcessScopeRequired,
+ out.buf.closure_)) {
+ return false;
+ }
+
+ if (sameProcessScopeRequired) {
+ output().sameProcessScopeRequired();
+ }
+
+ return true;
+ }
+
+ return reportDataCloneError(JS_SCERR_UNSUPPORTED_TYPE);
+}
+
+bool JSStructuredCloneWriter::writeHeader() {
+ return out.writePair(SCTAG_HEADER, (uint32_t)output().scope());
+}
+
+bool JSStructuredCloneWriter::writeTransferMap() {
+ if (transferableObjects.empty()) {
+ return true;
+ }
+
+ if (!out.writePair(SCTAG_TRANSFER_MAP_HEADER, (uint32_t)SCTAG_TM_UNREAD)) {
+ return false;
+ }
+
+ if (!out.write(transferableObjects.length())) {
+ return false;
+ }
+
+ RootedObject obj(context());
+ for (auto* o : transferableObjects) {
+ obj = o;
+ if (!memory.put(obj, memory.count())) {
+ ReportOutOfMemory(context());
+ return false;
+ }
+
+ // Emit a placeholder pointer. We defer stealing the data until later
+ // (and, if necessary, detaching this object if it's an ArrayBuffer).
+ if (!out.writePair(SCTAG_TRANSFER_MAP_PENDING_ENTRY,
+ JS::SCTAG_TMO_UNFILLED)) {
+ return false;
+ }
+ if (!out.write(0)) { // Pointer to ArrayBuffer contents.
+ return false;
+ }
+ if (!out.write(0)) { // extraData
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool JSStructuredCloneWriter::transferOwnership() {
+ if (transferableObjects.empty()) {
+ return true;
+ }
+
+ // Walk along the transferables and the transfer map at the same time,
+ // grabbing out pointers from the transferables and stuffing them into the
+ // transfer map.
+ auto point = out.iter();
+ MOZ_RELEASE_ASSERT(point.canPeek());
+ MOZ_ASSERT(uint32_t(NativeEndian::swapFromLittleEndian(point.peek()) >> 32) ==
+ SCTAG_HEADER);
+ point++;
+ MOZ_RELEASE_ASSERT(point.canPeek());
+ MOZ_ASSERT(uint32_t(NativeEndian::swapFromLittleEndian(point.peek()) >> 32) ==
+ SCTAG_TRANSFER_MAP_HEADER);
+ point++;
+ MOZ_RELEASE_ASSERT(point.canPeek());
+ MOZ_ASSERT(NativeEndian::swapFromLittleEndian(point.peek()) ==
+ transferableObjects.length());
+ point++;
+
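+ // point now references the first transfer map entry (a pending-entry
+ // placeholder).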
+ JSContext* cx = context();
+ RootedObject obj(cx);
+ JS::StructuredCloneScope scope = output().scope();
+ for (auto* o : transferableObjects) {
+ obj = o;
+
+ uint32_t tag;
+ JS::TransferableOwnership ownership;
+ void* content;
+ uint64_t extraData;
+
+#ifdef DEBUG
+ SCInput::getPair(point.peek(), &tag, (uint32_t*)&ownership);
+ MOZ_ASSERT(tag == SCTAG_TRANSFER_MAP_PENDING_ENTRY);
+ MOZ_ASSERT(ownership == JS::SCTAG_TMO_UNFILLED);
+#endif
+
+ ESClass cls;
+ if (!GetBuiltinClass(cx, obj, &cls)) {
+ return false;
+ }
+
+ if (cls == ESClass::ArrayBuffer) {
+ tag = SCTAG_TRANSFER_MAP_ARRAY_BUFFER;
+
+ // The current setup of the array buffer inheritance hierarchy doesn't
+ // lend itself well to generic manipulation via proxies.
+ Rooted<ArrayBufferObject*> arrayBuffer(
+ cx, obj->maybeUnwrapAs<ArrayBufferObject>());
+ JSAutoRealm ar(cx, arrayBuffer);
+
+ if (arrayBuffer->isDetached()) {
+ reportDataCloneError(JS_SCERR_TYPED_ARRAY_DETACHED);
+ return false;
+ }
+
+ if (arrayBuffer->isPreparedForAsmJS()) {
+ reportDataCloneError(JS_SCERR_WASM_NO_TRANSFER);
+ return false;
+ }
+
+ if (scope == JS::StructuredCloneScope::DifferentProcess ||
+ scope == JS::StructuredCloneScope::DifferentProcessForIndexedDB) {
+ // Write Transferred ArrayBuffers in DifferentProcess scope at
+ // the end of the clone buffer, and store the offset within the
+ // buffer to where the ArrayBuffer was written. Note that this
+ // will invalidate the current position iterator.
+
+ size_t pointOffset = out.offset(point);
+ tag = SCTAG_TRANSFER_MAP_STORED_ARRAY_BUFFER;
+ ownership = JS::SCTAG_TMO_UNOWNED;
+ content = nullptr;
+ extraData = out.tell() -
+ pointOffset; // Offset from tag to current end of buffer
+ if (!writeArrayBuffer(arrayBuffer)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Must refresh the point iterator after its collection has
+ // been modified.
+ point = out.iter();
+ point += pointOffset;
+
+ if (!JS::DetachArrayBuffer(cx, arrayBuffer)) {
+ return false;
+ }
+ } else {
+ size_t nbytes = arrayBuffer->byteLength();
+
+ using BufferContents = ArrayBufferObject::BufferContents;
+
+ BufferContents bufContents =
+ ArrayBufferObject::extractStructuredCloneContents(cx, arrayBuffer);
+ if (!bufContents) {
+ return false; // out of memory
+ }
+
+ content = bufContents.data();
+ if (bufContents.kind() == ArrayBufferObject::MAPPED) {
+ ownership = JS::SCTAG_TMO_MAPPED_DATA;
+ } else {
+ MOZ_ASSERT(bufContents.kind() == ArrayBufferObject::MALLOCED,
+ "failing to handle new ArrayBuffer kind?");
+ ownership = JS::SCTAG_TMO_ALLOC_DATA;
+ }
+ extraData = nbytes;
+ }
+ } else {
+ if (!out.buf.callbacks_ || !out.buf.callbacks_->writeTransfer) {
+ return reportDataCloneError(JS_SCERR_TRANSFERABLE);
+ }
+ if (!out.buf.callbacks_->writeTransfer(cx, obj, out.buf.closure_, &tag,
+ &ownership, &content,
+ &extraData)) {
+ return false;
+ }
+ MOZ_ASSERT(tag > SCTAG_TRANSFER_MAP_PENDING_ENTRY);
+ }
+
+ point.write(NativeEndian::swapToLittleEndian(PairToUInt64(tag, ownership)));
+ MOZ_ALWAYS_TRUE(point.advance());
+ point.write(
+ NativeEndian::swapToLittleEndian(reinterpret_cast<uint64_t>(content)));
+ MOZ_ALWAYS_TRUE(point.advance());
+ point.write(NativeEndian::swapToLittleEndian(extraData));
+ MOZ_ALWAYS_TRUE(point.advance());
+ }
+
+#ifdef DEBUG
+ // Make sure there aren't any more transfer map entries after the expected
+ // number we read out.
+ if (!point.done()) {
+ uint32_t tag, data;
+ SCInput::getPair(point.peek(), &tag, &data);
+ MOZ_ASSERT(tag < SCTAG_TRANSFER_MAP_HEADER ||
+ tag >= SCTAG_TRANSFER_MAP_END_OF_BUILTIN_TYPES);
+ }
+#endif
+ return true;
+}
+
+bool JSStructuredCloneWriter::write(HandleValue v) {
+ if (!startWrite(v)) {
+ return false;
+ }
+
+ RootedObject obj(context());
+ RootedValue key(context());
+ RootedValue val(context());
+ RootedId id(context());
+
+ RootedValue cause(context());
+ RootedValue errors(context());
+ RootedValue stack(context());
+
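+ // objs and counts form a stack of partially written container objects;
+ // counts.back() is the number of children of objs.back() still pending.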
+ while (!counts.empty()) {
+ obj = &objs.back().toObject();
+ context()->check(obj);
+ if (counts.back()) {
+ counts.back()--;
+
+ ESClass cls;
+ if (!GetBuiltinClass(context(), obj, &cls)) {
+ return false;
+ }
+
+ if (cls == ESClass::Map) {
+ key = otherEntries.popCopy();
+ checkStack();
+
+ counts.back()--;
+ val = otherEntries.popCopy();
+ checkStack();
+
+ if (!startWrite(key) || !startWrite(val)) {
+ return false;
+ }
+ } else if (cls == ESClass::Set || obj->canUnwrapAs<SavedFrame>()) {
+ key = otherEntries.popCopy();
+ checkStack();
+
+ if (!startWrite(key)) {
+ return false;
+ }
+ } else if (cls == ESClass::Error) {
+ cause = otherEntries.popCopy();
+ checkStack();
+
+ counts.back()--;
+ errors = otherEntries.popCopy();
+ checkStack();
+
+ counts.back()--;
+ stack = otherEntries.popCopy();
+ checkStack();
+
+ if (!startWrite(cause) || !startWrite(errors) || !startWrite(stack)) {
+ return false;
+ }
+ } else {
+ id = objectEntries.popCopy();
+ key = IdToValue(id);
+ checkStack();
+
+ // If obj still has an own property named id, write it out.
+ bool found;
+ if (GetOwnPropertyPure(context(), obj, id, val.address(), &found)) {
+ if (found) {
+ if (!writePrimitive(key) || !startWrite(val)) {
+ return false;
+ }
+ }
+ continue;
+ }
+
+ if (!HasOwnProperty(context(), obj, id, &found)) {
+ return false;
+ }
+
+ if (found) {
+#ifdef FUZZING_JS_FUZZILLI
+ // Suppress calls into user code.
+ if (js::SupportDifferentialTesting()) {
+ fprintf(stderr, "Differential testing: cannot call GetProperty\n");
+ return false;
+ }
+#endif
+
+ if (!writePrimitive(key) ||
+ !GetProperty(context(), obj, obj, id, &val) || !startWrite(val)) {
+ return false;
+ }
+ }
+ }
+ } else {
+ if (!out.writePair(SCTAG_END_OF_KEYS, 0)) {
+ return false;
+ }
+ objs.popBack();
+ counts.popBack();
+ }
+ }
+
+ memory.clear();
+ return transferOwnership();
+}
+
+template <typename CharT>
+JSString* JSStructuredCloneReader::readStringImpl(uint32_t nchars,
+ gc::Heap heap) {
+ if (nchars > JSString::MAX_LENGTH) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "string length");
+ return nullptr;
+ }
+
+ InlineCharBuffer<CharT> chars;
+ if (!chars.maybeAlloc(context(), nchars) ||
+ !in.readChars(chars.get(), nchars)) {
+ return nullptr;
+ }
+ return chars.toStringDontDeflate(context(), nchars, heap);
+}
+
+JSString* JSStructuredCloneReader::readString(uint32_t data, gc::Heap heap) {
+ uint32_t nchars = data & BitMask(31);
+ bool latin1 = data & (1 << 31);
+ return latin1 ? readStringImpl<Latin1Char>(nchars, heap)
+ : readStringImpl<char16_t>(nchars, heap);
+}
+
+[[nodiscard]] bool JSStructuredCloneReader::readUint32(uint32_t* num) {
+ Rooted<Value> lineVal(context());
+ if (!startRead(&lineVal)) {
+ return false;
+ }
+ if (!lineVal.isInt32()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "integer required");
+ return false;
+ }
+ *num = uint32_t(lineVal.toInt32());
+ return true;
+}
+
+BigInt* JSStructuredCloneReader::readBigInt(uint32_t data) {
+ size_t length = data & BitMask(31);
+ bool isNegative = data & (1 << 31);
+ if (length == 0) {
+ return BigInt::zero(context());
+ }
+ RootedBigInt result(
+ context(), BigInt::createUninitialized(context(), length, isNegative));
+ if (!result) {
+ return nullptr;
+ }
+ if (!in.readArray(result->digits().data(), length)) {
+ return nullptr;
+ }
+ return result;
+}
+
+static uint32_t TagToV1ArrayType(uint32_t tag) {
+ MOZ_ASSERT(tag >= SCTAG_TYPED_ARRAY_V1_MIN &&
+ tag <= SCTAG_TYPED_ARRAY_V1_MAX);
+ return tag - SCTAG_TYPED_ARRAY_V1_MIN;
+}
+
+bool JSStructuredCloneReader::readTypedArray(uint32_t arrayType,
+ uint64_t nelems,
+ MutableHandleValue vp,
+ bool v1Read) {
+ if (arrayType > (v1Read ? Scalar::Uint8Clamped : Scalar::BigUint64)) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "unhandled typed array element type");
+ return false;
+ }
+
+ // Push a placeholder onto the allObjs list to stand in for the typed array.
+ uint32_t placeholderIndex = allObjs.length();
+ Value dummy = UndefinedValue();
+ if (!allObjs.append(dummy)) {
+ return false;
+ }
+
+ // Read the ArrayBuffer object and its contents (but no properties)
+ RootedValue v(context());
+ uint64_t byteOffset;
+ if (v1Read) {
+ if (!readV1ArrayBuffer(arrayType, nelems, &v)) {
+ return false;
+ }
+ byteOffset = 0;
+ } else {
+ if (!startRead(&v)) {
+ return false;
+ }
+ if (!in.read(&byteOffset)) {
+ return false;
+ }
+ }
+
+ // Ensure invalid 64-bit values won't be truncated below.
+ if (nelems > ArrayBufferObject::MaxByteLength ||
+ byteOffset > ArrayBufferObject::MaxByteLength) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid typed array length or offset");
+ return false;
+ }
+
+ if (!v.isObject() || !v.toObject().is<ArrayBufferObjectMaybeShared>()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "typed array must be backed by an ArrayBuffer");
+ return false;
+ }
+
+ RootedObject buffer(context(), &v.toObject());
+ RootedObject obj(context(), nullptr);
+
+ switch (arrayType) {
+#define CREATE_FROM_BUFFER(ExternalType, NativeType, Name) \
+ case Scalar::Name: \
+ obj = JS::TypedArray<Scalar::Name>::fromBuffer(context(), buffer, \
+ byteOffset, nelems) \
+ .asObject(); \
+ break;
+
+ JS_FOR_EACH_TYPED_ARRAY(CREATE_FROM_BUFFER)
+#undef CREATE_FROM_BUFFER
+
+ default:
+ MOZ_CRASH("Can't happen: arrayType range checked above");
+ }
+
+ if (!obj) {
+ return false;
+ }
+ vp.setObject(*obj);
+
+ allObjs[placeholderIndex].set(vp);
+
+ return true;
+}
+
+bool JSStructuredCloneReader::readDataView(uint64_t byteLength,
+ MutableHandleValue vp) {
+ // Push a placeholder onto the allObjs list to stand in for the DataView.
+ uint32_t placeholderIndex = allObjs.length();
+ Value dummy = UndefinedValue();
+ if (!allObjs.append(dummy)) {
+ return false;
+ }
+
+ // Read the ArrayBuffer object and its contents (but no properties).
+ RootedValue v(context());
+ if (!startRead(&v)) {
+ return false;
+ }
+ if (!v.isObject() || !v.toObject().is<ArrayBufferObjectMaybeShared>()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "DataView must be backed by an ArrayBuffer");
+ return false;
+ }
+
+ // Read byteOffset.
+ uint64_t byteOffset;
+ if (!in.read(&byteOffset)) {
+ return false;
+ }
+
+ // Ensure invalid 64-bit values won't be truncated below.
+ if (byteLength > ArrayBufferObject::MaxByteLength ||
+ byteOffset > ArrayBufferObject::MaxByteLength) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid DataView length or offset");
+ return false;
+ }
+
+ RootedObject buffer(context(), &v.toObject());
+ RootedObject obj(context(),
+ JS_NewDataView(context(), buffer, byteOffset, byteLength));
+ if (!obj) {
+ return false;
+ }
+ vp.setObject(*obj);
+
+ allObjs[placeholderIndex].set(vp);
+
+ return true;
+}
+
+bool JSStructuredCloneReader::readArrayBuffer(StructuredDataType type,
+ uint32_t data,
+ MutableHandleValue vp) {
+ // V2 stores the length in |data|. The current version stores the
+ // length separately to allow larger length values.
+ uint64_t nbytes = 0;
+ if (type == SCTAG_ARRAY_BUFFER_OBJECT) {
+ if (!in.read(&nbytes)) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(type == SCTAG_ARRAY_BUFFER_OBJECT_V2);
+ nbytes = data;
+ }
+
+ // The maximum ArrayBuffer size depends on the platform, and we cast to size_t
+ // below, so we have to check this here.
+ if (nbytes > ArrayBufferObject::MaxByteLength) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return false;
+ }
+
+ JSObject* obj = ArrayBufferObject::createZeroed(context(), size_t(nbytes));
+ if (!obj) {
+ return false;
+ }
+ vp.setObject(*obj);
+ ArrayBufferObject& buffer = obj->as<ArrayBufferObject>();
+ MOZ_ASSERT(buffer.byteLength() == nbytes);
+ return in.readArray(buffer.dataPointer(), nbytes);
+}
+
+bool JSStructuredCloneReader::readSharedArrayBuffer(MutableHandleValue vp) {
+ if (!cloneDataPolicy.areIntraClusterClonableSharedObjectsAllowed() ||
+ !cloneDataPolicy.areSharedMemoryObjectsAllowed()) {
+ auto error = context()->realm()->creationOptions().getCoopAndCoepEnabled()
+ ? JS_SCERR_NOT_CLONABLE_WITH_COOP_COEP
+ : JS_SCERR_NOT_CLONABLE;
+ ReportDataCloneError(context(), callbacks, error, closure,
+ "SharedArrayBuffer");
+ return false;
+ }
+
+ uint64_t byteLength;
+ if (!in.readBytes(&byteLength, sizeof(byteLength))) {
+ return in.reportTruncated();
+ }
+
+ // The maximum ArrayBuffer size depends on the platform, and we cast to size_t
+ // below, so we have to check this here.
+ if (byteLength > ArrayBufferObject::MaxByteLength) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return false;
+ }
+
+ intptr_t p;
+ if (!in.readBytes(&p, sizeof(p))) {
+ return in.reportTruncated();
+ }
+
+ SharedArrayRawBuffer* rawbuf = reinterpret_cast<SharedArrayRawBuffer*>(p);
+
+ // There's no guarantee that the receiving agent has enabled shared memory
+ // even if the transmitting agent has done so. Ideally we'd check at the
+ // transmission point, but that's tricky, and it will be a very rare problem
+ // in any case. Just fail at the receiving end if we can't handle it.
+
+ if (!context()
+ ->realm()
+ ->creationOptions()
+ .getSharedMemoryAndAtomicsEnabled()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_SAB_DISABLED);
+ return false;
+ }
+
+ // The new object will have a new reference to the rawbuf.
+
+ if (!rawbuf->addReference()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_SAB_REFCNT_OFLO);
+ return false;
+ }
+
+ RootedObject obj(context(),
+ SharedArrayBufferObject::New(context(), rawbuf, byteLength));
+ if (!obj) {
+ rawbuf->dropReference();
+ return false;
+ }
+
+ // `rawbuf` is now owned by `obj`.
+
+ if (callbacks && callbacks->sabCloned &&
+ !callbacks->sabCloned(context(), /*receiving=*/true, closure)) {
+ return false;
+ }
+
+ vp.setObject(*obj);
+ return true;
+}
+
+bool JSStructuredCloneReader::readSharedWasmMemory(uint32_t nbytes,
+ MutableHandleValue vp) {
+ JSContext* cx = context();
+ if (nbytes != 0) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid shared wasm memory tag");
+ return false;
+ }
+
+ if (!cloneDataPolicy.areIntraClusterClonableSharedObjectsAllowed() ||
+ !cloneDataPolicy.areSharedMemoryObjectsAllowed()) {
+ auto error = context()->realm()->creationOptions().getCoopAndCoepEnabled()
+ ? JS_SCERR_NOT_CLONABLE_WITH_COOP_COEP
+ : JS_SCERR_NOT_CLONABLE;
+ ReportDataCloneError(cx, callbacks, error, closure, "WebAssembly.Memory");
+ return false;
+ }
+
+ // Read the isHuge flag
+ RootedValue isHuge(cx);
+ if (!startRead(&isHuge)) {
+ return false;
+ }
+
+ // Read the SharedArrayBuffer object.
+ RootedValue payload(cx);
+ if (!startRead(&payload)) {
+ return false;
+ }
+ if (!payload.isObject() ||
+ !payload.toObject().is<SharedArrayBufferObject>()) {
+ JS_ReportErrorNumberASCII(
+ context(), GetErrorMessage, nullptr, JSMSG_SC_BAD_SERIALIZED_DATA,
+ "shared wasm memory must be backed by a SharedArrayBuffer");
+ return false;
+ }
+
+ Rooted<ArrayBufferObjectMaybeShared*> sab(
+ cx, &payload.toObject().as<SharedArrayBufferObject>());
+
+ // Construct the memory.
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmMemory));
+ RootedObject memory(
+ cx, WasmMemoryObject::create(cx, sab, isHuge.toBoolean(), proto));
+ if (!memory) {
+ return false;
+ }
+
+ vp.setObject(*memory);
+ return true;
+}
+
+/*
+ * Read in the data for a structured clone version 1 ArrayBuffer, performing
+ * endianness-conversion while reading.
+ */
+bool JSStructuredCloneReader::readV1ArrayBuffer(uint32_t arrayType,
+ uint32_t nelems,
+ MutableHandleValue vp) {
+ if (arrayType > Scalar::Uint8Clamped) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid TypedArray type");
+ return false;
+ }
+
+ mozilla::CheckedInt<size_t> nbytes =
+ mozilla::CheckedInt<size_t>(nelems) *
+ TypedArrayElemSize(static_cast<Scalar::Type>(arrayType));
+ if (!nbytes.isValid() || nbytes.value() > UINT32_MAX) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid typed array size");
+ return false;
+ }
+
+ JSObject* obj = ArrayBufferObject::createZeroed(context(), nbytes.value());
+ if (!obj) {
+ return false;
+ }
+ vp.setObject(*obj);
+ ArrayBufferObject& buffer = obj->as<ArrayBufferObject>();
+ MOZ_ASSERT(buffer.byteLength() == nbytes);
+
+ switch (arrayType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ return in.readArray((uint8_t*)buffer.dataPointer(), nelems);
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ return in.readArray((uint16_t*)buffer.dataPointer(), nelems);
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ return in.readArray((uint32_t*)buffer.dataPointer(), nelems);
+ case Scalar::Float64:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ return in.readArray((uint64_t*)buffer.dataPointer(), nelems);
+ default:
+ MOZ_CRASH("Can't happen: arrayType range checked by caller");
+ }
+}
+
+static bool PrimitiveToObject(JSContext* cx, MutableHandleValue vp) {
+ JSObject* obj = js::PrimitiveToObject(cx, vp);
+ if (!obj) {
+ return false;
+ }
+
+ vp.setObject(*obj);
+ return true;
+}
+
+bool JSStructuredCloneReader::startRead(MutableHandleValue vp,
+ gc::Heap strHeap) {
+ uint32_t tag, data;
+ bool alreadAppended = false;
+
+ if (!in.readPair(&tag, &data)) {
+ return false;
+ }
+
+ numItemsRead++;
+
+ switch (tag) {
+ case SCTAG_NULL:
+ vp.setNull();
+ break;
+
+ case SCTAG_UNDEFINED:
+ vp.setUndefined();
+ break;
+
+ case SCTAG_INT32:
+ vp.setInt32(data);
+ break;
+
+ case SCTAG_BOOLEAN:
+ case SCTAG_BOOLEAN_OBJECT:
+ vp.setBoolean(!!data);
+ if (tag == SCTAG_BOOLEAN_OBJECT && !PrimitiveToObject(context(), vp)) {
+ return false;
+ }
+ break;
+
+ case SCTAG_STRING:
+ case SCTAG_STRING_OBJECT: {
+ JSString* str = readString(data, strHeap);
+ if (!str) {
+ return false;
+ }
+ vp.setString(str);
+ if (tag == SCTAG_STRING_OBJECT && !PrimitiveToObject(context(), vp)) {
+ return false;
+ }
+ break;
+ }
+
+ case SCTAG_NUMBER_OBJECT: {
+ double d;
+ if (!in.readDouble(&d)) {
+ return false;
+ }
+ vp.setDouble(CanonicalizeNaN(d));
+ if (!PrimitiveToObject(context(), vp)) {
+ return false;
+ }
+ break;
+ }
+
+ case SCTAG_BIGINT:
+ case SCTAG_BIGINT_OBJECT: {
+ RootedBigInt bi(context(), readBigInt(data));
+ if (!bi) {
+ return false;
+ }
+ vp.setBigInt(bi);
+ if (tag == SCTAG_BIGINT_OBJECT && !PrimitiveToObject(context(), vp)) {
+ return false;
+ }
+ break;
+ }
+
+ case SCTAG_DATE_OBJECT: {
+ double d;
+ if (!in.readDouble(&d)) {
+ return false;
+ }
+ JS::ClippedTime t = JS::TimeClip(d);
+ if (!NumbersAreIdentical(d, t.toDouble())) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "date");
+ return false;
+ }
+ JSObject* obj = NewDateObjectMsec(context(), t);
+ if (!obj) {
+ return false;
+ }
+ vp.setObject(*obj);
+ break;
+ }
+
+ case SCTAG_REGEXP_OBJECT: {
+ if ((data & RegExpFlag::AllFlags) != data) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "regexp");
+ return false;
+ }
+
+ RegExpFlags flags(AssertedCast<uint8_t>(data));
+
+ uint32_t tag2, stringData;
+ if (!in.readPair(&tag2, &stringData)) {
+ return false;
+ }
+ if (tag2 != SCTAG_STRING) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "regexp");
+ return false;
+ }
+
+ JSString* str = readString(stringData, gc::Heap::Tenured);
+ if (!str) {
+ return false;
+ }
+
+ Rooted<JSAtom*> atom(context(), AtomizeString(context(), str));
+ if (!atom) {
+ return false;
+ }
+
+ RegExpObject* reobj =
+ RegExpObject::create(context(), atom, flags, GenericObject);
+ if (!reobj) {
+ return false;
+ }
+ vp.setObject(*reobj);
+ break;
+ }
+
+ case SCTAG_ARRAY_OBJECT:
+ case SCTAG_OBJECT_OBJECT: {
+ JSObject* obj =
+ (tag == SCTAG_ARRAY_OBJECT)
+ ? (JSObject*)NewDenseUnallocatedArray(
+ context(), NativeEndian::swapFromLittleEndian(data))
+ : (JSObject*)NewPlainObject(context());
+ if (!obj || !objs.append(ObjectValue(*obj))) {
+ return false;
+ }
+ vp.setObject(*obj);
+ break;
+ }
+
+ case SCTAG_BACK_REFERENCE_OBJECT: {
+ if (data >= allObjs.length() || !allObjs[data].isObject()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid back reference in input");
+ return false;
+ }
+ vp.set(allObjs[data]);
+ return true;
+ }
+
+ case SCTAG_TRANSFER_MAP_HEADER:
+ case SCTAG_TRANSFER_MAP_PENDING_ENTRY:
+ // We should be past all the transfer map tags.
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "invalid input");
+ return false;
+
+ case SCTAG_ARRAY_BUFFER_OBJECT_V2:
+ case SCTAG_ARRAY_BUFFER_OBJECT:
+ if (!readArrayBuffer(StructuredDataType(tag), data, vp)) {
+ return false;
+ }
+ break;
+
+ case SCTAG_SHARED_ARRAY_BUFFER_OBJECT:
+ if (!readSharedArrayBuffer(vp)) {
+ return false;
+ }
+ break;
+
+ case SCTAG_SHARED_WASM_MEMORY_OBJECT:
+ if (!readSharedWasmMemory(data, vp)) {
+ return false;
+ }
+ break;
+
+ case SCTAG_TYPED_ARRAY_OBJECT_V2: {
+ // readTypedArray adds the array to allObjs.
+ // V2 stores the length (nelems) in |data| and the arrayType separately.
+ uint64_t arrayType;
+ if (!in.read(&arrayType)) {
+ return false;
+ }
+ uint64_t nelems = data;
+ return readTypedArray(arrayType, nelems, vp);
+ }
+
+ case SCTAG_TYPED_ARRAY_OBJECT: {
+ // readTypedArray adds the array to allObjs.
+ // The current version stores the array type in |data| and the length
+ // (nelems) separately to support large TypedArrays.
+ uint32_t arrayType = data;
+ uint64_t nelems;
+ if (!in.read(&nelems)) {
+ return false;
+ }
+ return readTypedArray(arrayType, nelems, vp);
+ }
+
+ case SCTAG_DATA_VIEW_OBJECT_V2: {
+ // readDataView adds the array to allObjs.
+ uint64_t byteLength = data;
+ return readDataView(byteLength, vp);
+ }
+
+ case SCTAG_DATA_VIEW_OBJECT: {
+ // readDataView adds the array to allObjs.
+ uint64_t byteLength;
+ if (!in.read(&byteLength)) {
+ return false;
+ }
+ return readDataView(byteLength, vp);
+ }
+
+ case SCTAG_MAP_OBJECT: {
+ JSObject* obj = MapObject::create(context());
+ if (!obj || !objs.append(ObjectValue(*obj))) {
+ return false;
+ }
+ vp.setObject(*obj);
+ break;
+ }
+
+ case SCTAG_SET_OBJECT: {
+ JSObject* obj = SetObject::create(context());
+ if (!obj || !objs.append(ObjectValue(*obj))) {
+ return false;
+ }
+ vp.setObject(*obj);
+ break;
+ }
+
+ case SCTAG_SAVED_FRAME_OBJECT: {
+ auto* obj = readSavedFrameHeader(data);
+ if (!obj || !objs.append(ObjectValue(*obj)) ||
+ !objState.append(std::make_pair(obj, false))) {
+ return false;
+ }
+ vp.setObject(*obj);
+ break;
+ }
+
+ case SCTAG_ERROR_OBJECT: {
+ auto* obj = readErrorHeader(data);
+ if (!obj || !objs.append(ObjectValue(*obj)) ||
+ !objState.append(std::make_pair(obj, false))) {
+ return false;
+ }
+ vp.setObject(*obj);
+ break;
+ }
+
+ case SCTAG_END_OF_KEYS:
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "truncated input");
+ return false;
+
+ default: {
+ if (tag <= SCTAG_FLOAT_MAX) {
+ double d = ReinterpretPairAsDouble(tag, data);
+ vp.setNumber(CanonicalizeNaN(d));
+ break;
+ }
+
+ if (SCTAG_TYPED_ARRAY_V1_MIN <= tag && tag <= SCTAG_TYPED_ARRAY_V1_MAX) {
+ // A v1-format typed array
+ // readTypedArray adds the array to allObjs
+ return readTypedArray(TagToV1ArrayType(tag), data, vp, true);
+ }
+
+ if (!callbacks || !callbacks->read) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "unsupported type");
+ return false;
+ }
+
+ // callbacks->read() might read other objects from the buffer.
+ // In startWrite we always write the object itself before calling
+ // the custom function. We should do the same here to keep
+ // indexing consistent.
+ uint32_t placeholderIndex = allObjs.length();
+ Value dummy = UndefinedValue();
+ if (!allObjs.append(dummy)) {
+ return false;
+ }
+ JSObject* obj =
+ callbacks->read(context(), this, cloneDataPolicy, tag, data, closure);
+ if (!obj) {
+ return false;
+ }
+ vp.setObject(*obj);
+ allObjs[placeholderIndex].set(vp);
+      alreadyAppended = true;
+ }
+ }
+
+  if (!alreadyAppended && vp.isObject() && !allObjs.append(vp)) {
+ return false;
+ }
+
+ return true;
+}
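+
+// Editorial note: startRead() illustrates the basic wire format: every item
+// starts with a 64-bit word split into a 32-bit (tag, data) pair, with the
+// tag in the high bits. Words whose high bits are at or below SCTAG_FLOAT_MAX
+// are not tagged entries at all but the raw bit pattern of an IEEE-754 double
+// (see ReinterpretPairAsDouble in the default case above), which is why plain
+// numbers need no explicit type tag.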
+
+bool JSStructuredCloneReader::readHeader() {
+ uint32_t tag, data;
+ if (!in.getPair(&tag, &data)) {
+ return in.reportTruncated();
+ }
+
+ JS::StructuredCloneScope storedScope;
+ if (tag == SCTAG_HEADER) {
+ MOZ_ALWAYS_TRUE(in.readPair(&tag, &data));
+ storedScope = JS::StructuredCloneScope(data);
+ } else {
+ // Old structured clone buffer. We must have read it from disk.
+ storedScope = JS::StructuredCloneScope::DifferentProcessForIndexedDB;
+ }
+
+ // Backward compatibility with old structured clone buffers. Value '0' was
+ // used for SameProcessSameThread scope.
+ if ((int)storedScope == 0) {
+ storedScope = JS::StructuredCloneScope::SameProcess;
+ }
+
+ if (storedScope < JS::StructuredCloneScope::SameProcess ||
+ storedScope > JS::StructuredCloneScope::DifferentProcessForIndexedDB) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid structured clone scope");
+ return false;
+ }
+
+ if (allowedScope == JS::StructuredCloneScope::DifferentProcessForIndexedDB) {
+ // Bug 1434308 and bug 1458320 - the scopes stored in old IndexedDB
+ // clones are incorrect. Treat them as if they were DifferentProcess.
+ allowedScope = JS::StructuredCloneScope::DifferentProcess;
+ return true;
+ }
+
+ if (storedScope < allowedScope) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "incompatible structured clone scope");
+ return false;
+ }
+
+ return true;
+}
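+
+// Editorial note: the check above relies on the enum ordering SameProcess <
+// DifferentProcess < DifferentProcessForIndexedDB: a buffer stored with a
+// narrower scope (e.g. SameProcess data, which may embed raw pointers) is
+// rejected when the reader requires a wider one.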
+
+bool JSStructuredCloneReader::readTransferMap() {
+ JSContext* cx = context();
+ auto headerPos = in.tell();
+
+ uint32_t tag, data;
+ if (!in.getPair(&tag, &data)) {
+ return in.reportTruncated();
+ }
+
+ if (tag != SCTAG_TRANSFER_MAP_HEADER ||
+ TransferableMapHeader(data) == SCTAG_TM_TRANSFERRED) {
+ return true;
+ }
+
+ uint64_t numTransferables;
+ MOZ_ALWAYS_TRUE(in.readPair(&tag, &data));
+ if (!in.read(&numTransferables)) {
+ return false;
+ }
+
+ for (uint64_t i = 0; i < numTransferables; i++) {
+ auto pos = in.tell();
+
+ if (!in.readPair(&tag, &data)) {
+ return false;
+ }
+
+ if (tag == SCTAG_TRANSFER_MAP_PENDING_ENTRY) {
+ ReportDataCloneError(cx, callbacks, JS_SCERR_TRANSFERABLE, closure);
+ return false;
+ }
+
+ RootedObject obj(cx);
+
+ void* content;
+ if (!in.readPtr(&content)) {
+ return false;
+ }
+
+ uint64_t extraData;
+ if (!in.read(&extraData)) {
+ return false;
+ }
+
+ if (tag == SCTAG_TRANSFER_MAP_ARRAY_BUFFER) {
+ if (allowedScope == JS::StructuredCloneScope::DifferentProcess ||
+ allowedScope ==
+ JS::StructuredCloneScope::DifferentProcessForIndexedDB) {
+ // Transferred ArrayBuffers in a DifferentProcess clone buffer
+ // are treated as if they weren't Transferred at all. We should
+ // only see SCTAG_TRANSFER_MAP_STORED_ARRAY_BUFFER.
+ ReportDataCloneError(cx, callbacks, JS_SCERR_TRANSFERABLE, closure);
+ return false;
+ }
+
+ MOZ_RELEASE_ASSERT(extraData <= ArrayBufferObject::MaxByteLength);
+ size_t nbytes = extraData;
+
+ MOZ_ASSERT(data == JS::SCTAG_TMO_ALLOC_DATA ||
+ data == JS::SCTAG_TMO_MAPPED_DATA);
+ if (data == JS::SCTAG_TMO_ALLOC_DATA) {
+ obj = JS::NewArrayBufferWithContents(cx, nbytes, content);
+ } else if (data == JS::SCTAG_TMO_MAPPED_DATA) {
+ obj = JS::NewMappedArrayBufferWithContents(cx, nbytes, content);
+ }
+ } else if (tag == SCTAG_TRANSFER_MAP_STORED_ARRAY_BUFFER) {
+ auto savedPos = in.tell();
+ auto guard = mozilla::MakeScopeExit([&] { in.seekTo(savedPos); });
+ in.seekTo(pos);
+ if (!in.seekBy(static_cast<size_t>(extraData))) {
+ return false;
+ }
+
+ if (tailStartPos.isNothing()) {
+ tailStartPos = mozilla::Some(in.tell());
+ }
+
+ uint32_t tag, data;
+ if (!in.readPair(&tag, &data)) {
+ return false;
+ }
+ if (tag != SCTAG_ARRAY_BUFFER_OBJECT_V2 &&
+ tag != SCTAG_ARRAY_BUFFER_OBJECT) {
+ ReportDataCloneError(cx, callbacks, JS_SCERR_TRANSFERABLE, closure);
+ return false;
+ }
+ RootedValue val(cx);
+ if (!readArrayBuffer(StructuredDataType(tag), data, &val)) {
+ return false;
+ }
+ obj = &val.toObject();
+ tailEndPos = mozilla::Some(in.tell());
+ } else {
+ if (!callbacks || !callbacks->readTransfer) {
+ ReportDataCloneError(cx, callbacks, JS_SCERR_TRANSFERABLE, closure);
+ return false;
+ }
+ if (!callbacks->readTransfer(cx, this, tag, content, extraData, closure,
+ &obj)) {
+ if (!cx->isExceptionPending()) {
+ ReportDataCloneError(cx, callbacks, JS_SCERR_TRANSFERABLE, closure);
+ }
+ return false;
+ }
+ MOZ_ASSERT(obj);
+ MOZ_ASSERT(!cx->isExceptionPending());
+ }
+
+ // On failure, the buffer will still own the data (since its ownership
+ // will not get set to SCTAG_TMO_UNOWNED), so the data will be freed by
+ // DiscardTransferables.
+ if (!obj) {
+ return false;
+ }
+
+ // Mark the SCTAG_TRANSFER_MAP_* entry as no longer owned by the input
+ // buffer.
+ pos.write(PairToUInt64(tag, JS::SCTAG_TMO_UNOWNED));
+ MOZ_ASSERT(!pos.done());
+
+ if (!allObjs.append(ObjectValue(*obj))) {
+ return false;
+ }
+ }
+
+ // Mark the whole transfer map as consumed.
+#ifdef DEBUG
+ SCInput::getPair(headerPos.peek(), &tag, &data);
+ MOZ_ASSERT(tag == SCTAG_TRANSFER_MAP_HEADER);
+ MOZ_ASSERT(TransferableMapHeader(data) != SCTAG_TM_TRANSFERRED);
+#endif
+ headerPos.write(
+ PairToUInt64(SCTAG_TRANSFER_MAP_HEADER, SCTAG_TM_TRANSFERRED));
+
+ return true;
+}
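+
+// Editorial note: readTransferMap() deliberately mutates the input buffer:
+// each consumed entry's ownership word is rewritten to SCTAG_TMO_UNOWNED and
+// the header to SCTAG_TM_TRANSFERRED, so a second read of the same buffer (or
+// a later DiscardTransferables) cannot double-free the transferred contents.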
+
+JSObject* JSStructuredCloneReader::readSavedFrameHeader(
+ uint32_t principalsTag) {
+ Rooted<SavedFrame*> savedFrame(context(), SavedFrame::create(context()));
+ if (!savedFrame) {
+ return nullptr;
+ }
+
+ JSPrincipals* principals;
+ if (principalsTag == SCTAG_JSPRINCIPALS) {
+ if (!context()->runtime()->readPrincipals) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_UNSUPPORTED_TYPE);
+ return nullptr;
+ }
+
+ if (!context()->runtime()->readPrincipals(context(), this, &principals)) {
+ return nullptr;
+ }
+ } else if (principalsTag ==
+ SCTAG_RECONSTRUCTED_SAVED_FRAME_PRINCIPALS_IS_SYSTEM) {
+ principals = &ReconstructedSavedFramePrincipals::IsSystem;
+ principals->refcount++;
+ } else if (principalsTag ==
+ SCTAG_RECONSTRUCTED_SAVED_FRAME_PRINCIPALS_IS_NOT_SYSTEM) {
+ principals = &ReconstructedSavedFramePrincipals::IsNotSystem;
+ principals->refcount++;
+ } else if (principalsTag == SCTAG_NULL_JSPRINCIPALS) {
+ principals = nullptr;
+ } else {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "bad SavedFrame principals");
+ return nullptr;
+ }
+
+ RootedValue mutedErrors(context());
+ RootedValue source(context());
+ {
+ // Read a |mutedErrors| boolean followed by a |source| string.
+ // The |mutedErrors| boolean is present in all new structured-clone data,
+ // but in older data it will be absent and only the |source| string will be
+ // found.
+ if (!startRead(&mutedErrors)) {
+ return nullptr;
+ }
+
+ if (mutedErrors.isBoolean()) {
+ if (!startRead(&source, gc::Heap::Tenured) || !source.isString()) {
+ return nullptr;
+ }
+ } else if (mutedErrors.isString()) {
+      // Backwards compatibility: Handle a missing |mutedErrors| boolean;
+ // this is actually just a |source| string.
+ source = mutedErrors;
+ mutedErrors.setBoolean(true); // Safe default value.
+ } else {
+ // Invalid type.
+ return nullptr;
+ }
+ }
+
+ savedFrame->initPrincipalsAlreadyHeldAndMutedErrors(principals,
+ mutedErrors.toBoolean());
+
+ auto atomSource = AtomizeString(context(), source.toString());
+ if (!atomSource) {
+ return nullptr;
+ }
+ savedFrame->initSource(atomSource);
+
+ uint32_t line;
+ if (!readUint32(&line)) {
+ return nullptr;
+ }
+ savedFrame->initLine(line);
+
+ uint32_t column;
+ if (!readUint32(&column)) {
+ return nullptr;
+ }
+ savedFrame->initColumn(column);
+
+ // Don't specify a source ID when reading a cloned saved frame, as these IDs
+ // are only valid within a specific process.
+ savedFrame->initSourceId(0);
+
+ RootedValue name(context());
+ if (!startRead(&name, gc::Heap::Tenured)) {
+ return nullptr;
+ }
+ if (!(name.isString() || name.isNull())) {
+    JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+                              JSMSG_SC_BAD_SERIALIZED_DATA,
+                              "invalid saved frame name");
+ return nullptr;
+ }
+ JSAtom* atomName = nullptr;
+ if (name.isString()) {
+ atomName = AtomizeString(context(), name.toString());
+ if (!atomName) {
+ return nullptr;
+ }
+ }
+
+ savedFrame->initFunctionDisplayName(atomName);
+
+ RootedValue cause(context());
+ if (!startRead(&cause, gc::Heap::Tenured)) {
+ return nullptr;
+ }
+ if (!(cause.isString() || cause.isNull())) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid saved frame cause");
+ return nullptr;
+ }
+ JSAtom* atomCause = nullptr;
+ if (cause.isString()) {
+ atomCause = AtomizeString(context(), cause.toString());
+ if (!atomCause) {
+ return nullptr;
+ }
+ }
+ savedFrame->initAsyncCause(atomCause);
+
+ return savedFrame;
+}
+
+// SavedFrame object: there is one child value, the parent SavedFrame,
+// which is either null or another SavedFrame object.
+bool JSStructuredCloneReader::readSavedFrameFields(Handle<SavedFrame*> frameObj,
+ HandleValue parent,
+ bool* state) {
+ if (*state) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "multiple SavedFrame parents");
+ return false;
+ }
+
+ SavedFrame* parentFrame;
+ if (parent.isNull()) {
+ parentFrame = nullptr;
+ } else if (parent.isObject() && parent.toObject().is<SavedFrame>()) {
+ parentFrame = &parent.toObject().as<SavedFrame>();
+ } else {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid SavedFrame parent");
+ return false;
+ }
+
+ frameObj->initParent(parentFrame);
+ *state = true;
+ return true;
+}
+
+JSObject* JSStructuredCloneReader::readErrorHeader(uint32_t type) {
+ JSContext* cx = context();
+
+ switch (type) {
+ case JSEXN_ERR:
+ case JSEXN_EVALERR:
+ case JSEXN_RANGEERR:
+ case JSEXN_REFERENCEERR:
+ case JSEXN_SYNTAXERR:
+ case JSEXN_TYPEERR:
+ case JSEXN_URIERR:
+ case JSEXN_AGGREGATEERR:
+ break;
+ default:
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid error type");
+ return nullptr;
+ }
+
+ RootedString message(cx);
+ {
+ RootedValue messageVal(cx);
+ if (!startRead(&messageVal)) {
+ return nullptr;
+ }
+ if (messageVal.isString()) {
+ message = messageVal.toString();
+ } else if (!messageVal.isNull()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid 'message' field for Error object");
+ return nullptr;
+ }
+ }
+
+ // We have to set |cause| to something if it exists, otherwise the shape
+ // would be wrong. The actual value will be overwritten later.
+ RootedValue val(cx);
+ if (!startRead(&val)) {
+ return nullptr;
+ }
+ bool hasCause = ToBoolean(val);
+ Rooted<Maybe<Value>> cause(cx, mozilla::Nothing());
+ if (hasCause) {
+ cause = mozilla::Some(BooleanValue(true));
+ }
+
+ if (!startRead(&val)) {
+ return nullptr;
+ }
+ if (!val.isString()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid 'fileName' field for Error object");
+ return nullptr;
+ }
+ RootedString fileName(cx, val.toString());
+
+ uint32_t lineNumber, columnNumber;
+ if (!readUint32(&lineNumber) || !readUint32(&columnNumber)) {
+ return nullptr;
+ }
+
+ // The |cause| and |stack| slots of the objects might be overwritten later.
+ // For AggregateErrors the |errors| property will be added.
+ RootedObject errorObj(
+ cx, ErrorObject::create(cx, static_cast<JSExnType>(type), nullptr,
+ fileName, 0, lineNumber, columnNumber, nullptr,
+ message, cause));
+ if (!errorObj) {
+ return nullptr;
+ }
+
+ return errorObj;
+}
+
+// Error objects have 3 fields, some or all of them null: cause,
+// errors, and stack.
+bool JSStructuredCloneReader::readErrorFields(Handle<ErrorObject*> errorObj,
+ HandleValue cause, bool* state) {
+ JSContext* cx = context();
+ if (*state) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "unexpected child value seen for Error object");
+ return false;
+ }
+
+ RootedValue errors(cx);
+ RootedValue stack(cx);
+ if (!startRead(&errors) || !startRead(&stack)) {
+ return false;
+ }
+
+ bool hasCause = errorObj->getCause().isSome();
+ if (hasCause) {
+ errorObj->setCauseSlot(cause);
+ } else if (!cause.isNull()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid 'cause' field for Error object");
+ return false;
+ }
+
+ if (errorObj->type() == JSEXN_AGGREGATEERR) {
+ if (!DefineDataProperty(context(), errorObj, cx->names().errors, errors,
+ 0)) {
+ return false;
+ }
+ } else if (!errors.isNull()) {
+ JS_ReportErrorNumberASCII(
+ cx, GetErrorMessage, nullptr, JSMSG_SC_BAD_SERIALIZED_DATA,
+ "unexpected 'errors' field seen for non-AggregateError");
+ return false;
+ }
+
+ if (stack.isObject()) {
+ RootedObject stackObj(cx, &stack.toObject());
+ if (!stackObj->is<SavedFrame>()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid 'stack' field for Error object");
+ return false;
+ }
+ errorObj->setStackSlot(stack);
+ } else if (!stack.isNull()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "invalid 'stack' field for Error object");
+ return false;
+ }
+
+ *state = true;
+ return true;
+}
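+
+// Editorial note: this completes the two-phase |cause| handling started in
+// readErrorHeader(): the BooleanValue(true) placeholder stored there only
+// fixed the object's shape, and the real value read here replaces it via
+// setCauseSlot().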
+
+// Read a value and treat it as a key/value pair.
+bool JSStructuredCloneReader::readMapField(Handle<MapObject*> mapObj,
+ HandleValue key) {
+ RootedValue val(context());
+ if (!startRead(&val)) {
+ return false;
+ }
+ return MapObject::set(context(), mapObj, key, val);
+}
+
+// Read a value and treat it as a key/value pair, interpreted as a plain
+// property value.
+bool JSStructuredCloneReader::readObjectField(HandleObject obj,
+ HandleValue key) {
+ RootedValue val(context());
+ if (!startRead(&val)) {
+ return false;
+ }
+
+ if (!key.isString() && !key.isInt32()) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "property key expected");
+ return false;
+ }
+
+ RootedId id(context());
+ if (!PrimitiveValueToId<CanGC>(context(), key, &id)) {
+ return false;
+ }
+
+ // Fast path for adding a new property to a plain object. The property names
+ // we see here should be unique, but we check for duplicates to guard against
+ // corrupt or malicious data.
+ if (id.isString() && obj->is<PlainObject>() &&
+ MOZ_LIKELY(!obj->as<PlainObject>().contains(context(), id))) {
+ return AddDataPropertyToPlainObject(context(), obj.as<PlainObject>(), id,
+ val);
+ }
+
+ // Fast path for adding an array element. The index shouldn't exceed the
+ // array's length, but we check for this in `addDenseElementNoLengthChange` to
+ // guard against corrupt or malicious data.
+ if (id.isInt() && obj->is<ArrayObject>()) {
+ ArrayObject* arr = &obj->as<ArrayObject>();
+ switch (arr->addDenseElementNoLengthChange(context(), id.toInt(), val)) {
+ case DenseElementResult::Failure:
+ return false;
+ case DenseElementResult::Success:
+ return true;
+ case DenseElementResult::Incomplete:
+ // Fall-through to slow path.
+ break;
+ }
+ }
+
+ return DefineDataProperty(context(), obj, id, val);
+}
+
+// Perform the whole recursive reading procedure.
+bool JSStructuredCloneReader::read(MutableHandleValue vp, size_t nbytes) {
+ auto startTime = mozilla::TimeStamp::Now();
+
+ if (!readHeader()) {
+ return false;
+ }
+
+ if (!readTransferMap()) {
+ return false;
+ }
+
+ MOZ_ASSERT(objs.length() == 0);
+ MOZ_ASSERT(objState.length() == 1);
+
+ // Start out by reading in the main object and pushing it onto the 'objs'
+ // stack. The data related to this object and its descendants extends from
+ // here to the SCTAG_END_OF_KEYS at the end of the stream.
+ if (!startRead(vp)) {
+ return false;
+ }
+
+ // Stop when the stack shows that all objects have been read.
+ while (objs.length() != 0) {
+ // What happens depends on the top obj on the objs stack.
+ RootedObject obj(context(), &objs.back().toObject());
+
+ uint32_t tag, data;
+ if (!in.getPair(&tag, &data)) {
+ return false;
+ }
+
+ if (tag == SCTAG_END_OF_KEYS) {
+ // Pop the current obj off the stack, since we are done with it and
+ // its children.
+ MOZ_ALWAYS_TRUE(in.readPair(&tag, &data));
+ objs.popBack();
+      if (objState.back().first() == obj) {
+ objState.popBack();
+ }
+ continue;
+ }
+
+ // Remember the index of the current top of the state stack, which will
+ // correspond to the state for `obj` iff `obj` is a type that uses state.
+ // startRead() may push additional entries before the state is accessed and
+ // updated while filling in the object's data.
+ size_t objStateIdx = objState.length() - 1;
+
+ // The input stream contains a sequence of "child" values, whose
+ // interpretation depends on the type of obj. These values can be
+ // anything, and startRead() will push onto 'objs' for any non-leaf
+ // value (i.e., anything that may contain children).
+ //
+ // startRead() will allocate the (empty) object, but note that when
+ // startRead() returns, 'key' is not yet initialized with any of its
+ // properties. Those will be filled in by returning to the head of this
+ // loop, processing the first child obj, and continuing until all
+ // children have been fully created.
+ //
+ // Note that this means the ordering in the stream is a little funky for
+ // things like Map. See the comment above traverseMap() for an example.
+ RootedValue key(context());
+ if (!startRead(&key)) {
+ return false;
+ }
+
+ if (key.isNull() && !(obj->is<MapObject>() || obj->is<SetObject>() ||
+ obj->is<SavedFrame>() || obj->is<ErrorObject>())) {
+ // Backwards compatibility: Null formerly indicated the end of
+ // object properties.
+
+ // No legacy objects used the state stack.
+ MOZ_ASSERT(objState[objStateIdx].first() != obj);
+
+ objs.popBack();
+ continue;
+ }
+
+ context()->check(key);
+
+ if (obj->is<SetObject>()) {
+ // Set object: the values between obj header (from startRead()) and
+ // SCTAG_END_OF_KEYS are all interpreted as values to add to the set.
+ if (!SetObject::add(context(), obj, key)) {
+ return false;
+ }
+ } else if (obj->is<MapObject>()) {
+ Rooted<MapObject*> mapObj(context(), &obj->as<MapObject>());
+ if (!readMapField(mapObj, key)) {
+ return false;
+ }
+ } else if (obj->is<SavedFrame>()) {
+ Rooted<SavedFrame*> frameObj(context(), &obj->as<SavedFrame>());
+ MOZ_ASSERT(objState[objStateIdx].first() == obj);
+ bool state = objState[objStateIdx].second();
+ if (!readSavedFrameFields(frameObj, key, &state)) {
+ return false;
+ }
+ objState[objStateIdx].second() = state;
+ } else if (obj->is<ErrorObject>()) {
+ Rooted<ErrorObject*> errorObj(context(), &obj->as<ErrorObject>());
+ MOZ_ASSERT(objState[objStateIdx].first() == obj);
+ bool state = objState[objStateIdx].second();
+ if (!readErrorFields(errorObj, key, &state)) {
+ return false;
+ }
+ objState[objStateIdx].second() = state;
+ } else {
+ // Everything else uses a series of key,value,key,value,... Value
+ // objects.
+ if (!readObjectField(obj, key)) {
+ return false;
+ }
+ }
+ }
+
+ allObjs.clear();
+
+ // For fuzzing, it is convenient to allow extra data at the end
+ // of the input buffer so that more possible inputs are considered
+ // valid.
+#ifndef FUZZING
+ bool extraData;
+ if (tailStartPos.isSome()) {
+ // in.tell() is the end of the main data. If "tail" data was consumed,
+ // then check whether there's any data between the main data and the
+ // beginning of the tail, or after the last read point in the tail.
+ extraData = (in.tell() != *tailStartPos || !tailEndPos->done());
+ } else {
+ extraData = !in.tell().done();
+ }
+ if (extraData) {
+ JS_ReportErrorNumberASCII(context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA,
+ "extra data after end");
+ return false;
+ }
+#endif
+
+ JSRuntime* rt = context()->runtime();
+ rt->metrics().DESERIALIZE_BYTES(nbytes);
+ rt->metrics().DESERIALIZE_ITEMS(numItemsRead);
+ mozilla::TimeDuration elapsed = mozilla::TimeStamp::Now() - startTime;
+ rt->metrics().DESERIALIZE_US(elapsed);
+
+ return true;
+}
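+
+// Editorial note: a sketch of the stream order this loop consumes for, say,
+// new Map([[1, {}]]): SCTAG_MAP_OBJECT, SCTAG_INT32 (the key 1),
+// SCTAG_OBJECT_OBJECT, SCTAG_END_OF_KEYS (ending the inner object), and
+// SCTAG_END_OF_KEYS (ending the map). Keys and values simply alternate as
+// "child" values, which is the funky ordering referred to above.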
+
+JS_PUBLIC_API bool JS_ReadStructuredClone(
+ JSContext* cx, const JSStructuredCloneData& buf, uint32_t version,
+ JS::StructuredCloneScope scope, MutableHandleValue vp,
+ const JS::CloneDataPolicy& cloneDataPolicy,
+ const JSStructuredCloneCallbacks* optionalCallbacks, void* closure) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ if (version > JS_STRUCTURED_CLONE_VERSION) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_CLONE_VERSION);
+ return false;
+ }
+ const JSStructuredCloneCallbacks* callbacks = optionalCallbacks;
+ return ReadStructuredClone(cx, buf, scope, vp, cloneDataPolicy, callbacks,
+ closure);
+}
+
+JS_PUBLIC_API bool JS_WriteStructuredClone(
+ JSContext* cx, HandleValue value, JSStructuredCloneData* bufp,
+ JS::StructuredCloneScope scope, const JS::CloneDataPolicy& cloneDataPolicy,
+ const JSStructuredCloneCallbacks* optionalCallbacks, void* closure,
+ HandleValue transferable) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+ cx->check(value);
+
+ const JSStructuredCloneCallbacks* callbacks = optionalCallbacks;
+ return WriteStructuredClone(cx, value, bufp, scope, cloneDataPolicy,
+ callbacks, closure, transferable);
+}
+
+JS_PUBLIC_API bool JS_StructuredCloneHasTransferables(
+ JSStructuredCloneData& data, bool* hasTransferable) {
+ *hasTransferable = StructuredCloneHasTransferObjects(data);
+ return true;
+}
+
+JS_PUBLIC_API bool JS_StructuredClone(
+ JSContext* cx, HandleValue value, MutableHandleValue vp,
+ const JSStructuredCloneCallbacks* optionalCallbacks, void* closure) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ // Strings are associated with zones, not compartments,
+ // so we copy the string by wrapping it.
+ if (value.isString()) {
+ RootedString strValue(cx, value.toString());
+ if (!cx->compartment()->wrap(cx, &strValue)) {
+ return false;
+ }
+ vp.setString(strValue);
+ return true;
+ }
+
+ const JSStructuredCloneCallbacks* callbacks = optionalCallbacks;
+
+ JSAutoStructuredCloneBuffer buf(JS::StructuredCloneScope::SameProcess,
+ callbacks, closure);
+ {
+ if (value.isObject()) {
+ RootedObject obj(cx, &value.toObject());
+ obj = CheckedUnwrapStatic(obj);
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+ AutoRealm ar(cx, obj);
+ RootedValue unwrappedVal(cx, ObjectValue(*obj));
+ if (!buf.write(cx, unwrappedVal, callbacks, closure)) {
+ return false;
+ }
+ } else {
+ if (!buf.write(cx, value, callbacks, closure)) {
+ return false;
+ }
+ }
+ }
+
+ return buf.read(cx, vp, JS::CloneDataPolicy(), callbacks, closure);
+}
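+
+// Editorial note: a minimal usage sketch (hypothetical embedder code; null
+// callbacks are fine as long as |v| contains only natively supported types):
+//
+//   JS::RootedValue copy(cx);
+//   if (!JS_StructuredClone(cx, v, &copy, nullptr, nullptr)) {
+//     return false;  // an exception is pending on cx
+//   }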
+
+JSAutoStructuredCloneBuffer::JSAutoStructuredCloneBuffer(
+ JSAutoStructuredCloneBuffer&& other)
+ : data_(other.scope()) {
+ version_ = other.version_;
+ other.giveTo(&data_);
+}
+
+JSAutoStructuredCloneBuffer& JSAutoStructuredCloneBuffer::operator=(
+ JSAutoStructuredCloneBuffer&& other) {
+ MOZ_ASSERT(&other != this);
+ MOZ_ASSERT(scope() == other.scope());
+ clear();
+ version_ = other.version_;
+ other.giveTo(&data_);
+ return *this;
+}
+
+void JSAutoStructuredCloneBuffer::clear() {
+ data_.discardTransferables();
+ data_.ownTransferables_ = OwnTransferablePolicy::NoTransferables;
+ data_.refsHeld_.releaseAll();
+ data_.Clear();
+ version_ = 0;
+}
+
+void JSAutoStructuredCloneBuffer::adopt(
+ JSStructuredCloneData&& data, uint32_t version,
+ const JSStructuredCloneCallbacks* callbacks, void* closure) {
+ clear();
+ data_ = std::move(data);
+ version_ = version;
+ data_.setCallbacks(callbacks, closure,
+ OwnTransferablePolicy::OwnsTransferablesIfAny);
+}
+
+void JSAutoStructuredCloneBuffer::giveTo(JSStructuredCloneData* data) {
+ *data = std::move(data_);
+ version_ = 0;
+ data_.setCallbacks(nullptr, nullptr, OwnTransferablePolicy::NoTransferables);
+ data_.Clear();
+}
+
+bool JSAutoStructuredCloneBuffer::read(
+ JSContext* cx, MutableHandleValue vp,
+ const JS::CloneDataPolicy& cloneDataPolicy,
+ const JSStructuredCloneCallbacks* optionalCallbacks, void* closure) {
+ MOZ_ASSERT(cx);
+ return !!JS_ReadStructuredClone(
+ cx, data_, version_, data_.scope(), vp, cloneDataPolicy,
+ optionalCallbacks ? optionalCallbacks : data_.callbacks_,
+ optionalCallbacks ? closure : data_.closure_);
+}
+
+bool JSAutoStructuredCloneBuffer::write(
+ JSContext* cx, HandleValue value,
+ const JSStructuredCloneCallbacks* optionalCallbacks, void* closure) {
+ HandleValue transferable = UndefinedHandleValue;
+ return write(cx, value, transferable, JS::CloneDataPolicy(),
+ optionalCallbacks ? optionalCallbacks : data_.callbacks_,
+ optionalCallbacks ? closure : data_.closure_);
+}
+
+bool JSAutoStructuredCloneBuffer::write(
+ JSContext* cx, HandleValue value, HandleValue transferable,
+ const JS::CloneDataPolicy& cloneDataPolicy,
+ const JSStructuredCloneCallbacks* optionalCallbacks, void* closure) {
+ clear();
+ bool ok = JS_WriteStructuredClone(
+ cx, value, &data_, data_.scopeForInternalWriting(), cloneDataPolicy,
+ optionalCallbacks ? optionalCallbacks : data_.callbacks_,
+ optionalCallbacks ? closure : data_.closure_, transferable);
+  if (ok) {
+ version_ = JS_STRUCTURED_CLONE_VERSION;
+ }
+ return ok;
+}
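+
+// Editorial note: a write()/read() round trip through the buffer (a sketch,
+// assuming the default arguments declared for these methods in the public
+// StructuredClone.h header):
+//
+//   JSAutoStructuredCloneBuffer buf(JS::StructuredCloneScope::SameProcess,
+//                                   nullptr, nullptr);
+//   JS::RootedValue out(cx);
+//   if (!buf.write(cx, value) || !buf.read(cx, &out)) {
+//     return false;
+//   }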
+
+JS_PUBLIC_API bool JS_ReadUint32Pair(JSStructuredCloneReader* r, uint32_t* p1,
+ uint32_t* p2) {
+  return r->input().readPair(p1, p2);
+}
+
+JS_PUBLIC_API bool JS_ReadBytes(JSStructuredCloneReader* r, void* p,
+ size_t len) {
+ return r->input().readBytes(p, len);
+}
+
+JS_PUBLIC_API bool JS_ReadString(JSStructuredCloneReader* r,
+ MutableHandleString str) {
+ uint32_t tag, data;
+ if (!r->input().readPair(&tag, &data)) {
+ return false;
+ }
+
+ if (tag == SCTAG_STRING) {
+ if (JSString* s = r->readString(data)) {
+ str.set(s);
+ return true;
+ }
+ return false;
+ }
+
+ JS_ReportErrorNumberASCII(r->context(), GetErrorMessage, nullptr,
+ JSMSG_SC_BAD_SERIALIZED_DATA, "expected string");
+ return false;
+}
+
+JS_PUBLIC_API bool JS_ReadDouble(JSStructuredCloneReader* r, double* v) {
+ return r->input().readDouble(v);
+}
+
+JS_PUBLIC_API bool JS_ReadTypedArray(JSStructuredCloneReader* r,
+ MutableHandleValue vp) {
+ uint32_t tag, data;
+ if (!r->input().readPair(&tag, &data)) {
+ return false;
+ }
+
+ if (tag >= SCTAG_TYPED_ARRAY_V1_MIN && tag <= SCTAG_TYPED_ARRAY_V1_MAX) {
+ return r->readTypedArray(TagToV1ArrayType(tag), data, vp, true);
+ }
+
+ if (tag == SCTAG_TYPED_ARRAY_OBJECT_V2) {
+ // V2 stores the length (nelems) in |data| and the arrayType separately.
+ uint64_t arrayType;
+ if (!r->input().read(&arrayType)) {
+ return false;
+ }
+ uint64_t nelems = data;
+ return r->readTypedArray(arrayType, nelems, vp);
+ }
+
+ if (tag == SCTAG_TYPED_ARRAY_OBJECT) {
+ // The current version stores the array type in |data| and the length
+ // (nelems) separately to support large TypedArrays.
+ uint32_t arrayType = data;
+ uint64_t nelems;
+ if (!r->input().read(&nelems)) {
+ return false;
+ }
+ return r->readTypedArray(arrayType, nelems, vp);
+ }
+
+  JS_ReportErrorNumberASCII(r->context(), GetErrorMessage, nullptr,
+                            JSMSG_SC_BAD_SERIALIZED_DATA,
+                            "expected typed array");
+ return false;
+}
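+
+// Editorial note: three generations of typed-array encoding are accepted
+// above: v1 encodes the element type in the tag itself (TagToV1ArrayType),
+// v2 stores nelems in |data| with the element type in a trailing word, and
+// the current format swaps the two so nelems can be a full 64-bit count.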
+
+JS_PUBLIC_API bool JS_WriteUint32Pair(JSStructuredCloneWriter* w, uint32_t tag,
+ uint32_t data) {
+ return w->output().writePair(tag, data);
+}
+
+JS_PUBLIC_API bool JS_WriteBytes(JSStructuredCloneWriter* w, const void* p,
+ size_t len) {
+ return w->output().writeBytes(p, len);
+}
+
+JS_PUBLIC_API bool JS_WriteString(JSStructuredCloneWriter* w,
+ HandleString str) {
+ return w->writeString(SCTAG_STRING, str);
+}
+
+JS_PUBLIC_API bool JS_WriteDouble(JSStructuredCloneWriter* w, double v) {
+ return w->output().writeDouble(v);
+}
+
+JS_PUBLIC_API bool JS_WriteTypedArray(JSStructuredCloneWriter* w,
+ HandleValue v) {
+ MOZ_ASSERT(v.isObject());
+ w->context()->check(v);
+ RootedObject obj(w->context(), &v.toObject());
+
+  // startWrite can write anything, so we check here and report an error if
+  // the user passes the wrong type.
+ if (!obj->canUnwrapAs<TypedArrayObject>()) {
+ ReportAccessDenied(w->context());
+ return false;
+ }
+
+  // We use startWrite instead of writeTypedArray because a typed array is
+  // an object and must be added to the |memory| (allObjs) list; calling
+  // writeTypedArray directly won't add it.
+ return w->startWrite(v);
+}
+
+JS_PUBLIC_API bool JS_ObjectNotWritten(JSStructuredCloneWriter* w,
+ HandleObject obj) {
+ w->memory.remove(w->memory.lookup(obj));
+
+ return true;
+}
+
+JS_PUBLIC_API JS::StructuredCloneScope JS_GetStructuredCloneScope(
+ JSStructuredCloneWriter* w) {
+ return w->output().scope();
+}
diff --git a/js/src/vm/SymbolType.cpp b/js/src/vm/SymbolType.cpp
new file mode 100644
index 0000000000..b82204329d
--- /dev/null
+++ b/js/src/vm/SymbolType.cpp
@@ -0,0 +1,146 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/SymbolType.h"
+
+#include "gc/Allocator.h"
+#include "gc/HashUtil.h"
+#include "util/StringBuffer.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "vm/Realm-inl.h"
+
+using JS::Symbol;
+using namespace js;
+
+Symbol* Symbol::newInternal(JSContext* cx, JS::SymbolCode code, uint32_t hash,
+ Handle<JSAtom*> description) {
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ AutoAllocInAtomsZone az(cx);
+ return cx->newCell<Symbol>(code, hash, description);
+}
+
+Symbol* Symbol::new_(JSContext* cx, JS::SymbolCode code,
+ HandleString description) {
+ Rooted<JSAtom*> atom(cx);
+ if (description) {
+ atom = AtomizeString(cx, description);
+ if (!atom) {
+ return nullptr;
+ }
+ }
+
+ Symbol* sym = newInternal(cx, code, cx->runtime()->randomHashCode(), atom);
+ if (sym) {
+ cx->markAtom(sym);
+ }
+ return sym;
+}
+
+Symbol* Symbol::newWellKnown(JSContext* cx, JS::SymbolCode code,
+ Handle<PropertyName*> description) {
+ return newInternal(cx, code, cx->runtime()->randomHashCode(), description);
+}
+
+Symbol* Symbol::for_(JSContext* cx, HandleString description) {
+ Rooted<JSAtom*> atom(cx, AtomizeString(cx, description));
+ if (!atom) {
+ return nullptr;
+ }
+
+ SymbolRegistry& registry = cx->symbolRegistry();
+ DependentAddPtr<SymbolRegistry> p(cx, registry, atom);
+ if (p) {
+ cx->markAtom(*p);
+ return *p;
+ }
+
+  // Rehash the atom's hash so that the symbol's hash is different from the
+  // hash of the corresponding atom.
+ HashNumber hash = mozilla::HashGeneric(atom->hash());
+ Symbol* sym = newInternal(cx, SymbolCode::InSymbolRegistry, hash, atom);
+ if (!sym) {
+ return nullptr;
+ }
+
+ if (!p.add(cx, registry, atom, sym)) {
+ return nullptr;
+ }
+
+ cx->markAtom(sym);
+ return sym;
+}
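+
+// Editorial note: this is the registry lookup behind Symbol.for(). It is why
+// Symbol.for("x") === Symbol.for("x") holds (both calls atomize "x" and find
+// the same registry entry), while Symbol("x") always allocates a fresh,
+// unregistered symbol via Symbol::new_ above.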
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+void Symbol::dump() {
+ js::Fprinter out(stderr);
+ dump(out);
+}
+
+void Symbol::dump(js::GenericPrinter& out) {
+ if (isWellKnownSymbol()) {
+ // All the well-known symbol names are ASCII.
+ description()->dumpCharsNoNewline(out);
+ } else if (code_ == SymbolCode::InSymbolRegistry ||
+ code_ == SymbolCode::UniqueSymbol) {
+ out.printf(code_ == SymbolCode::InSymbolRegistry ? "Symbol.for("
+ : "Symbol(");
+
+ if (description()) {
+ description()->dumpCharsNoNewline(out);
+ } else {
+ out.printf("undefined");
+ }
+
+ out.putChar(')');
+
+ if (code_ == SymbolCode::UniqueSymbol) {
+ out.printf("@%p", (void*)this);
+ }
+ } else if (code_ == SymbolCode::PrivateNameSymbol) {
+ MOZ_ASSERT(description());
+ out.putChar('#');
+ description()->dumpCharsNoNewline(out);
+ out.printf("@%p", (void*)this);
+ } else {
+ out.printf("<Invalid Symbol code=%u>", unsigned(code_));
+ }
+}
+#endif // defined(DEBUG) || defined(JS_JITSPEW)
+
+bool js::SymbolDescriptiveString(JSContext* cx, Symbol* sym,
+ MutableHandleValue result) {
+ // steps 2-5
+ JSStringBuilder sb(cx);
+ if (!sb.append("Symbol(")) {
+ return false;
+ }
+ if (JSAtom* desc = sym->description()) {
+ if (!sb.append(desc)) {
+ return false;
+ }
+ }
+ if (!sb.append(')')) {
+ return false;
+ }
+
+ // step 6
+ JSString* str = sb.finishString();
+ if (!str) {
+ return false;
+ }
+ result.setString(str);
+ return true;
+}
+
+JS::ubi::Node::Size JS::ubi::Concrete<JS::Symbol>::size(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ // If we start allocating symbols in the nursery, we will need to update
+ // this method.
+ MOZ_ASSERT(get().isTenured());
+ return js::gc::Arena::thingSize(get().asTenured().getAllocKind());
+}
diff --git a/js/src/vm/SymbolType.h b/js/src/vm/SymbolType.h
new file mode 100644
index 0000000000..2a66e6c30f
--- /dev/null
+++ b/js/src/vm/SymbolType.h
@@ -0,0 +1,153 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_SymbolType_h
+#define vm_SymbolType_h
+
+#include <stdio.h>
+
+#include "gc/Barrier.h"
+#include "gc/Tracer.h"
+#include "js/AllocPolicy.h"
+#include "js/GCHashTable.h"
+#include "js/RootingAPI.h"
+#include "js/shadow/Symbol.h" // JS::shadow::Symbol
+#include "js/Symbol.h"
+#include "js/TypeDecls.h"
+#include "vm/StringType.h"
+
+namespace js {
+class JS_PUBLIC_API GenericPrinter;
+}
+
+namespace JS {
+
+class Symbol
+ : public js::gc::CellWithTenuredGCPointer<js::gc::TenuredCell, JSAtom> {
+ friend class js::gc::CellAllocator;
+
+ public:
+ // User description of symbol, stored in the cell header.
+ JSAtom* description() const { return headerPtr(); }
+
+ private:
+ SymbolCode code_;
+
+ // Each Symbol gets its own hash code so that we don't have to use
+ // addresses as hash codes (a security hazard).
+ js::HashNumber hash_;
+
+ Symbol(SymbolCode code, js::HashNumber hash, Handle<JSAtom*> desc)
+ : CellWithTenuredGCPointer(desc), code_(code), hash_(hash) {}
+
+ Symbol(const Symbol&) = delete;
+ void operator=(const Symbol&) = delete;
+
+ static Symbol* newInternal(JSContext* cx, SymbolCode code,
+ js::HashNumber hash, Handle<JSAtom*> description);
+
+ static void staticAsserts() {
+ static_assert(uint32_t(SymbolCode::WellKnownAPILimit) ==
+ JS::shadow::Symbol::WellKnownAPILimit,
+ "JS::shadow::Symbol::WellKnownAPILimit must match "
+ "SymbolCode::WellKnownAPILimit");
+ static_assert(
+ offsetof(Symbol, code_) == offsetof(JS::shadow::Symbol, code_),
+ "JS::shadow::Symbol::code_ offset must match SymbolCode::code_");
+ }
+
+ public:
+ static Symbol* new_(JSContext* cx, SymbolCode code,
+ js::HandleString description);
+ static Symbol* newWellKnown(JSContext* cx, SymbolCode code,
+ Handle<js::PropertyName*> description);
+ static Symbol* for_(JSContext* cx, js::HandleString description);
+
+ SymbolCode code() const { return code_; }
+ js::HashNumber hash() const { return hash_; }
+
+ bool isWellKnownSymbol() const {
+ return uint32_t(code_) < WellKnownSymbolLimit;
+ }
+
+ // An "interesting symbol" is a well-known symbol, like @@toStringTag,
+ // that's often looked up on random objects but is usually not present. We
+ // optimize this by setting a flag on the object's BaseShape when such
+ // symbol properties are added, so we can optimize lookups on objects that
+ // don't have the BaseShape flag.
+ bool isInterestingSymbol() const {
+ return code_ == SymbolCode::toStringTag ||
+ code_ == SymbolCode::toPrimitive ||
+ code_ == SymbolCode::isConcatSpreadable;
+ }
+
+ // Symbol created for the #PrivateName syntax.
+ bool isPrivateName() const { return code_ == SymbolCode::PrivateNameSymbol; }
+
+ static const JS::TraceKind TraceKind = JS::TraceKind::Symbol;
+
+ void traceChildren(JSTracer* trc);
+ void finalize(JS::GCContext* gcx) {}
+
+ // Override base class implementation to tell GC about well-known symbols.
+ bool isPermanentAndMayBeShared() const { return isWellKnownSymbol(); }
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(this);
+ }
+
+#if defined(DEBUG) || defined(JS_JITSPEW)
+ void dump(); // Debugger-friendly stderr dump.
+ void dump(js::GenericPrinter& out);
+#endif
+
+ static constexpr size_t offsetOfHash() { return offsetof(Symbol, hash_); }
+};
+
+} /* namespace JS */
+
+namespace js {
+
+/* Hash policy used by the SymbolRegistry. */
+struct HashSymbolsByDescription {
+ using Key = JS::Symbol*;
+ using Lookup = JSAtom*;
+
+ static HashNumber hash(Lookup l) { return HashNumber(l->hash()); }
+ static bool match(Key sym, Lookup l) { return sym->description() == l; }
+};
+
+/*
+ * [SMDOC] Symbol.for() registry (ES6 GlobalSymbolRegistry)
+ *
+ * The runtime-wide symbol registry, used to implement Symbol.for().
+ *
+ * ES6 draft rev 25 (2014 May 22) calls this the GlobalSymbolRegistry List. In
+ * our implementation, it is not global. There is one per JSRuntime. The
+ * symbols in the symbol registry, like all symbols, are allocated in the atoms
+ * compartment and can be directly referenced from any compartment. They are
+ * never shared across runtimes.
+ *
+ * The memory management strategy here is modeled after js::AtomSet. It's like
+ * a WeakSet. The registry itself does not keep any symbols alive; when a
+ * symbol in the registry is collected, the registry entry is removed. No GC
+ * nondeterminism is exposed to scripts, because there is no API for
+ * enumerating the symbol registry, querying its size, etc.
+ */
+class SymbolRegistry
+ : public GCHashSet<WeakHeapPtr<JS::Symbol*>, HashSymbolsByDescription,
+ SystemAllocPolicy> {
+ public:
+ SymbolRegistry() = default;
+};
+
+// ES6 rev 27 (2014 Aug 24) 19.4.3.3
+bool SymbolDescriptiveString(JSContext* cx, JS::Symbol* sym,
+ JS::MutableHandleValue result);
+
+} /* namespace js */
+
+#endif /* vm_SymbolType_h */
diff --git a/js/src/vm/TaggedProto.cpp b/js/src/vm/TaggedProto.cpp
new file mode 100644
index 0000000000..558d569a00
--- /dev/null
+++ b/js/src/vm/TaggedProto.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/TaggedProto.h"
+
+#include "gc/Barrier.h"
+#include "vm/JSObject.h"
+
+namespace js {
+
+/* static */ void InternalBarrierMethods<TaggedProto>::preBarrier(
+ TaggedProto& proto) {
+ InternalBarrierMethods<JSObject*>::preBarrier(proto.toObjectOrNull());
+}
+
+/* static */ void InternalBarrierMethods<TaggedProto>::postBarrier(
+ TaggedProto* vp, TaggedProto prev, TaggedProto next) {
+ JSObject* prevObj = prev.isObject() ? prev.toObject() : nullptr;
+ JSObject* nextObj = next.isObject() ? next.toObject() : nullptr;
+ InternalBarrierMethods<JSObject*>::postBarrier(
+ reinterpret_cast<JSObject**>(vp), prevObj, nextObj);
+}
+
+/* static */ void InternalBarrierMethods<TaggedProto>::readBarrier(
+ const TaggedProto& proto) {
+ InternalBarrierMethods<JSObject*>::readBarrier(proto.toObjectOrNull());
+}
+
+void TaggedProto::trace(JSTracer* trc) { TraceRoot(trc, this, "TaggedProto"); }
+
+} // namespace js
diff --git a/js/src/vm/TaggedProto.h b/js/src/vm/TaggedProto.h
new file mode 100644
index 0000000000..42aecf998a
--- /dev/null
+++ b/js/src/vm/TaggedProto.h
@@ -0,0 +1,173 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_TaggedProto_h
+#define vm_TaggedProto_h
+
+#include "mozilla/Maybe.h"
+
+#include "gc/Barrier.h"
+#include "js/HashTable.h"
+#include "js/RootingAPI.h"
+
+class JSObject;
+
+namespace js {
+
+// Information about an object prototype, which can be either a particular
+// object, null, or a lazily generated object. The latter is only used by
+// certain kinds of proxies.
+class TaggedProto {
+ public:
+ static JSObject* const LazyProto;
+
+ TaggedProto() : proto(nullptr) {}
+ TaggedProto(const TaggedProto& other) = default;
+ explicit TaggedProto(JSObject* proto) : proto(proto) {}
+
+ bool isDynamic() const { return proto == LazyProto; }
+ bool isObject() const {
+ /* Skip nullptr and LazyProto. */
+ return uintptr_t(proto) > uintptr_t(TaggedProto::LazyProto);
+ }
+ JSObject* toObject() const {
+ MOZ_ASSERT(isObject());
+ return proto;
+ }
+ JSObject* toObjectOrNull() const {
+ MOZ_ASSERT(!proto || isObject());
+ return proto;
+ }
+ JSObject* raw() const { return proto; }
+
+ bool operator==(const TaggedProto& other) const {
+ return proto == other.proto;
+ }
+ bool operator!=(const TaggedProto& other) const {
+ return proto != other.proto;
+ }
+
+ HashNumber hashCode() const;
+
+ void trace(JSTracer* trc);
+
+ private:
+ JSObject* proto;
+};
+
+template <>
+struct StableCellHasher<TaggedProto> {
+ using Key = TaggedProto;
+ using Lookup = TaggedProto;
+
+ static bool maybeGetHash(const Lookup& l, HashNumber* hashOut) {
+ if (!l.isObject()) {
+ *hashOut = hash(l);
+ return true;
+ }
+
+ return StableCellHasher<JSObject*>::maybeGetHash(l.toObject(), hashOut);
+ }
+ static bool ensureHash(const Lookup& l, HashNumber* hashOut) {
+ if (!l.isObject()) {
+ *hashOut = hash(l);
+ return true;
+ }
+ return StableCellHasher<JSObject*>::ensureHash(l.toObject(), hashOut);
+ }
+ static HashNumber hash(const Lookup& l) {
+ if (l.isDynamic()) {
+      return HashNumber(1);
+ }
+ if (!l.isObject()) {
+      return HashNumber(0);
+ }
+ return StableCellHasher<JSObject*>::hash(l.toObject());
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return k.isDynamic() == l.isDynamic() && k.isObject() == l.isObject() &&
+ (!k.isObject() ||
+ StableCellHasher<JSObject*>::match(k.toObject(), l.toObject()));
+ }
+};
+
+#ifdef DEBUG
+MOZ_ALWAYS_INLINE void AssertTaggedProtoIsNotGray(const TaggedProto& proto) {
+ if (proto.isObject()) {
+ JS::AssertObjectIsNotGray(proto.toObject());
+ }
+}
+#endif
+
+template <>
+struct InternalBarrierMethods<TaggedProto> {
+ static void preBarrier(TaggedProto& proto);
+
+ static void postBarrier(TaggedProto* vp, TaggedProto prev, TaggedProto next);
+
+ static void readBarrier(const TaggedProto& proto);
+
+ static bool isMarkable(const TaggedProto& proto) { return proto.isObject(); }
+
+#ifdef DEBUG
+ static void assertThingIsNotGray(const TaggedProto& proto) {
+ AssertTaggedProtoIsNotGray(proto);
+ }
+#endif
+};
+
+template <class Wrapper>
+class WrappedPtrOperations<TaggedProto, Wrapper> {
+ const TaggedProto& value() const {
+ return static_cast<const Wrapper*>(this)->get();
+ }
+
+ public:
+ uintptr_t toWord() const { return value().toWord(); }
+ inline bool isDynamic() const { return value().isDynamic(); }
+ inline bool isObject() const { return value().isObject(); }
+ inline JSObject* toObject() const { return value().toObject(); }
+ inline JSObject* toObjectOrNull() const { return value().toObjectOrNull(); }
+ JSObject* raw() const { return value().raw(); }
+ HashNumber hashCode() const { return value().hashCode(); }
+ uint64_t uniqueId() const { return value().uniqueId(); }
+};
+
+// If the TaggedProto is a JSObject pointer, convert to that type and call |f|
+// with the pointer. If the TaggedProto is null or lazy, return Nothing().
+template <typename F>
+auto MapGCThingTyped(const TaggedProto& proto, F&& f) {
+ if (proto.isObject()) {
+ return mozilla::Some(f(proto.toObject()));
+ }
+ using ReturnType = decltype(f(static_cast<JSObject*>(nullptr)));
+ return mozilla::Maybe<ReturnType>();
+}
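+
+// Editorial note: a usage sketch. MapGCThingTyped(proto, [](JSObject* obj) {
+// return obj->compartment(); }) yields Some(compartment) for an object proto
+// and Nothing() for a null or lazy proto.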
+
+template <typename F>
+bool ApplyGCThingTyped(const TaggedProto& proto, F&& f) {
+ return MapGCThingTyped(proto,
+ [&f](auto t) {
+ f(t);
+ return true;
+ })
+ .isSome();
+}
+
+// Since JSObject pointers are either nullptr or a valid object and since the
+// object layout of TaggedProto is identical to a bare object pointer, we can
+// safely treat a pointer to an already-rooted object (e.g. HandleObject) as a
+// pointer to a TaggedProto.
+inline Handle<TaggedProto> AsTaggedProto(HandleObject obj) {
+ static_assert(sizeof(JSObject*) == sizeof(TaggedProto),
+ "TaggedProto must be binary compatible with JSObject");
+ return Handle<TaggedProto>::fromMarkedLocation(
+ reinterpret_cast<TaggedProto const*>(obj.address()));
+}
+
+} // namespace js
+
+#endif // vm_TaggedProto_h
diff --git a/js/src/vm/ThrowMsgKind.cpp b/js/src/vm/ThrowMsgKind.cpp
new file mode 100644
index 0000000000..671a962a23
--- /dev/null
+++ b/js/src/vm/ThrowMsgKind.cpp
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/ThrowMsgKind.h"
+
+#include "mozilla/Assertions.h" // MOZ_CRASH
+
+#include "js/friend/ErrorMessages.h" // JSErrNum, JSMSG_*
+
+JSErrNum js::ThrowMsgKindToErrNum(ThrowMsgKind kind) {
+ switch (kind) {
+ case ThrowMsgKind::AssignToCall:
+ return JSMSG_ASSIGN_TO_CALL;
+ case ThrowMsgKind::IteratorNoThrow:
+ return JSMSG_ITERATOR_NO_THROW;
+ case ThrowMsgKind::CantDeleteSuper:
+ return JSMSG_CANT_DELETE_SUPER;
+ case ThrowMsgKind::PrivateDoubleInit:
+ return JSMSG_PRIVATE_FIELD_DOUBLE;
+ case ThrowMsgKind::PrivateBrandDoubleInit:
+ return JSMSG_PRIVATE_BRAND_DOUBLE;
+ case ThrowMsgKind::MissingPrivateOnGet:
+ return JSMSG_GET_MISSING_PRIVATE;
+ case ThrowMsgKind::MissingPrivateOnSet:
+ return JSMSG_SET_MISSING_PRIVATE;
+ case ThrowMsgKind::AssignToPrivateMethod:
+ return JSMSG_ASSIGN_TO_PRIVATE_METHOD;
+ case ThrowMsgKind::DecoratorInvalidReturnType:
+ return JSMSG_DECORATOR_INVALID_RETURN_TYPE;
+ }
+
+ MOZ_CRASH("Unexpected message kind");
+}
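+
+// Editorial note: the switch above intentionally has no default: label, so
+// adding a new ThrowMsgKind without a mapping triggers an unhandled-enum
+// warning at compile time (e.g. -Wswitch on GCC/Clang), with MOZ_CRASH as
+// the runtime backstop.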
diff --git a/js/src/vm/ThrowMsgKind.h b/js/src/vm/ThrowMsgKind.h
new file mode 100644
index 0000000000..2631ed011a
--- /dev/null
+++ b/js/src/vm/ThrowMsgKind.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ThrowMsgKind_h
+#define vm_ThrowMsgKind_h
+
+#include <stdint.h> // uint8_t
+
+#include "js/friend/ErrorMessages.h" // JSErrNum
+
+namespace js {
+
+enum class ThrowMsgKind : uint8_t {
+ AssignToCall,
+ IteratorNoThrow,
+ CantDeleteSuper,
+ // Private Fields:
+ PrivateDoubleInit,
+ PrivateBrandDoubleInit,
+ MissingPrivateOnGet,
+ MissingPrivateOnSet,
+ AssignToPrivateMethod,
+ // Decorators:
+ DecoratorInvalidReturnType,
+};
+
+JSErrNum ThrowMsgKindToErrNum(ThrowMsgKind kind);
+
+// Used for CheckPrivateField
+enum class ThrowCondition : uint8_t { ThrowHas, ThrowHasNot, OnlyCheckRhs };
+
+} // namespace js
+
+#endif /* vm_ThrowMsgKind_h */
diff --git a/js/src/vm/Time.cpp b/js/src/vm/Time.cpp
new file mode 100644
index 0000000000..41ca68f8ee
--- /dev/null
+++ b/js/src/vm/Time.cpp
@@ -0,0 +1,383 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* PR time code. */
+
+#include "vm/Time.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#ifdef SOLARIS
+# define _REENTRANT 1
+#endif
+#include <string.h>
+#include <time.h>
+
+#include "jstypes.h"
+
+#ifdef XP_WIN
+# include <windef.h>
+# include <winbase.h>
+# include <crtdbg.h> /* for _CrtSetReportMode */
+# include <mmsystem.h> /* for timeBegin/EndPeriod */
+# include <stdlib.h> /* for _set_invalid_parameter_handler */
+#endif
+
+#ifdef XP_UNIX
+
+# ifdef _SVID_GETTOD /* Defined only on Solaris, see Solaris <sys/types.h> */
+extern int gettimeofday(struct timeval* tv);
+# endif
+
+# include <sys/time.h>
+
+#endif /* XP_UNIX */
+
+using mozilla::DebugOnly;
+
+#if defined(XP_UNIX)
+int64_t PRMJ_Now() {
+ struct timeval tv;
+
+# ifdef _SVID_GETTOD /* Defined only on Solaris, see Solaris <sys/types.h> */
+ gettimeofday(&tv);
+# else
+ gettimeofday(&tv, 0);
+# endif /* _SVID_GETTOD */
+
+ return int64_t(tv.tv_sec) * PRMJ_USEC_PER_SEC + int64_t(tv.tv_usec);
+}
+
+#else
+
+// Returns the number of microseconds since the Unix epoch.
+static double FileTimeToUnixMicroseconds(const FILETIME& ft) {
+ // Get the time in 100ns intervals.
+ int64_t t = (int64_t(ft.dwHighDateTime) << 32) | int64_t(ft.dwLowDateTime);
+
+ // The Windows epoch is around 1600. The Unix epoch is around 1970.
+ // Subtract the difference.
+ static const int64_t TimeToEpochIn100ns = 0x19DB1DED53E8000;
+ t -= TimeToEpochIn100ns;
+
+ // Divide by 10 to convert to microseconds.
+ return double(t) * 0.1;
+}
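+
+// Editorial note, worked example: FILETIME counts 100ns units since
+// 1601-01-01, and 0x19DB1DED53E8000 is 116444736000000000, i.e. the
+// 11644473600 seconds between the two epochs in 100ns units. After the
+// subtraction, t counts 100ns units since 1970; multiplying by 0.1 converts
+// to microseconds.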
+
+struct CalibrationData {
+ double freq; /* The performance counter frequency */
+ double offset; /* The low res 'epoch' */
+ double timer_offset; /* The high res 'epoch' */
+
+ bool calibrated;
+
+ CRITICAL_SECTION data_lock;
+};
+
+static CalibrationData calibration = {0};
+
+static void NowCalibrate() {
+ MOZ_ASSERT(calibration.freq > 0);
+
+ // By wrapping a timeBegin/EndPeriod pair of calls around this loop,
+ // the loop seems to take much less time (1 ms vs 15ms) on Vista.
+ timeBeginPeriod(1);
+ FILETIME ft, ftStart;
+ GetSystemTimeAsFileTime(&ftStart);
+ do {
+ GetSystemTimeAsFileTime(&ft);
+ } while (memcmp(&ftStart, &ft, sizeof(ft)) == 0);
+ timeEndPeriod(1);
+
+ LARGE_INTEGER now;
+ QueryPerformanceCounter(&now);
+
+ calibration.offset = FileTimeToUnixMicroseconds(ft);
+ calibration.timer_offset = double(now.QuadPart);
+ calibration.calibrated = true;
+}
+
+static const unsigned DataLockSpinCount = 4096;
+
+static void(WINAPI* pGetSystemTimePreciseAsFileTime)(LPFILETIME) = nullptr;
+
+void PRMJ_NowInit() {
+ memset(&calibration, 0, sizeof(calibration));
+
+ // According to the documentation, QueryPerformanceFrequency will never
+ // return false or report a zero frequency on systems that run
+ // Windows XP or later. Also, the frequency is fixed so we only have to
+ // query it once.
+ LARGE_INTEGER liFreq;
+ DebugOnly<BOOL> res = QueryPerformanceFrequency(&liFreq);
+ MOZ_ASSERT(res);
+ calibration.freq = double(liFreq.QuadPart);
+ MOZ_ASSERT(calibration.freq > 0.0);
+
+ InitializeCriticalSectionAndSpinCount(&calibration.data_lock,
+ DataLockSpinCount);
+
+ // Windows 8 has a new API function we can use.
+ if (HMODULE h = GetModuleHandle("kernel32.dll")) {
+ pGetSystemTimePreciseAsFileTime = (void(WINAPI*)(LPFILETIME))GetProcAddress(
+ h, "GetSystemTimePreciseAsFileTime");
+ }
+}
+
+void PRMJ_NowShutdown() { DeleteCriticalSection(&calibration.data_lock); }
+
+# define MUTEX_LOCK(m) EnterCriticalSection(m)
+# define MUTEX_UNLOCK(m) LeaveCriticalSection(m)
+# define MUTEX_SETSPINCOUNT(m, c) SetCriticalSectionSpinCount((m), (c))
+
+// Please see bug 363258 for why the win32 timing code is so complex.
+int64_t PRMJ_Now() {
+ if (pGetSystemTimePreciseAsFileTime) {
+ // Windows 8 has a new API function that does all the work.
+ FILETIME ft;
+ pGetSystemTimePreciseAsFileTime(&ft);
+ return int64_t(FileTimeToUnixMicroseconds(ft));
+ }
+
+ bool calibrated = false;
+ bool needsCalibration = !calibration.calibrated;
+ double cachedOffset = 0.0;
+ while (true) {
+ if (needsCalibration) {
+ MUTEX_LOCK(&calibration.data_lock);
+
+ // Recalibrate only if no one else did before us.
+ if (calibration.offset == cachedOffset) {
+ // Since calibration can take a while, make any other
+ // threads immediately wait.
+ MUTEX_SETSPINCOUNT(&calibration.data_lock, 0);
+
+ NowCalibrate();
+
+ calibrated = true;
+
+ // Restore spin count.
+ MUTEX_SETSPINCOUNT(&calibration.data_lock, DataLockSpinCount);
+ }
+
+ MUTEX_UNLOCK(&calibration.data_lock);
+ }
+
+ // Calculate a low resolution time.
+ FILETIME ft;
+ GetSystemTimeAsFileTime(&ft);
+ double lowresTime = FileTimeToUnixMicroseconds(ft);
+
+ // Grab high resolution time.
+ LARGE_INTEGER now;
+ QueryPerformanceCounter(&now);
+ double highresTimerValue = double(now.QuadPart);
+
+ MUTEX_LOCK(&calibration.data_lock);
+ double highresTime = calibration.offset +
+ PRMJ_USEC_PER_SEC *
+ (highresTimerValue - calibration.timer_offset) /
+ calibration.freq;
+ cachedOffset = calibration.offset;
+ MUTEX_UNLOCK(&calibration.data_lock);
+
+ // Assume the NT kernel ticks every 15.6 ms. Unfortunately there's no
+ // good way to determine this (NtQueryTimerResolution is an undocumented
+ // API), but 15.6 ms seems to be the max possible value. Hardcoding 15.6
+ // means we'll recalibrate if the highres and lowres timers diverge by
+ // more than 30 ms.
+ static const double KernelTickInMicroseconds = 15625.25;
+
+ // Check for clock skew.
+ double diff = lowresTime - highresTime;
+
+ // For some reason that I have not determined, the skew can be
+ // up to twice a kernel tick. This does not seem to happen by
+ // itself, but I have only seen it triggered by another program
+ // doing some kind of file I/O. The symptoms are a negative diff
+ // followed by an equally large positive diff.
+ if (mozilla::Abs(diff) <= 2 * KernelTickInMicroseconds) {
+ // No detectable clock skew.
+ return int64_t(highresTime);
+ }
+
+ if (calibrated) {
+ // If we already calibrated once this instance, and the
+ // clock is still skewed, then either the processor(s) are
+ // wildly changing clockspeed or the system is so busy that
+ // we get switched out for long periods of time. In either
+ // case, it would be infeasible to make use of high
+ // resolution results for anything, so let's resort to old
+ // behavior for this call. It's possible that in the
+ // future, the user will want the high resolution timer, so
+ // we don't disable it entirely.
+ return int64_t(lowresTime);
+ }
+
+ // It is possible that when we recalibrate, we will return a
+ // value less than what we have returned before; this is
+ // unavoidable. We cannot tell the difference between a
+ // faulty QueryPerformanceCounter implementation and user
+ // changes to the operating system time. Since we must
+ // respect user changes to the operating system time, we
+ // cannot maintain the invariant that Date.now() never
+ // decreases; the old implementation has this behavior as
+ // well.
+ needsCalibration = true;
+ }
+}
+#endif
+
+#if !JS_HAS_INTL_API
+# ifdef XP_WIN
+static void PRMJ_InvalidParameterHandler(const wchar_t* expression,
+ const wchar_t* function,
+ const wchar_t* file, unsigned int line,
+ uintptr_t pReserved) {
+ /* empty */
+}
+# endif
+
+/* Format a time value into a buffer. Same semantics as strftime() */
+size_t PRMJ_FormatTime(char* buf, size_t buflen, const char* fmt,
+ const PRMJTime* prtm, int timeZoneYear,
+ int offsetInSeconds) {
+ size_t result = 0;
+# if defined(XP_UNIX) || defined(XP_WIN)
+ struct tm a;
+# ifdef XP_WIN
+ _invalid_parameter_handler oldHandler;
+# ifndef __MINGW32__
+ int oldReportMode;
+# endif // __MINGW32__
+# endif // XP_WIN
+
+ memset(&a, 0, sizeof(struct tm));
+
+ a.tm_sec = prtm->tm_sec;
+ a.tm_min = prtm->tm_min;
+ a.tm_hour = prtm->tm_hour;
+ a.tm_mday = prtm->tm_mday;
+ a.tm_mon = prtm->tm_mon;
+ a.tm_wday = prtm->tm_wday;
+
+ /*
+ * On systems where |struct tm| has members tm_gmtoff and tm_zone, we
+ * must fill in those values, or else strftime will return wrong results
+ * (e.g., bug 511726, bug 554338).
+ */
+# if defined(HAVE_LOCALTIME_R) && defined(HAVE_TM_ZONE_TM_GMTOFF)
+ char emptyTimeZoneId[] = "";
+ {
+ /*
+ * Fill out |td| to the time represented by |prtm|, leaving the
+ * timezone fields zeroed out. localtime_r will then fill in the
+ * timezone fields for that local time according to the system's
+ * timezone parameters. Use |timeZoneYear| for the year to ensure the
+ * time zone name matches the time zone offset used by the caller.
+ */
+ struct tm td;
+ memset(&td, 0, sizeof(td));
+ td.tm_sec = prtm->tm_sec;
+ td.tm_min = prtm->tm_min;
+ td.tm_hour = prtm->tm_hour;
+ td.tm_mday = prtm->tm_mday;
+ td.tm_mon = prtm->tm_mon;
+ td.tm_wday = prtm->tm_wday;
+ td.tm_year = timeZoneYear - 1900;
+ td.tm_yday = prtm->tm_yday;
+ td.tm_isdst = prtm->tm_isdst;
+
+ time_t t = mktime(&td);
+
+ // If either mktime or localtime_r failed, fill in the fallback time
+ // zone offset |offsetInSeconds| and set the time zone identifier to
+ // the empty string.
+ if (t != static_cast<time_t>(-1) && localtime_r(&t, &td)) {
+ a.tm_gmtoff = td.tm_gmtoff;
+ a.tm_zone = td.tm_zone;
+ } else {
+ a.tm_gmtoff = offsetInSeconds;
+ a.tm_zone = emptyTimeZoneId;
+ }
+ }
+# endif
+
+ /*
+ * Years before 1900 and after 9999 cause strftime() to abort on Windows.
+ * To avoid that we replace it with FAKE_YEAR_BASE + year % 100 and then
+ * replace matching substrings in the strftime() result with the real year.
+ * Note that FAKE_YEAR_BASE should be a multiple of 100 to make 2-digit
+ * year formats (%y) work correctly (since we won't find the fake year
+ * in that case).
+ */
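+ // Worked example (illustrative): tm_year == 1066 gives fake_tm_year ==
+ // 9900 + 66 == 9966; strftime() then prints "9966" for %Y, and the loop
+ // after the strftime() call below rewrites it back to "1066". For %y,
+ // strftime() prints "66" directly, no "9966" substring appears, and
+ // nothing is rewritten.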
+ constexpr int FAKE_YEAR_BASE = 9900;
+ int fake_tm_year = 0;
+ if (prtm->tm_year < 1900 || prtm->tm_year > 9999) {
+ fake_tm_year = FAKE_YEAR_BASE + prtm->tm_year % 100;
+ a.tm_year = fake_tm_year - 1900;
+ } else {
+ a.tm_year = prtm->tm_year - 1900;
+ }
+ a.tm_yday = prtm->tm_yday;
+ a.tm_isdst = prtm->tm_isdst;
+
+ /*
+ * Even with the above, SunOS 4 seems to detonate if tm_zone and tm_gmtoff
+ * are null. This doesn't quite work, though - the timezone is off by
+ * tzoff + dst. (And mktime seems to return -1 for the exact dst
+ * changeover time.)
+ */
+
+# ifdef XP_WIN
+ oldHandler = _set_invalid_parameter_handler(PRMJ_InvalidParameterHandler);
+# ifndef __MINGW32__
+ /*
+ * MinGW doesn't have _CrtSetReportMode and defines it to be a no-op.
+ * We ifdef it off to avoid warnings about unused variables
+ */
+ oldReportMode = _CrtSetReportMode(_CRT_ASSERT, 0);
+# endif // __MINGW32__
+# endif // XP_WIN
+
+ result = strftime(buf, buflen, fmt, &a);
+
+# ifdef XP_WIN
+ _set_invalid_parameter_handler(oldHandler);
+# ifndef __MINGW32__
+ _CrtSetReportMode(_CRT_ASSERT, oldReportMode);
+# endif // __MINGW32__
+# endif // XP_WIN
+
+ if (fake_tm_year && result) {
+ char real_year[16];
+ char fake_year[16];
+ size_t real_year_len;
+ size_t fake_year_len;
+ char* p;
+
+ sprintf(real_year, "%d", prtm->tm_year);
+ real_year_len = strlen(real_year);
+ sprintf(fake_year, "%d", fake_tm_year);
+ fake_year_len = strlen(fake_year);
+
+ /* Replace the fake year in the result with the real year. */
+ for (p = buf; (p = strstr(p, fake_year)); p += real_year_len) {
+ size_t new_result = result + real_year_len - fake_year_len;
+ if (new_result >= buflen) {
+ return 0;
+ }
+ memmove(p + real_year_len, p + fake_year_len, strlen(p + fake_year_len));
+ memcpy(p, real_year, real_year_len);
+ result = new_result;
+ *(buf + result) = '\0';
+ }
+ }
+# endif
+ return result;
+}
+#endif /* !JS_HAS_INTL_API */
diff --git a/js/src/vm/Time.h b/js/src/vm/Time.h
new file mode 100644
index 0000000000..cc3c8a9641
--- /dev/null
+++ b/js/src/vm/Time.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Time_h
+#define vm_Time_h
+
+#include "mozilla/TimeStamp.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#if !JS_HAS_INTL_API
+/*
+ * Broken-down form of a 64-bit time value.
+ */
+struct PRMJTime {
+ int32_t tm_usec; /* microseconds of second (0-999999) */
+ int8_t tm_sec; /* seconds of minute (0-59) */
+ int8_t tm_min; /* minutes of hour (0-59) */
+ int8_t tm_hour; /* hour of day (0-23) */
+ int8_t tm_mday; /* day of month (1-31) */
+ int8_t tm_mon; /* month of year (0-11) */
+ int8_t tm_wday; /* 0=sunday, 1=monday, ... */
+ int32_t tm_year; /* absolute year, AD */
+ int16_t tm_yday; /* day of year (0 to 365) */
+ int8_t tm_isdst; /* non-zero if DST in effect */
+};
+#endif
+
+/* Some handy constants */
+#define PRMJ_USEC_PER_SEC 1000000L
+#define PRMJ_USEC_PER_MSEC 1000L
+
+/* Return the current time in microseconds since the Unix epoch (UTC) */
+extern int64_t PRMJ_Now();
+
+/* Initialize the resources associated with PRMJ_Now. */
+#if defined(XP_WIN)
+extern void PRMJ_NowInit();
+#else
+inline void PRMJ_NowInit() {}
+#endif
+
+/* Release the resources associated with PRMJ_Now; don't call PRMJ_Now again */
+#ifdef XP_WIN
+extern void PRMJ_NowShutdown();
+#else
+inline void PRMJ_NowShutdown() {}
+#endif
+
+#if !JS_HAS_INTL_API
+/* Format a time value into a buffer. Same semantics as strftime() */
+extern size_t PRMJ_FormatTime(char* buf, size_t buflen, const char* fmt,
+ const PRMJTime* tm, int timeZoneYear,
+ int offsetInSeconds);
+#endif
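+// Illustrative use of PRMJ_FormatTime (a sketch, not normative; |prtm| is
+// assumed to be a fully populated PRMJTime):
+//
+//   char buf[100];
+//   size_t n = PRMJ_FormatTime(buf, sizeof(buf), "%a %b %d %Y", &prtm,
+//                              prtm.tm_year, 0);
+//   // n is 0 on failure; otherwise buf holds the formatted date string.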
+
+/**
+ * Requesting the number of cycles from the CPU.
+ *
+ * `rdtsc`, or Read Time-Stamp Counter, is an instruction provided by
+ * x86-compatible CPUs that lets processes request the number of
+ * cycles spent by the CPU executing instructions since the CPU was
+ * started. It may be used for performance monitoring, but you should
+ * be aware of the following limitations.
+ *
+ *
+ * 1. The value is *not* monotonic.
+ *
+ * The value is reset to 0 whenever a CPU is turned off (e.g. computer
+ * in full hibernation, a single CPU being turned off). Moreover, on
+ * multi-core/multi-CPU architectures, the cycles of each core/CPU are
+ * generally not synchronized. Therefore, if a process or thread is
+ * rescheduled to another core/CPU, the result of `rdtsc` may decrease
+ * arbitrarily.
+ *
+ * The only way to prevent this is to pin your thread to a particular
+ * CPU, which is generally not a good idea.
+ *
+ *
+ *
+ * 2. The value increases independently.
+ *
+ * The value may increase whenever the CPU executes an instruction,
+ * regardless of the process that has issued this
+ * instruction. Moreover, if a process or thread is rescheduled to
+ * another core/CPU, the result of `rdtsc` may increase arbitrarily.
+ *
+ * The only way to prevent this is to ensure that your thread is the
+ * sole owner of the CPU. See [1] for an example. This is also
+ * generally not a good idea.
+ *
+ *
+ *
+ * 3. The value does not measure time.
+ *
+ * On older architectures (pre-Pentium 4), the counter advanced once per
+ * CPU cycle, so with dynamic frequency scaling there was no constant
+ * mapping between rdtsc values and elapsed wall-clock time.
+ *
+ *
+ * 4. Instructions may be reordered.
+ *
+ * The CPU can reorder instructions. Also, rdtsc does not necessarily
+ * wait until all previous instructions have finished executing before
+ * reading the counter. Similarly, subsequent instructions may begin
+ * execution before the read operation is performed. If you use rdtsc
+ * for micro-benchmarking, you may end up measuring something else
+ * than what you expect. See [1] for a study of countermeasures.
+ *
+ *
+ * ** Performance
+ *
+ * According to unchecked sources on the web, the overhead of rdtsc is
+ * expected to be 150-200 cycles on old architectures, 6-50 on newer
+ * architectures. Agner's instruction tables [2] seem to confirm the latter
+ * results.
+ *
+ *
+ * [1]
+ * http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/ia-32-ia-64-benchmark-code-execution-paper.pdf
+ * [2] http://www.agner.org/optimize/instruction_tables.pdf
+ */
+
+#define MOZ_HAVE_RDTSC 1
+
+#if defined(_WIN32) && (defined(_M_IX86) || defined(_M_AMD64))
+
+# include <intrin.h>
+static __inline uint64_t ReadTimestampCounter(void) { return __rdtsc(); }
+
+#elif defined(__i386__)
+
+static __inline__ uint64_t ReadTimestampCounter(void) {
+ uint64_t x;
+ __asm__ volatile(".byte 0x0f, 0x31" : "=A"(x));
+ return x;
+}
+
+#elif defined(__x86_64__)
+
+static __inline__ uint64_t ReadTimestampCounter(void) {
+ unsigned hi, lo;
+ __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
+ return ((uint64_t)lo) | (((uint64_t)hi) << 32);
+}
+
+#else
+
+# undef MOZ_HAVE_RDTSC
+
+#endif
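+// Minimal cycle-counting sketch using the helper above (illustrative only;
+// |WorkloadUnderTest| is a hypothetical function, and the delta is measured
+// in cycles, not time, subject to the caveats documented above):
+//
+// #ifdef MOZ_HAVE_RDTSC
+//   uint64_t before = ReadTimestampCounter();
+//   WorkloadUnderTest();
+//   uint64_t cycles = ReadTimestampCounter() - before;
+// #endif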
+
+namespace js {
+
+class MOZ_RAII AutoIncrementalTimer {
+ mozilla::TimeStamp startTime;
+ mozilla::TimeDuration& output;
+
+ public:
+ AutoIncrementalTimer(const AutoIncrementalTimer&) = delete;
+ AutoIncrementalTimer& operator=(const AutoIncrementalTimer&) = delete;
+
+ explicit AutoIncrementalTimer(mozilla::TimeDuration& output_)
+ : output(output_) {
+ startTime = mozilla::TimeStamp::Now();
+ }
+
+ ~AutoIncrementalTimer() { output += mozilla::TimeStamp::Now() - startTime; }
+};
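+// Usage sketch: each timed scope adds its elapsed time to a caller-owned
+// TimeDuration, so repeated scopes accumulate a running total.
+//
+//   mozilla::TimeDuration totalTime;
+//   {
+//     AutoIncrementalTimer timer(totalTime);
+//     // ... timed work ...
+//   }  // totalTime now includes this scope's duration.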
+
+} // namespace js
+
+#endif /* vm_Time_h */
diff --git a/js/src/vm/ToSource.cpp b/js/src/vm/ToSource.cpp
new file mode 100644
index 0000000000..af789166de
--- /dev/null
+++ b/js/src/vm/ToSource.cpp
@@ -0,0 +1,249 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/ToSource.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/FloatingPoint.h" // mozilla::IsNegativeZero
+
+#include <iterator> // std::size
+#include <stdint.h> // uint32_t
+
+#include "builtin/Array.h" // ArrayToSource
+#include "builtin/Boolean.h" // BooleanToString
+#include "builtin/Object.h" // ObjectToSource
+#include "gc/Allocator.h" // CanGC
+#include "js/Class.h" // ESClass
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "js/Object.h" // JS::GetBuiltinClass
+#include "js/Printer.h" // QuoteString
+#include "js/Symbol.h" // SymbolCode, JS::WellKnownSymbolLimit
+#include "js/TypeDecls.h" // Rooted{Function, Object, String, Value}, HandleValue, Latin1Char
+#include "js/Utility.h" // UniqueChars
+#include "js/Value.h" // JS::Value
+#include "util/StringBuffer.h" // JSStringBuilder
+#include "vm/ErrorObject.h" // ErrorObject, ErrorToSource
+#include "vm/Interpreter.h" // Call
+#include "vm/JSContext.h" // JSContext
+#include "vm/JSFunction.h" // JSFunction, fun_toStringHelper
+#include "vm/SelfHosting.h" // CallSelfHostedFunction
+#include "vm/Stack.h" // FixedInvokeArgs
+#include "vm/StaticStrings.h" // StaticStrings
+#include "vm/StringType.h" // NewStringCopy{N,Z}, ToString
+#include "vm/SymbolType.h" // Symbol
+#ifdef ENABLE_RECORD_TUPLE
+# include "vm/RecordType.h"
+# include "vm/TupleType.h"
+#endif
+
+#include "vm/JSContext-inl.h" // JSContext::check
+#include "vm/JSObject-inl.h" // IsCallable
+#include "vm/ObjectOperations-inl.h" // GetProperty
+
+using namespace js;
+
+using mozilla::IsNegativeZero;
+
+using JS::GetBuiltinClass;
+
+/*
+ * Convert a JSString to its source expression; returns null after reporting an
+ * error, otherwise returns a new string reference. No Handle needed since the
+ * input is dead after the GC.
+ */
+static JSString* StringToSource(JSContext* cx, JSString* str) {
+ UniqueChars chars = QuoteString(cx, str, '"');
+ if (!chars) {
+ return nullptr;
+ }
+ return NewStringCopyZ<CanGC>(cx, chars.get());
+}
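+// For example, a string containing ab"c is quoted and escaped into the
+// source form "ab\"c".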
+
+static JSString* SymbolToSource(JSContext* cx, JS::Symbol* symbol) {
+ using JS::SymbolCode;
+
+ RootedString desc(cx, symbol->description());
+ SymbolCode code = symbol->code();
+ if (symbol->isWellKnownSymbol()) {
+ // Well-known symbol.
+ return desc;
+ }
+
+ if (code == SymbolCode::PrivateNameSymbol) {
+ MOZ_ASSERT(desc);
+ return desc;
+ }
+
+ MOZ_ASSERT(code == SymbolCode::InSymbolRegistry ||
+ code == SymbolCode::UniqueSymbol);
+
+ JSStringBuilder buf(cx);
+ if (code == SymbolCode::InSymbolRegistry ? !buf.append("Symbol.for(")
+ : !buf.append("Symbol(")) {
+ return nullptr;
+ }
+ if (desc) {
+ UniqueChars quoted = QuoteString(cx, desc, '"');
+ if (!quoted || !buf.append(quoted.get(), strlen(quoted.get()))) {
+ return nullptr;
+ }
+ }
+ if (!buf.append(')')) {
+ return nullptr;
+ }
+ return buf.finishString();
+}
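+// Illustrative outputs: a registered symbol made with Symbol.for("key")
+// becomes 'Symbol.for("key")'; a unique symbol with description "d"
+// becomes 'Symbol("d")'; a well-known symbol such as Symbol.iterator
+// returns its description unchanged.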
+
+static JSString* BoxedToSource(JSContext* cx, HandleObject obj,
+ const char* constructor) {
+ RootedValue value(cx);
+ if (!Unbox(cx, obj, &value)) {
+ return nullptr;
+ }
+ MOZ_ASSERT(!value.isUndefined());
+
+ RootedString str(cx, ValueToSource(cx, value));
+ if (!str) {
+ return nullptr;
+ }
+
+ JSStringBuilder buf(cx);
+ if (!buf.append("new ") || !buf.append(constructor, strlen(constructor)) ||
+ !buf.append('(') || !buf.append(str) || !buf.append(')')) {
+ return nullptr;
+ }
+
+ return buf.finishString();
+}
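+// For example, a Number object wrapping 5 produces "new Number(5)", and a
+// boxed string "x" produces the source form new String("x").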
+
+JSString* js::ValueToSource(JSContext* cx, HandleValue v) {
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return nullptr;
+ }
+ cx->check(v);
+
+ switch (v.type()) {
+ case JS::ValueType::Undefined:
+ return cx->names().void0;
+
+ case JS::ValueType::String:
+ return StringToSource(cx, v.toString());
+
+ case JS::ValueType::Symbol:
+ return SymbolToSource(cx, v.toSymbol());
+
+ case JS::ValueType::Null:
+ return cx->names().null;
+
+ case JS::ValueType::Boolean:
+ return BooleanToString(cx, v.toBoolean());
+
+ case JS::ValueType::Double:
+ /* Special case to preserve negative zero, _contra_ toString. */
+ if (IsNegativeZero(v.toDouble())) {
+ static const Latin1Char negativeZero[] = {'-', '0'};
+
+ return NewStringCopyN<CanGC>(cx, negativeZero, std::size(negativeZero));
+ }
+ [[fallthrough]];
+ case JS::ValueType::Int32:
+ return ToString<CanGC>(cx, v);
+
+ case JS::ValueType::BigInt: {
+ RootedString str(cx, ToString<CanGC>(cx, v));
+ if (!str) {
+ return nullptr;
+ }
+
+ RootedString n(cx, cx->staticStrings().getUnit('n'));
+
+ return ConcatStrings<CanGC>(cx, str, n);
+ }
+
+#ifdef ENABLE_RECORD_TUPLE
+ case ValueType::ExtendedPrimitive: {
+ RootedObject obj(cx, &v.toExtendedPrimitive());
+ if (obj->is<TupleType>()) {
+ Rooted<TupleType*> tup(cx, &obj->as<TupleType>());
+ return TupleToSource(cx, tup);
+ }
+ if (obj->is<RecordType>()) {
+ return RecordToSource(cx, obj.as<RecordType>());
+ }
+ MOZ_CRASH("Unsupported ExtendedPrimitive");
+ }
+#endif
+
+ case JS::ValueType::Object: {
+ RootedValue fval(cx);
+ RootedObject obj(cx, &v.toObject());
+ if (!GetProperty(cx, obj, obj, cx->names().toSource, &fval)) {
+ return nullptr;
+ }
+ if (IsCallable(fval)) {
+ RootedValue v(cx);
+ if (!js::Call(cx, fval, obj, &v)) {
+ return nullptr;
+ }
+
+ return ToString<CanGC>(cx, v);
+ }
+
+ ESClass cls;
+ if (!GetBuiltinClass(cx, obj, &cls)) {
+ return nullptr;
+ }
+
+ // All ToSource functions must be able to handle wrapped objects!
+ switch (cls) {
+ case ESClass::Function:
+ return fun_toStringHelper(cx, obj, true);
+
+ case ESClass::Array:
+ return ArrayToSource(cx, obj);
+
+ case ESClass::Error:
+ return ErrorToSource(cx, obj);
+
+ case ESClass::RegExp: {
+ FixedInvokeArgs<0> args(cx);
+ RootedValue rval(cx);
+ if (!CallSelfHostedFunction(cx, cx->names().RegExpToString, v, args,
+ &rval)) {
+ return nullptr;
+ }
+ return ToString<CanGC>(cx, rval);
+ }
+
+ case ESClass::Boolean:
+ return BoxedToSource(cx, obj, "Boolean");
+
+ case ESClass::Number:
+ return BoxedToSource(cx, obj, "Number");
+
+ case ESClass::String:
+ return BoxedToSource(cx, obj, "String");
+
+ case ESClass::Date:
+ return BoxedToSource(cx, obj, "Date");
+
+ default:
+ return ObjectToSource(cx, obj);
+ }
+ }
+
+ case JS::ValueType::PrivateGCThing:
+ case JS::ValueType::Magic:
+ MOZ_ASSERT_UNREACHABLE(
+ "internal value types shouldn't leak into places "
+ "wanting source representations");
+ return nullptr;
+ }
+
+ MOZ_ASSERT_UNREACHABLE("shouldn't see an unrecognized value type");
+ return nullptr;
+}
diff --git a/js/src/vm/ToSource.h b/js/src/vm/ToSource.h
new file mode 100644
index 0000000000..583e3cb54a
--- /dev/null
+++ b/js/src/vm/ToSource.h
@@ -0,0 +1,26 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_ToSource_h
+#define vm_ToSource_h
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "js/RootingAPI.h" // JS::Handle
+#include "js/Value.h" // JS::Value
+
+struct JS_PUBLIC_API JSContext;
+class JS_PUBLIC_API JSString;
+
+namespace js {
+
+// Try to convert a value to its source expression, returning null after
+// reporting an error, otherwise returning a new string.
+extern JSString* ValueToSource(JSContext* cx, JS::Handle<JS::Value> v);
+
+} // namespace js
+
+#endif // vm_ToSource_h
diff --git a/js/src/vm/TupleType.cpp b/js/src/vm/TupleType.cpp
new file mode 100644
index 0000000000..a9b4df784c
--- /dev/null
+++ b/js/src/vm/TupleType.cpp
@@ -0,0 +1,639 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/TupleType.h"
+
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/HashFunctions.h"
+
+#include "jsapi.h"
+
+#include "builtin/TupleObject.h"
+#include "gc/Allocator.h"
+#include "gc/AllocKind.h"
+
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+#include "util/StringBuffer.h"
+#include "vm/EqualityOperations.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+#include "vm/RecordTupleShared.h"
+#include "vm/RecordType.h"
+#include "vm/SelfHosting.h"
+#include "vm/ToSource.h"
+
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+
+static bool TupleConstructor(JSContext* cx, unsigned argc, Value* vp);
+
+static const JSFunctionSpec tuple_static_methods[] = {
+ JS_FN("isTuple", tuple_is_tuple, 1, 0),
+ JS_SELF_HOSTED_FN("from", "TupleFrom", 1, 0), JS_FN("of", tuple_of, 0, 0),
+ JS_FS_END};
+
+static const JSFunctionSpec tuple_methods[] = {
+ JS_SELF_HOSTED_FN("toSorted", "TupleToSorted", 1, 0),
+ JS_SELF_HOSTED_FN("toSpliced", "TupleToSpliced", 2, 0),
+ JS_SELF_HOSTED_FN("concat", "TupleConcat", 0, 0),
+ JS_SELF_HOSTED_FN("includes", "TupleIncludes", 1, 0),
+ JS_SELF_HOSTED_FN("indexOf", "TupleIndexOf", 1, 0),
+ JS_SELF_HOSTED_FN("join", "TupleJoin", 1, 0),
+ JS_SELF_HOSTED_FN("lastIndexOf", "TupleLastIndexOf", 1, 0),
+ JS_SELF_HOSTED_FN("toLocaleString", "TupleToLocaleString", 2, 0),
+ JS_SELF_HOSTED_FN("toString", "TupleToString", 0, 0),
+ JS_SELF_HOSTED_FN("entries", "TupleEntries", 0, 0),
+ JS_SELF_HOSTED_FN("every", "TupleEvery", 1, 0),
+ JS_SELF_HOSTED_FN("filter", "TupleFilter", 1, 0),
+ JS_SELF_HOSTED_FN("find", "TupleFind", 1, 0),
+ JS_SELF_HOSTED_FN("findIndex", "TupleFindIndex", 1, 0),
+ JS_SELF_HOSTED_FN("forEach", "TupleForEach", 1, 0),
+ JS_SELF_HOSTED_FN("keys", "TupleKeys", 0, 0),
+ JS_SELF_HOSTED_FN("map", "TupleMap", 1, 0),
+ JS_SELF_HOSTED_FN("reduce", "TupleReduce", 1, 0),
+ JS_SELF_HOSTED_FN("reduceRight", "TupleReduceRight", 1, 0),
+ JS_SELF_HOSTED_FN("some", "TupleSome", 1, 0),
+ JS_SELF_HOSTED_FN("values", "$TupleValues", 0, 0),
+ JS_SELF_HOSTED_SYM_FN(iterator, "$TupleValues", 0, 0),
+ JS_SELF_HOSTED_FN("flat", "TupleFlat", 0, 0),
+ JS_SELF_HOSTED_FN("flatMap", "TupleFlatMap", 1, 0),
+ JS_SELF_HOSTED_FN("toReversed", "TupleToReversed", 0, 0),
+ JS_FN("with", tuple_with, 2, 0),
+ JS_FN("slice", tuple_slice, 2, 0),
+ JS_FN("valueOf", tuple_value_of, 0, 0),
+ JS_FS_END};
+
+Shape* TupleType::getInitialShape(JSContext* cx) {
+ // Tuples don't have slots, only integer-indexed elements.
+ return SharedShape::getInitialShape(cx, &TupleType::class_, cx->realm(),
+ TaggedProto(nullptr), 0);
+}
+
+// Prototype methods
+
+// Proposal
+// Tuple.prototype.with()
+bool js::tuple_with(JSContext* cx, unsigned argc, Value* vp) {
+ AutoGeckoProfilerEntry pseudoFrame(
+ cx, "Tuple.prototype.with", JS::ProfilingCategoryPair::JS,
+ uint32_t(ProfilingStackFrame::Flags::RELEVANT_FOR_JS));
+
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ /* Step 1. */
+ RootedValue v(cx, args.thisv());
+
+ mozilla::Maybe<TupleType&> maybeTuple = js::ThisTupleValue(cx, v);
+ if (!maybeTuple) {
+ return false;
+ }
+
+ Rooted<TupleType*> tuple(cx, &(*maybeTuple));
+
+ /* Step 2. */
+ uint64_t length = tuple->getDenseInitializedLength();
+ TupleType* list = TupleType::createUninitialized(cx, length);
+ if (!list) {
+ return false;
+ }
+
+ /* Step 4 */
+ uint64_t index;
+ if (!ToIndex(cx, args.get(0), JSMSG_BAD_TUPLE_INDEX, &index)) {
+ return false;
+ }
+ /* Step 5 */
+ if (index >= length) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_TUPLE_INDEX, "Tuple.with");
+ return false;
+ }
+ /* Step 6 */
+ RootedValue value(cx, args.get(1));
+ if (value.isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_RECORD_TUPLE_NO_OBJECT, "Tuple.with");
+ return false;
+ }
+ /* Step 7 */
+ uint64_t before = index;
+ uint64_t after = length - index - 1;
+ list->copyDenseElements(0, tuple->getDenseElements(), before);
+ list->setDenseInitializedLength(index + 1);
+ list->initDenseElement(index, value);
+ list->copyDenseElements(
+ index + 1, tuple->getDenseElements() + uint32_t(index + 1), after);
+ list->setDenseInitializedLength(length);
+ list->finishInitialization(cx);
+ /* Step 8 */
+ args.rval().setExtendedPrimitive(*list);
+ return true;
+}
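+// Observable behavior (sketch): #[1, 2, 3].with(1, 9) evaluates to
+// #[1, 9, 3]; an out-of-range index or an object replacement value
+// reports an error instead.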
+
+// Proposal
+// Tuple.prototype.slice()
+bool js::tuple_slice(JSContext* cx, unsigned argc, Value* vp) {
+ AutoGeckoProfilerEntry pseudoFrame(
+ cx, "Tuple.prototype.slice", JS::ProfilingCategoryPair::JS,
+ uint32_t(ProfilingStackFrame::Flags::RELEVANT_FOR_JS));
+
+ CallArgs args = CallArgsFromVp(argc, vp);
+ RootedValue v(cx, args.thisv());
+
+ /* Steps 1-2. */
+ mozilla::Maybe<TupleType&> maybeList = js::ThisTupleValue(cx, v);
+ if (!maybeList) {
+ return false;
+ }
+
+ Rooted<TupleType*> list(cx, &(*maybeList));
+ /* Step 3. */
+ uint32_t len = list->getDenseInitializedLength();
+
+ /* Step 4. */
+ double relativeStart;
+ if (!ToInteger(cx, args.get(0), &relativeStart)) {
+ return false;
+ }
+
+ /* Step 5. */
+ uint32_t k;
+ if (relativeStart < 0.0) {
+ k = std::max(len + relativeStart, 0.0);
+ } else {
+ k = std::min(relativeStart, double(len));
+ }
+
+ /* Step 6. */
+ double relativeEnd;
+ if (argc > 1 && !args.get(1).isUndefined()) {
+ if (!ToInteger(cx, args.get(1), &relativeEnd)) {
+ return false;
+ }
+ } else {
+ relativeEnd = len;
+ }
+
+ /* Step 7. */
+ uint32_t finalIndex;
+ if (relativeEnd < 0.0) {
+ finalIndex = std::max(len + relativeEnd, 0.0);
+ } else {
+ finalIndex = std::min(relativeEnd, double(len));
+ }
+
+ /* Step 8. */
+
+ uint32_t newLen = finalIndex >= k ? finalIndex - k : 0;
+ TupleType* newList = TupleType::createUninitialized(cx, newLen);
+ if (!newList) {
+ return false;
+ }
+
+ /* Step 9. */
+ HeapSlotArray oldElements = list->getDenseElements();
+ newList->copyDenseElements(0, oldElements + k, newLen);
+ newList->setDenseInitializedLength(newLen);
+ newList->finishInitialization(cx);
+ /* Step 10. */
+ args.rval().setExtendedPrimitive(*newList);
+ return true;
+}
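+// Observable behavior (sketch): #[1, 2, 3, 4].slice(1, 3) evaluates to
+// #[2, 3]; negative indices count back from the end, as in
+// Array.prototype.slice.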
+
+// Proposal
+// Tuple.prototype.valueOf()
+bool js::tuple_value_of(JSContext* cx, unsigned argc, Value* vp) {
+ AutoGeckoProfilerEntry pseudoFrame(
+ cx, "Tuple.prototype.valueOf", JS::ProfilingCategoryPair::JS,
+ uint32_t(ProfilingStackFrame::Flags::RELEVANT_FOR_JS));
+
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ /* Step 1. */
+ HandleValue thisv = args.thisv();
+ mozilla::Maybe<TupleType&> tuple = js::ThisTupleValue(cx, thisv);
+ if (!tuple) {
+ return false;
+ }
+
+ args.rval().setExtendedPrimitive(*tuple);
+ return true;
+}
+
+bool TupleType::copy(JSContext* cx, Handle<TupleType*> in,
+ MutableHandle<TupleType*> out) {
+ out.set(TupleType::createUninitialized(cx, in->length()));
+ if (!out) {
+ return false;
+ }
+ RootedValue v(cx), vCopy(cx);
+ for (uint32_t i = 0; i < in->length(); i++) {
+ // Let v = in[i]
+ v.set(in->getDenseElement(i));
+
+ // Copy v
+ if (!CopyRecordTupleElement(cx, v, &vCopy)) {
+ return false;
+ }
+
+ // Set result[i] to v
+ if (!out->initializeNextElement(cx, vCopy)) {
+ return false;
+ }
+ }
+ out->finishInitialization(cx);
+ return true;
+}
+
+TupleType* TupleType::create(JSContext* cx, uint32_t length,
+ const Value* elements) {
+ for (uint32_t index = 0; index < length; index++) {
+ if (!elements[index].isPrimitive()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_RECORD_TUPLE_NO_OBJECT);
+ return nullptr;
+ }
+ }
+
+ TupleType* tup = TupleType::createUninitialized(cx, length);
+ if (!tup) {
+ return nullptr;
+ }
+
+ tup->initDenseElements(elements, length);
+ tup->finishInitialization(cx);
+
+ return tup;
+}
+
+static TupleType* allocate(JSContext* cx, gc::AllocKind allocKind) {
+ Rooted<Shape*> shape(cx, TupleType::getInitialShape(cx));
+ if (!shape) {
+ return nullptr;
+ }
+
+ TupleType* tup =
+ cx->newCell<TupleType>(allocKind, gc::Heap::Default, &TupleType::class_);
+ if (!tup) {
+ return nullptr;
+ }
+
+ tup->initShape(shape);
+ tup->initEmptyDynamicSlots();
+ tup->initFixedElements(allocKind, 0);
+ return tup;
+}
+
+TupleType* TupleType::createUninitialized(JSContext* cx, uint32_t length) {
+ gc::AllocKind allocKind = GuessArrayGCKind(length);
+
+ TupleType* tup = allocate(cx, allocKind);
+ if (!tup) {
+ return nullptr;
+ }
+
+ if (!tup->ensureElements(cx, length)) {
+ return nullptr;
+ }
+
+ return tup;
+}
+
+bool TupleType::initializeNextElement(JSContext* cx, HandleValue elt) {
+ if (!elt.isPrimitive()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_RECORD_TUPLE_NO_OBJECT);
+ return false;
+ }
+
+ uint32_t length = getDenseInitializedLength();
+
+ if (!ensureElements(cx, length + 1)) {
+ return false;
+ }
+ setDenseInitializedLength(length + 1);
+ initDenseElement(length, elt);
+
+ return true;
+}
+
+void TupleType::finishInitialization(JSContext* cx) {
+ shrinkCapacityToInitializedLength(cx);
+
+ ObjectElements* header = getElementsHeader();
+ header->length = header->initializedLength;
+ header->setNotExtensible();
+ header->seal();
+ header->freeze();
+}
+
+bool TupleType::getOwnProperty(HandleId id, MutableHandleValue vp) const {
+ if (!id.isInt()) {
+ return false;
+ }
+
+ int32_t index = id.toInt();
+ if (index < 0 || uint32_t(index) >= length()) {
+ return false;
+ }
+
+ vp.set(getDenseElement(index));
+ return true;
+}
+
+js::HashNumber TupleType::hash(const TupleType::ElementHasher& hasher) const {
+ MOZ_ASSERT(isAtomized());
+
+ js::HashNumber h = mozilla::HashGeneric(length());
+ for (uint32_t i = 0; i < length(); i++) {
+ h = mozilla::AddToHash(h, hasher(getDenseElement(i)));
+ }
+ return h;
+}
+
+bool TupleType::ensureAtomized(JSContext* cx) {
+ if (isAtomized()) {
+ return true;
+ }
+
+ RootedValue child(cx);
+ bool changed;
+
+ for (uint32_t i = 0; i < length(); i++) {
+ child.set(getDenseElement(i));
+ if (!EnsureAtomized(cx, &child, &changed)) {
+ return false;
+ }
+ if (changed) {
+ // We cannot use setDenseElement(), because this object is frozen.
+ elements_[i].set(this, HeapSlot::Element, unshiftedIndex(i), child);
+ }
+ }
+
+ getElementsHeader()->setTupleIsAtomized();
+
+ return true;
+}
+
+bool TupleType::sameValueZero(JSContext* cx, TupleType* lhs, TupleType* rhs,
+ bool* equal) {
+ return sameValueWith<SameValueZero>(cx, lhs, rhs, equal);
+}
+
+bool TupleType::sameValue(JSContext* cx, TupleType* lhs, TupleType* rhs,
+ bool* equal) {
+ return sameValueWith<SameValue>(cx, lhs, rhs, equal);
+}
+
+bool TupleType::sameValueZero(TupleType* lhs, TupleType* rhs) {
+ MOZ_ASSERT(lhs->isAtomized());
+ MOZ_ASSERT(rhs->isAtomized());
+
+ if (lhs == rhs) {
+ return true;
+ }
+ if (lhs->length() != rhs->length()) {
+ return false;
+ }
+
+ Value v1, v2;
+
+ for (uint32_t index = 0; index < lhs->length(); index++) {
+ v1 = lhs->getDenseElement(index);
+ v2 = rhs->getDenseElement(index);
+
+ if (!js::SameValueZeroLinear(v1, v2)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <bool Comparator(JSContext*, HandleValue, HandleValue, bool*)>
+bool TupleType::sameValueWith(JSContext* cx, TupleType* lhs, TupleType* rhs,
+ bool* equal) {
+ MOZ_ASSERT(lhs->getElementsHeader()->isFrozen());
+ MOZ_ASSERT(rhs->getElementsHeader()->isFrozen());
+
+ if (lhs == rhs) {
+ *equal = true;
+ return true;
+ }
+
+ if (lhs->length() != rhs->length()) {
+ *equal = false;
+ return true;
+ }
+
+ *equal = true;
+
+ RootedValue v1(cx);
+ RootedValue v2(cx);
+
+ for (uint32_t index = 0; index < lhs->length(); index++) {
+ v1.set(lhs->getDenseElement(index));
+ v2.set(rhs->getDenseElement(index));
+
+ if (!Comparator(cx, v1, v2, equal)) {
+ return false;
+ }
+
+ if (!*equal) {
+ return true;
+ }
+ }
+
+ return true;
+}
+
+JSString* js::TupleToSource(JSContext* cx, Handle<TupleType*> tup) {
+ JSStringBuilder sb(cx);
+
+ if (!sb.append("#[")) {
+ return nullptr;
+ }
+
+ uint32_t length = tup->length();
+
+ RootedValue elt(cx);
+ for (uint32_t index = 0; index < length; index++) {
+ elt.set(tup->getDenseElement(index));
+
+ /* Get element's character string. */
+ JSString* str = ValueToSource(cx, elt);
+ if (!str) {
+ return nullptr;
+ }
+
+ /* Append element to buffer. */
+ if (!sb.append(str)) {
+ return nullptr;
+ }
+ if (index + 1 != length) {
+ if (!sb.append(", ")) {
+ return nullptr;
+ }
+ }
+ }
+
+ /* Finalize the buffer. */
+ if (!sb.append(']')) {
+ return nullptr;
+ }
+
+ return sb.finishString();
+}
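+// For example, a two-element tuple holding 1 and "a" is rendered as the
+// source string #[1, "a"].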
+
+// Record and Tuple proposal section 9.2.1
+bool TupleConstructor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 1.
+ if (args.isConstructing()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_NOT_CONSTRUCTOR, "Tuple");
+ return false;
+ }
+
+ TupleType* tup = TupleType::create(cx, args.length(), args.array());
+ if (!tup) {
+ return false;
+ }
+
+ args.rval().setExtendedPrimitive(*tup);
+ return true;
+}
+
+/*===========================================================================*\
+ BEGIN: Tuple.prototype methods
+\*===========================================================================*/
+
+static bool ArrayToTuple(JSContext* cx, const CallArgs& args) {
+ Rooted<ArrayObject*> aObj(cx, &args.rval().toObject().as<ArrayObject>());
+ TupleType* tup = TupleType::createUnchecked(cx, aObj);
+
+ if (!tup) {
+ return false;
+ }
+
+ args.rval().setExtendedPrimitive(*tup);
+ return true;
+}
+
+// Takes an array as a single argument and returns a tuple of the
+// array elements. This method copies the array, because the callee
+// may still hold a pointer to it and it would break garbage collection
+// to change the type of the object from ArrayObject to TupleType (which
+// is the only way to re-use the same object if it has fixed elements.)
+// Should only be called from self-hosted tuple methods;
+// assumes all elements are non-objects and the array is packed
+bool js::tuple_construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ MOZ_ASSERT(args[0].toObject().is<ArrayObject>());
+
+ args.rval().set(args[0]);
+ return ArrayToTuple(cx, args);
+}
+
+bool js::tuple_is_tuple(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return IsTupleUnchecked(cx, args);
+}
+
+TupleType* TupleType::createUnchecked(JSContext* cx,
+ Handle<ArrayObject*> aObj) {
+ size_t len = aObj->getDenseInitializedLength();
+ MOZ_ASSERT(aObj->getElementsHeader()->numShiftedElements() == 0);
+ TupleType* tup = createUninitialized(cx, len);
+ if (!tup) {
+ return nullptr;
+ }
+ tup->initDenseElements(aObj, 0, len);
+ tup->finishInitialization(cx);
+ return tup;
+}
+
+bool js::tuple_of(JSContext* cx, unsigned argc, Value* vp) {
+ /* Step 1 */
+ CallArgs args = CallArgsFromVp(argc, vp);
+ size_t len = args.length();
+ Value* items = args.array();
+
+ /* Step 2 */
+ for (size_t i = 0; i < len; i++) {
+ if (items[i].isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_RECORD_TUPLE_NO_OBJECT, "Tuple.of");
+ return false;
+ }
+ }
+ /* Step 3 */
+ ArrayObject* result = js::NewDenseCopiedArray(cx, len, items, GenericObject);
+ if (!result) {
+ return false;
+ }
+ args.rval().setObject(*result);
+ /* Step 4 */
+ return ArrayToTuple(cx, args);
+}
+
+bool js::IsTuple(const Value& v) {
+ if (v.isExtendedPrimitive()) {
+ return v.toExtendedPrimitive().is<TupleType>();
+ }
+ if (v.isObject()) {
+ return v.toObject().is<TupleObject>();
+ }
+ return false;
+}
+
+// Caller is responsible for rooting the result
+TupleType& TupleType::thisTupleValue(const Value& val) {
+ MOZ_ASSERT(IsTuple(val));
+ return (val.isExtendedPrimitive() ? val.toExtendedPrimitive().as<TupleType>()
+ : val.toObject().as<TupleObject>().unbox());
+}
+
+bool HandleIsTuple(HandleValue v) { return IsTuple(v.get()); }
+
+// 8.2.3.2 get Tuple.prototype.length
+bool lengthAccessor_impl(JSContext* cx, const CallArgs& args) {
+ // Step 1.
+ TupleType& tuple = TupleType::thisTupleValue(args.thisv().get());
+ // Step 2.
+ args.rval().setInt32(tuple.length());
+ return true;
+}
+
+bool TupleType::lengthAccessor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<HandleIsTuple, lengthAccessor_impl>(cx, args);
+}
+
+/*===========================================================================*\
+ END: Tuple.prototype methods
+\*===========================================================================*/
+
+const JSClass TupleType::class_ = {"tuple", 0, JS_NULL_CLASS_OPS,
+ &TupleType::classSpec_};
+
+const JSClass TupleType::protoClass_ = {
+ "Tuple.prototype", JSCLASS_HAS_CACHED_PROTO(JSProto_Tuple),
+ JS_NULL_CLASS_OPS, &TupleType::classSpec_};
+
+/* static */ const JSPropertySpec properties_[] = {
+ JS_STRING_SYM_PS(toStringTag, "Tuple", JSPROP_READONLY),
+ JS_PSG("length", TupleType::lengthAccessor, 0), JS_PS_END};
+
+const ClassSpec TupleType::classSpec_ = {
+ GenericCreateConstructor<TupleConstructor, 0, gc::AllocKind::FUNCTION>,
+ GenericCreatePrototype<TupleType>,
+ tuple_static_methods,
+ nullptr,
+ tuple_methods,
+ properties_,
+ nullptr};
diff --git a/js/src/vm/TupleType.h b/js/src/vm/TupleType.h
new file mode 100644
index 0000000000..efeafcac15
--- /dev/null
+++ b/js/src/vm/TupleType.h
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_TupleType_h
+#define vm_TupleType_h
+
+#include <cstdint>
+#include <functional>
+#include "vm/JSContext.h"
+#include "vm/NativeObject.h"
+
+namespace JS {
+
+class TupleType final : public js::NativeObject {
+ public:
+ static const js::ClassSpec classSpec_;
+ static const JSClass class_;
+ static const JSClass protoClass_;
+
+ public:
+ static TupleType* create(JSContext* cx, uint32_t length,
+ const Value* elements);
+
+ static TupleType* createUninitialized(JSContext* cx, uint32_t initialLength);
+
+ static TupleType* createUnchecked(JSContext* cx,
+ Handle<js::ArrayObject*> aObj);
+
+ bool initializeNextElement(JSContext* cx, HandleValue elt);
+ void finishInitialization(JSContext* cx);
+ static js::Shape* getInitialShape(JSContext* cx);
+
+ static bool copy(JSContext* cx, Handle<TupleType*> in,
+ MutableHandle<TupleType*> out);
+
+ bool getOwnProperty(HandleId id, MutableHandleValue vp) const;
+ inline uint32_t length() const { return getElementsHeader()->length; }
+
+ // Methods defined on Tuple.prototype
+ [[nodiscard]] static bool lengthAccessor(JSContext* cx, unsigned argc,
+ Value* vp);
+
+ // Comparison functions
+ static bool sameValueZero(JSContext* cx, TupleType* lhs, TupleType* rhs,
+ bool* equal);
+ static bool sameValue(JSContext* cx, TupleType* lhs, TupleType* rhs,
+ bool* equal);
+
+ using ElementHasher = std::function<js::HashNumber(const Value& child)>;
+ js::HashNumber hash(const ElementHasher& hasher) const;
+
+ bool ensureAtomized(JSContext* cx);
+ bool isAtomized() const { return getElementsHeader()->tupleIsAtomized(); }
+
+ // This can be used to compare atomized tuples.
+ static bool sameValueZero(TupleType* lhs, TupleType* rhs);
+
+ static TupleType& thisTupleValue(const Value& val);
+
+ private:
+ template <bool Comparator(JSContext*, HandleValue, HandleValue, bool*)>
+ static bool sameValueWith(JSContext* cx, TupleType* lhs, TupleType* rhs,
+ bool* equal);
+};
+
+} // namespace JS
+
+namespace js {
+
+extern JSString* TupleToSource(JSContext* cx, Handle<TupleType*> tup);
+
+bool IsTuple(const Value& v);
+
+extern bool tuple_toReversed(JSContext* cx, unsigned argc, Value* vp);
+extern bool tuple_with(JSContext* cx, unsigned argc, Value* vp);
+extern bool tuple_slice(JSContext* cx, unsigned argc, Value* vp);
+extern bool tuple_is_tuple(JSContext* cx, unsigned argc, Value* vp);
+extern bool tuple_value_of(JSContext* cx, unsigned argc, Value* vp);
+extern bool tuple_of(JSContext* cx, unsigned argc, Value* vp);
+extern bool tuple_construct(JSContext* cx, unsigned argc, Value* vp);
+
+} // namespace js
+
+#endif
diff --git a/js/src/vm/TypedArrayObject-inl.h b/js/src/vm/TypedArrayObject-inl.h
new file mode 100644
index 0000000000..f7318116ab
--- /dev/null
+++ b/js/src/vm/TypedArrayObject-inl.h
@@ -0,0 +1,769 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_TypedArrayObject_inl_h
+#define vm_TypedArrayObject_inl_h
+
+/* Utilities and common inline code for TypedArray */
+
+#include "vm/TypedArrayObject.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/FloatingPoint.h"
+
+#include <algorithm>
+#include <type_traits>
+
+#include "jsnum.h"
+
+#include "gc/Zone.h"
+#include "jit/AtomicOperations.h"
+#include "js/Conversions.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/Value.h"
+#include "util/DifferentialTesting.h"
+#include "util/Memory.h"
+#include "vm/ArrayObject.h"
+#include "vm/BigIntType.h"
+#include "vm/NativeObject.h"
+#include "vm/Uint8Clamped.h"
+
+#include "gc/ObjectKind-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/ObjectOperations-inl.h"
+
+namespace js {
+
+template <typename To, typename From>
+inline To ConvertNumber(From src);
+
+template <>
+inline int8_t ConvertNumber<int8_t, float>(float src) {
+ return JS::ToInt8(src);
+}
+
+template <>
+inline uint8_t ConvertNumber<uint8_t, float>(float src) {
+ return JS::ToUint8(src);
+}
+
+template <>
+inline uint8_clamped ConvertNumber<uint8_clamped, float>(float src) {
+ return uint8_clamped(src);
+}
+
+template <>
+inline int16_t ConvertNumber<int16_t, float>(float src) {
+ return JS::ToInt16(src);
+}
+
+template <>
+inline uint16_t ConvertNumber<uint16_t, float>(float src) {
+ return JS::ToUint16(src);
+}
+
+template <>
+inline int32_t ConvertNumber<int32_t, float>(float src) {
+ return JS::ToInt32(src);
+}
+
+template <>
+inline uint32_t ConvertNumber<uint32_t, float>(float src) {
+ return JS::ToUint32(src);
+}
+
+template <>
+inline int64_t ConvertNumber<int64_t, float>(float src) {
+ return JS::ToInt64(src);
+}
+
+template <>
+inline uint64_t ConvertNumber<uint64_t, float>(float src) {
+ return JS::ToUint64(src);
+}
+
+template <>
+inline int8_t ConvertNumber<int8_t, double>(double src) {
+ return JS::ToInt8(src);
+}
+
+template <>
+inline uint8_t ConvertNumber<uint8_t, double>(double src) {
+ return JS::ToUint8(src);
+}
+
+template <>
+inline uint8_clamped ConvertNumber<uint8_clamped, double>(double src) {
+ return uint8_clamped(src);
+}
+
+template <>
+inline int16_t ConvertNumber<int16_t, double>(double src) {
+ return JS::ToInt16(src);
+}
+
+template <>
+inline uint16_t ConvertNumber<uint16_t, double>(double src) {
+ return JS::ToUint16(src);
+}
+
+template <>
+inline int32_t ConvertNumber<int32_t, double>(double src) {
+ return JS::ToInt32(src);
+}
+
+template <>
+inline uint32_t ConvertNumber<uint32_t, double>(double src) {
+ return JS::ToUint32(src);
+}
+
+template <>
+inline int64_t ConvertNumber<int64_t, double>(double src) {
+ return JS::ToInt64(src);
+}
+
+template <>
+inline uint64_t ConvertNumber<uint64_t, double>(double src) {
+ return JS::ToUint64(src);
+}
+
+template <typename To, typename From>
+inline To ConvertNumber(From src) {
+ static_assert(
+ !std::is_floating_point_v<From> ||
+ (std::is_floating_point_v<From> && std::is_floating_point_v<To>),
+ "conversion from floating point to int should have been handled by "
+ "specializations above");
+ return To(src);
+}
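+// The distinction matters at the edges (illustrative): converting 300.0 to
+// uint8_t wraps modulo 2^8 and yields 44, while converting it to
+// uint8_clamped saturates and yields 255.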
+
+template <typename NativeType>
+struct TypeIDOfType;
+template <>
+struct TypeIDOfType<int8_t> {
+ static const Scalar::Type id = Scalar::Int8;
+ static const JSProtoKey protoKey = JSProto_Int8Array;
+};
+template <>
+struct TypeIDOfType<uint8_t> {
+ static const Scalar::Type id = Scalar::Uint8;
+ static const JSProtoKey protoKey = JSProto_Uint8Array;
+};
+template <>
+struct TypeIDOfType<int16_t> {
+ static const Scalar::Type id = Scalar::Int16;
+ static const JSProtoKey protoKey = JSProto_Int16Array;
+};
+template <>
+struct TypeIDOfType<uint16_t> {
+ static const Scalar::Type id = Scalar::Uint16;
+ static const JSProtoKey protoKey = JSProto_Uint16Array;
+};
+template <>
+struct TypeIDOfType<int32_t> {
+ static const Scalar::Type id = Scalar::Int32;
+ static const JSProtoKey protoKey = JSProto_Int32Array;
+};
+template <>
+struct TypeIDOfType<uint32_t> {
+ static const Scalar::Type id = Scalar::Uint32;
+ static const JSProtoKey protoKey = JSProto_Uint32Array;
+};
+template <>
+struct TypeIDOfType<int64_t> {
+ static const Scalar::Type id = Scalar::BigInt64;
+ static const JSProtoKey protoKey = JSProto_BigInt64Array;
+};
+template <>
+struct TypeIDOfType<uint64_t> {
+ static const Scalar::Type id = Scalar::BigUint64;
+ static const JSProtoKey protoKey = JSProto_BigUint64Array;
+};
+template <>
+struct TypeIDOfType<float> {
+ static const Scalar::Type id = Scalar::Float32;
+ static const JSProtoKey protoKey = JSProto_Float32Array;
+};
+template <>
+struct TypeIDOfType<double> {
+ static const Scalar::Type id = Scalar::Float64;
+ static const JSProtoKey protoKey = JSProto_Float64Array;
+};
+template <>
+struct TypeIDOfType<uint8_clamped> {
+ static const Scalar::Type id = Scalar::Uint8Clamped;
+ static const JSProtoKey protoKey = JSProto_Uint8ClampedArray;
+};
+
+class SharedOps {
+ public:
+ template <typename T>
+ static T load(SharedMem<T*> addr) {
+ return js::jit::AtomicOperations::loadSafeWhenRacy(addr);
+ }
+
+ template <typename T>
+ static void store(SharedMem<T*> addr, T value) {
+ js::jit::AtomicOperations::storeSafeWhenRacy(addr, value);
+ }
+
+ template <typename T>
+ static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
+ js::jit::AtomicOperations::memcpySafeWhenRacy(dest, src, size);
+ }
+
+ template <typename T>
+ static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
+ js::jit::AtomicOperations::memmoveSafeWhenRacy(dest, src, size);
+ }
+
+ template <typename T>
+ static void podCopy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
+ js::jit::AtomicOperations::podCopySafeWhenRacy(dest, src, nelem);
+ }
+
+ template <typename T>
+ static void podMove(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
+ js::jit::AtomicOperations::podMoveSafeWhenRacy(dest, src, nelem);
+ }
+
+ static SharedMem<void*> extract(TypedArrayObject* obj) {
+ return obj->dataPointerEither();
+ }
+};
+
+class UnsharedOps {
+ public:
+ template <typename T>
+ static T load(SharedMem<T*> addr) {
+ return *addr.unwrapUnshared();
+ }
+
+ template <typename T>
+ static void store(SharedMem<T*> addr, T value) {
+ *addr.unwrapUnshared() = value;
+ }
+
+ template <typename T>
+ static void memcpy(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
+ ::memcpy(dest.unwrapUnshared(), src.unwrapUnshared(), size);
+ }
+
+ template <typename T>
+ static void memmove(SharedMem<T*> dest, SharedMem<T*> src, size_t size) {
+ ::memmove(dest.unwrapUnshared(), src.unwrapUnshared(), size);
+ }
+
+ template <typename T>
+ static void podCopy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
+ // std::copy_n better matches the argument values/types of this
+ // function, but as noted below it allows the input/output ranges to
+ // overlap. std::copy does not, so use it so the compiler has extra
+ // ability to optimize.
+ const auto* first = src.unwrapUnshared();
+ const auto* last = first + nelem;
+ auto* result = dest.unwrapUnshared();
+ std::copy(first, last, result);
+ }
+
+ template <typename T>
+ static void podMove(SharedMem<T*> dest, SharedMem<T*> src, size_t n) {
+ // std::copy_n copies from |src| to |dest| starting from |src|, so
+ // input/output ranges *may* permissibly overlap, as this function
+ // allows.
+ const auto* start = src.unwrapUnshared();
+ auto* result = dest.unwrapUnshared();
+ std::copy_n(start, n, result);
+ }
+
+ static SharedMem<void*> extract(TypedArrayObject* obj) {
+ return SharedMem<void*>::unshared(obj->dataPointerUnshared());
+ }
+};
+
+template <typename T, typename Ops>
+class ElementSpecific {
+ public:
+ /*
+ * Copy |source|'s elements into |target|, starting at |target[offset]|.
+ * Act as if the assignments occurred from a fresh copy of |source|, in
+ * case the two memory ranges overlap.
+ */
+ static bool setFromTypedArray(Handle<TypedArrayObject*> target,
+ Handle<TypedArrayObject*> source,
+ size_t offset) {
+ // WARNING: |source| may be an unwrapped typed array from a different
+ // compartment. Proceed with caution!
+
+ MOZ_ASSERT(TypeIDOfType<T>::id == target->type(),
+ "calling wrong setFromTypedArray specialization");
+ MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
+ MOZ_ASSERT(!source->hasDetachedBuffer(), "source isn't detached");
+
+ MOZ_ASSERT(offset <= target->length());
+ MOZ_ASSERT(source->length() <= target->length() - offset);
+
+ if (TypedArrayObject::sameBuffer(target, source)) {
+ return setFromOverlappingTypedArray(target, source, offset);
+ }
+
+ SharedMem<T*> dest =
+ target->dataPointerEither().template cast<T*>() + offset;
+ size_t count = source->length();
+
+ if (source->type() == target->type()) {
+ Ops::podCopy(dest, source->dataPointerEither().template cast<T*>(),
+ count);
+ return true;
+ }
+
+ SharedMem<void*> data = Ops::extract(source);
+ switch (source->type()) {
+ case Scalar::Int8: {
+ SharedMem<int8_t*> src = data.cast<int8_t*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped: {
+ SharedMem<uint8_t*> src = data.cast<uint8_t*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::Int16: {
+ SharedMem<int16_t*> src = data.cast<int16_t*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::Uint16: {
+ SharedMem<uint16_t*> src = data.cast<uint16_t*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::Int32: {
+ SharedMem<int32_t*> src = data.cast<int32_t*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::Uint32: {
+ SharedMem<uint32_t*> src = data.cast<uint32_t*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::BigInt64: {
+ SharedMem<int64_t*> src = data.cast<int64_t*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::BigUint64: {
+ SharedMem<uint64_t*> src = data.cast<uint64_t*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::Float32: {
+ SharedMem<float*> src = data.cast<float*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ case Scalar::Float64: {
+ SharedMem<double*> src = data.cast<double*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("setFromTypedArray with a typed array with bogus type");
+ }
+
+ return true;
+ }
+
+ /*
+ * Copy elements |source[0]| up to |source[len]| (exclusive) into the
+ * typed array |target|, starting at index |offset|. |source| must not be
+ * a typed array.
+ */
+ static bool setFromNonTypedArray(JSContext* cx,
+ Handle<TypedArrayObject*> target,
+ HandleObject source, size_t len,
+ size_t offset = 0) {
+ MOZ_ASSERT(target->type() == TypeIDOfType<T>::id,
+ "target type and NativeType must match");
+ MOZ_ASSERT(!source->is<TypedArrayObject>(),
+ "use setFromTypedArray instead of this method");
+ MOZ_ASSERT_IF(target->hasDetachedBuffer(), target->length() == 0);
+ MOZ_ASSERT_IF(!target->hasDetachedBuffer(), offset <= target->length());
+ MOZ_ASSERT_IF(!target->hasDetachedBuffer(),
+ len <= target->length() - offset);
+
+ size_t i = 0;
+ if (source->is<NativeObject>() && !target->hasDetachedBuffer()) {
+ // Attempt fast-path infallible conversion of dense elements up to
+ // the first potentially side-effectful lookup or conversion.
+ size_t bound = std::min<size_t>(
+ source->as<NativeObject>().getDenseInitializedLength(), len);
+
+ SharedMem<T*> dest =
+ target->dataPointerEither().template cast<T*>() + offset;
+
+ MOZ_ASSERT(!canConvertInfallibly(MagicValue(JS_ELEMENTS_HOLE)),
+ "the following loop must abort on holes");
+
+ const Value* srcValues = source->as<NativeObject>().getDenseElements();
+ for (; i < bound; i++) {
+ if (!canConvertInfallibly(srcValues[i])) {
+ break;
+ }
+ Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
+ }
+ if (i == len) {
+ return true;
+ }
+ }
+
+ // Convert and copy any remaining elements generically.
+ RootedValue v(cx);
+ for (; i < len; i++) {
+ if constexpr (sizeof(i) == sizeof(uint32_t)) {
+ if (!GetElement(cx, source, source, uint32_t(i), &v)) {
+ return false;
+ }
+ } else {
+ if (!GetElementLargeIndex(cx, source, source, i, &v)) {
+ return false;
+ }
+ }
+
+ T n;
+ if (!valueToNative(cx, v, &n)) {
+ return false;
+ }
+
+ // Ignore out-of-bounds writes, but still execute getElement/valueToNative
+ // because of observable side-effects.
+ if (offset + i >= target->length()) {
+ continue;
+ }
+
+ MOZ_ASSERT(!target->hasDetachedBuffer());
+
+      // Recompute |dest| on every iteration, in case getElement/valueToNative
+      // detached the underlying array buffer or GC moved the data.
+ SharedMem<T*> dest =
+ target->dataPointerEither().template cast<T*>() + offset + i;
+ Ops::store(dest, n);
+ }
+
+ return true;
+ }
+
+  /*
+   * Copy the packed array |source| into the newly created typed array
+   * |target|.
+   */
+ static bool initFromIterablePackedArray(JSContext* cx,
+ Handle<TypedArrayObject*> target,
+ Handle<ArrayObject*> source) {
+ MOZ_ASSERT(target->type() == TypeIDOfType<T>::id,
+ "target type and NativeType must match");
+ MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
+ MOZ_ASSERT(IsPackedArray(source), "source array must be packed");
+ MOZ_ASSERT(source->getDenseInitializedLength() <= target->length());
+
+ size_t len = source->getDenseInitializedLength();
+ size_t i = 0;
+
+ // Attempt fast-path infallible conversion of dense elements up to the
+ // first potentially side-effectful conversion.
+
+ SharedMem<T*> dest = target->dataPointerEither().template cast<T*>();
+
+ const Value* srcValues = source->getDenseElements();
+ for (; i < len; i++) {
+ if (!canConvertInfallibly(srcValues[i])) {
+ break;
+ }
+ Ops::store(dest + i, infallibleValueToNative(srcValues[i]));
+ }
+ if (i == len) {
+ return true;
+ }
+
+    // Convert any remaining elements by first collecting them into a
+    // temporary list, and then copying them into the typed array. The
+    // conversions can run arbitrary code, which could otherwise mutate
+    // |source|'s dense elements out from under this loop.
+ RootedValueVector values(cx);
+ if (!values.append(srcValues + i, len - i)) {
+ return false;
+ }
+
+ RootedValue v(cx);
+ for (size_t j = 0; j < values.length(); i++, j++) {
+ v = values[j];
+
+ T n;
+ if (!valueToNative(cx, v, &n)) {
+ return false;
+ }
+
+ // |target| is a newly allocated typed array and not yet visible to
+ // content script, so valueToNative can't detach the underlying
+ // buffer.
+ MOZ_ASSERT(i < target->length());
+
+      // Recompute the destination pointer every iteration, in case GC moved
+      // the data.
+ SharedMem<T*> newDest = target->dataPointerEither().template cast<T*>();
+ Ops::store(newDest + i, n);
+ }
+
+ return true;
+ }
+
+ private:
+ static bool setFromOverlappingTypedArray(Handle<TypedArrayObject*> target,
+ Handle<TypedArrayObject*> source,
+ size_t offset) {
+ // WARNING: |source| may be an unwrapped typed array from a different
+ // compartment. Proceed with caution!
+
+ MOZ_ASSERT(TypeIDOfType<T>::id == target->type(),
+ "calling wrong setFromTypedArray specialization");
+ MOZ_ASSERT(!target->hasDetachedBuffer(), "target isn't detached");
+ MOZ_ASSERT(!source->hasDetachedBuffer(), "source isn't detached");
+ MOZ_ASSERT(TypedArrayObject::sameBuffer(target, source),
+ "the provided arrays don't actually overlap, so it's "
+ "undesirable to use this method");
+
+ MOZ_ASSERT(offset <= target->length());
+ MOZ_ASSERT(source->length() <= target->length() - offset);
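+
+    // Overlap arises when both views share one buffer, e.g. (illustrative):
+    //
+    //   var buf = new ArrayBuffer(16);
+    //   new Int8Array(buf).set(new Int16Array(buf, 0, 4));
+    //
+    // where source and target alias the same bytes with different types.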
+
+ SharedMem<T*> dest =
+ target->dataPointerEither().template cast<T*>() + offset;
+ size_t len = source->length();
+
+ if (source->type() == target->type()) {
+ SharedMem<T*> src = source->dataPointerEither().template cast<T*>();
+ Ops::podMove(dest, src, len);
+ return true;
+ }
+
+ // Copy |source| in case it overlaps the target elements being set.
+ size_t sourceByteLen = len * source->bytesPerElement();
+ void* data = target->zone()->template pod_malloc<uint8_t>(sourceByteLen);
+ if (!data) {
+ return false;
+ }
+ Ops::memcpy(SharedMem<void*>::unshared(data), source->dataPointerEither(),
+ sourceByteLen);
+
+ switch (source->type()) {
+ case Scalar::Int8: {
+ int8_t* src = static_cast<int8_t*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped: {
+ uint8_t* src = static_cast<uint8_t*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::Int16: {
+ int16_t* src = static_cast<int16_t*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::Uint16: {
+ uint16_t* src = static_cast<uint16_t*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::Int32: {
+ int32_t* src = static_cast<int32_t*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::Uint32: {
+ uint32_t* src = static_cast<uint32_t*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::BigInt64: {
+ int64_t* src = static_cast<int64_t*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::BigUint64: {
+ uint64_t* src = static_cast<uint64_t*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::Float32: {
+ float* src = static_cast<float*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ case Scalar::Float64: {
+ double* src = static_cast<double*>(data);
+ for (size_t i = 0; i < len; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(*src++));
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH(
+ "setFromOverlappingTypedArray with a typed array with bogus type");
+ }
+
+ js_free(data);
+ return true;
+ }
+
+ static bool canConvertInfallibly(const Value& v) {
+ if (TypeIDOfType<T>::id == Scalar::BigInt64 ||
+ TypeIDOfType<T>::id == Scalar::BigUint64) {
+ // Numbers, Null, Undefined, and Symbols throw a TypeError. Strings may
+ // OOM and Objects may have side-effects.
+ return v.isBigInt() || v.isBoolean();
+ }
+ // BigInts and Symbols throw a TypeError. Strings may OOM and Objects may
+ // have side-effects.
+ return v.isNumber() || v.isBoolean() || v.isNull() || v.isUndefined();
+ }
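+
+  // For example, assigning a plain Number to a BigInt64Array element must
+  // throw a TypeError, so only BigInts and Booleans convert infallibly for
+  // the BigInt types; strings such as "0x10" always take the fallible path
+  // through valueToNative below, because StringToNumber can OOM.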
+
+ static T infallibleValueToNative(const Value& v) {
+ if (TypeIDOfType<T>::id == Scalar::BigInt64) {
+ if (v.isBigInt()) {
+ return T(BigInt::toInt64(v.toBigInt()));
+ }
+ return T(v.toBoolean());
+ }
+ if (TypeIDOfType<T>::id == Scalar::BigUint64) {
+ if (v.isBigInt()) {
+ return T(BigInt::toUint64(v.toBigInt()));
+ }
+ return T(v.toBoolean());
+ }
+ if (v.isInt32()) {
+ return T(v.toInt32());
+ }
+ if (v.isDouble()) {
+ return doubleToNative(v.toDouble());
+ }
+ if (v.isBoolean()) {
+ return T(v.toBoolean());
+ }
+ if (v.isNull()) {
+ return T(0);
+ }
+
+ MOZ_ASSERT(v.isUndefined());
+ return TypeIsFloatingPoint<T>() ? T(JS::GenericNaN()) : T(0);
+ }
+
+ static bool valueToNative(JSContext* cx, HandleValue v, T* result) {
+ MOZ_ASSERT(!v.isMagic());
+
+ if (MOZ_LIKELY(canConvertInfallibly(v))) {
+ *result = infallibleValueToNative(v);
+ return true;
+ }
+
+ if (std::is_same_v<T, int64_t>) {
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigInt64(cx, v));
+ return true;
+ }
+
+ if (std::is_same_v<T, uint64_t>) {
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigUint64(cx, v));
+ return true;
+ }
+
+ double d;
+ MOZ_ASSERT(v.isString() || v.isObject() || v.isSymbol() || v.isBigInt());
+ if (!(v.isString() ? StringToNumber(cx, v.toString(), &d)
+ : ToNumber(cx, v, &d))) {
+ return false;
+ }
+
+ *result = doubleToNative(d);
+ return true;
+ }
+
+ static T doubleToNative(double d) {
+ if (TypeIsFloatingPoint<T>()) {
+ // The JS spec doesn't distinguish among different NaN values, and
+ // it deliberately doesn't specify the bit pattern written to a
+ // typed array when NaN is written into it. This bit-pattern
+ // inconsistency could confuse differential testing, so always
+ // canonicalize NaN values in differential testing.
+ if (js::SupportDifferentialTesting()) {
+ d = JS::CanonicalizeNaN(d);
+ }
+ return T(d);
+ }
+ if (MOZ_UNLIKELY(std::isnan(d))) {
+ return T(0);
+ }
+ if (TypeIDOfType<T>::id == Scalar::Uint8Clamped) {
+ return T(d);
+ }
+ if (TypeIsUnsigned<T>()) {
+ return T(JS::ToUint32(d));
+ }
+ return T(JS::ToInt32(d));
+ }
+};
+
+/* static */ gc::AllocKind js::TypedArrayObject::AllocKindForLazyBuffer(
+ size_t nbytes) {
+ MOZ_ASSERT(nbytes <= INLINE_BUFFER_LIMIT);
+ if (nbytes == 0) {
+ nbytes += sizeof(uint8_t);
+ }
+ size_t dataSlots = AlignBytes(nbytes, sizeof(Value)) / sizeof(Value);
+ MOZ_ASSERT(nbytes <= dataSlots * sizeof(Value));
+ return gc::GetGCObjectKind(FIXED_DATA_START + dataSlots);
+}
+
+} // namespace js
+
+#endif // vm_TypedArrayObject_inl_h
diff --git a/js/src/vm/TypedArrayObject.cpp b/js/src/vm/TypedArrayObject.cpp
new file mode 100644
index 0000000000..e86d22b3b1
--- /dev/null
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -0,0 +1,2998 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/TypedArrayObject-inl.h"
+#include "vm/TypedArrayObject.h"
+
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/IntegerTypeTraits.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/TextUtils.h"
+
+#include <algorithm>
+#include <iterator>
+#include <limits>
+#include <numeric>
+#include <string.h>
+#include <string_view>
+#if !defined(XP_WIN) && !defined(__wasi__)
+# include <sys/mman.h>
+#endif
+#include <type_traits>
+
+#include "jsnum.h"
+#include "jstypes.h"
+
+#include "builtin/Array.h"
+#include "builtin/DataViewObject.h"
+#include "gc/Barrier.h"
+#include "gc/MaybeRooted.h"
+#include "jit/InlinableNatives.h"
+#include "js/Conversions.h"
+#include "js/experimental/TypedData.h" // JS_GetArrayBufferViewType, JS_GetTypedArray{Length,ByteOffset,ByteLength}, JS_IsTypedArrayObject
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/PropertySpec.h"
+#include "js/ScalarType.h" // JS::Scalar::Type
+#include "js/UniquePtr.h"
+#include "js/Wrapper.h"
+#include "util/DifferentialTesting.h"
+#include "util/Text.h"
+#include "util/WindowsWrapper.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/PIC.h"
+#include "vm/SelfHosting.h"
+#include "vm/SharedMem.h"
+#include "vm/Uint8Clamped.h"
+#include "vm/WrapperObject.h"
+
+#include "gc/Nursery-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/Compartment-inl.h"
+#include "vm/GeckoProfiler-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+
+using JS::CanonicalizeNaN;
+using JS::ToInt32;
+using JS::ToUint32;
+using mozilla::IsAsciiDigit;
+
+/*
+ * TypedArrayObject
+ *
+ * The non-templated base class for the specific typed implementations.
+ * This class holds all the member variables that are used by
+ * the subclasses.
+ */
+
+bool TypedArrayObject::convertForSideEffect(JSContext* cx,
+ HandleValue v) const {
+ switch (type()) {
+ case Scalar::BigInt64:
+ case Scalar::BigUint64: {
+ return ToBigInt(cx, v) != nullptr;
+ }
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ case Scalar::Uint8Clamped: {
+ double ignore;
+ return ToNumber(cx, v, &ignore);
+ }
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+ MOZ_ASSERT_UNREACHABLE("Invalid scalar type");
+ return false;
+}
+
+/* static */
+bool TypedArrayObject::is(HandleValue v) {
+ return v.isObject() && v.toObject().is<TypedArrayObject>();
+}
+
+/* static */
+bool TypedArrayObject::ensureHasBuffer(JSContext* cx,
+ Handle<TypedArrayObject*> tarray) {
+ if (tarray->hasBuffer()) {
+ return true;
+ }
+
+ size_t byteLength = tarray->byteLength();
+
+ AutoRealm ar(cx, tarray);
+  Rooted<ArrayBufferObject*> buffer(
+      cx, ArrayBufferObject::createZeroed(cx, byteLength));
+ if (!buffer) {
+ return false;
+ }
+
+ // Attaching the first view to an array buffer is infallible.
+ MOZ_ALWAYS_TRUE(buffer->addView(cx, tarray));
+
+ // tarray is not shared, because if it were it would have a buffer.
+ memcpy(buffer->dataPointer(), tarray->dataPointerUnshared(), byteLength);
+
+ // If the object is in the nursery, the buffer will be freed by the next
+ // nursery GC. Free the data slot pointer if the object has no inline data.
+ size_t nbytes = RoundUp(byteLength, sizeof(Value));
+ Nursery& nursery = cx->nursery();
+ if (tarray->isTenured() && !tarray->hasInlineElements() &&
+ !nursery.isInside(tarray->elements())) {
+ js_free(tarray->elements());
+ RemoveCellMemory(tarray, nbytes, MemoryUse::TypedArrayElements);
+ }
+
+ tarray->setFixedSlot(TypedArrayObject::DATA_SLOT,
+ PrivateValue(buffer->dataPointer()));
+ tarray->setFixedSlot(TypedArrayObject::BUFFER_SLOT, ObjectValue(*buffer));
+
+ return true;
+}
+
+#ifdef DEBUG
+void TypedArrayObject::assertZeroLengthArrayData() const {
+ if (length() == 0 && !hasBuffer()) {
+ uint8_t* end = fixedData(TypedArrayObject::FIXED_DATA_START);
+ MOZ_ASSERT(end[0] == ZeroLengthArrayData);
+ }
+}
+#endif
+
+void TypedArrayObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ MOZ_ASSERT(!IsInsideNursery(obj));
+ TypedArrayObject* curObj = &obj->as<TypedArrayObject>();
+
+ // Template objects or discarded objects (which didn't have enough room
+ // for inner elements) don't have anything to free.
+ if (!curObj->elementsRaw()) {
+ return;
+ }
+
+ curObj->assertZeroLengthArrayData();
+
+  // Typed arrays with a buffer object do not need to be freed.
+ if (curObj->hasBuffer()) {
+ return;
+ }
+
+ // Free the data slot pointer if it does not point into the old JSObject.
+ if (!curObj->hasInlineElements()) {
+ size_t nbytes = RoundUp(curObj->byteLength(), sizeof(Value));
+ gcx->free_(obj, curObj->elements(), nbytes, MemoryUse::TypedArrayElements);
+ }
+}
+
+/* static */
+size_t TypedArrayObject::objectMoved(JSObject* obj, JSObject* old) {
+ TypedArrayObject* newObj = &obj->as<TypedArrayObject>();
+ const TypedArrayObject* oldObj = &old->as<TypedArrayObject>();
+ MOZ_ASSERT(newObj->elementsRaw() == oldObj->elementsRaw());
+ MOZ_ASSERT(obj->isTenured());
+
+ // Typed arrays with a buffer object do not need an update.
+ if (oldObj->hasBuffer()) {
+ return 0;
+ }
+
+ if (!IsInsideNursery(old)) {
+ // Update the data slot pointer if it points to the old JSObject.
+ if (oldObj->hasInlineElements()) {
+ newObj->setInlineElements();
+ }
+
+ return 0;
+ }
+
+ void* buf = oldObj->elements();
+
+ // Discarded objects (which didn't have enough room for inner elements) don't
+ // have any data to move.
+ if (!buf) {
+ return 0;
+ }
+
+ Nursery& nursery = obj->runtimeFromMainThread()->gc.nursery();
+ if (!nursery.isInside(buf)) {
+ nursery.removeMallocedBufferDuringMinorGC(buf);
+ size_t nbytes = RoundUp(newObj->byteLength(), sizeof(Value));
+ AddCellMemory(newObj, nbytes, MemoryUse::TypedArrayElements);
+ return 0;
+ }
+
+ // Determine if we can use inline data for the target array. If this is
+ // possible, the nursery will have picked an allocation size that is large
+ // enough.
+ size_t nbytes = oldObj->byteLength();
+ MOZ_ASSERT(nbytes <= Nursery::MaxNurseryBufferSize);
+
+ constexpr size_t headerSize = dataOffset() + sizeof(HeapSlot);
+
+ // See AllocKindForLazyBuffer.
+ gc::AllocKind newAllocKind = obj->asTenured().getAllocKind();
+ MOZ_ASSERT_IF(nbytes == 0,
+ headerSize + sizeof(uint8_t) <= GetGCKindBytes(newAllocKind));
+
+ if (headerSize + nbytes <= GetGCKindBytes(newAllocKind)) {
+ MOZ_ASSERT(oldObj->hasInlineElements());
+#ifdef DEBUG
+ if (nbytes == 0) {
+ uint8_t* output = newObj->fixedData(TypedArrayObject::FIXED_DATA_START);
+ output[0] = ZeroLengthArrayData;
+ }
+#endif
+ newObj->setInlineElements();
+ } else {
+ MOZ_ASSERT(!oldObj->hasInlineElements());
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ nbytes = RoundUp(nbytes, sizeof(Value));
+ void* data = newObj->zone()->pod_arena_malloc<uint8_t>(
+ js::ArrayBufferContentsArena, nbytes);
+ if (!data) {
+ oomUnsafe.crash(
+ "Failed to allocate typed array elements while tenuring.");
+ }
+ MOZ_ASSERT(!nursery.isInside(data));
+ newObj->setReservedSlot(DATA_SLOT, PrivateValue(data));
+ AddCellMemory(newObj, nbytes, MemoryUse::TypedArrayElements);
+ }
+
+ mozilla::PodCopy(newObj->elements(), oldObj->elements(), nbytes);
+
+ // Set a forwarding pointer for the element buffers in case they were
+ // preserved on the stack by Ion.
+ nursery.setForwardingPointerWhileTenuring(
+ oldObj->elements(), newObj->elements(),
+ /* direct = */ nbytes >= sizeof(uintptr_t));
+
+ return newObj->hasInlineElements() ? 0 : nbytes;
+}
+
+bool TypedArrayObject::hasInlineElements() const {
+ return elements() == this->fixedData(TypedArrayObject::FIXED_DATA_START) &&
+ byteLength() <= TypedArrayObject::INLINE_BUFFER_LIMIT;
+}
+
+void TypedArrayObject::setInlineElements() {
+ char* dataSlot = reinterpret_cast<char*>(this) + dataOffset();
+ *reinterpret_cast<void**>(dataSlot) =
+ this->fixedData(TypedArrayObject::FIXED_DATA_START);
+}
+
+/* Helper clamped uint8_t type */
+
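+// Observable from JS via Uint8ClampedArray, which rounds ties to even and
+// clamps out-of-range values (illustrative):
+//
+//   new Uint8ClampedArray([2.5])[0]  // 2   (tie rounds to even)
+//   new Uint8ClampedArray([3.5])[0]  // 4
+//   new Uint8ClampedArray([-1])[0]   // 0   (clamped)
+//   new Uint8ClampedArray([300])[0]  // 255 (clamped)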
+uint32_t js::ClampDoubleToUint8(const double x) {
+  // Written as !(x >= 0) rather than (x < 0) so that NaN coerces to 0.
+ if (!(x >= 0)) {
+ return 0;
+ }
+
+ if (x > 255) {
+ return 255;
+ }
+
+ double toTruncate = x + 0.5;
+ uint8_t y = uint8_t(toTruncate);
+
+  /*
+   * Now |y| is rounded to nearest, with ties rounded up. We want ties
+   * rounded to even, so check whether we had a tie.
+   */
+ if (y == toTruncate) {
+    /*
+     * It was a tie (since adding 0.5 gave us the exact integer we want).
+     * Because we rounded up, we either already have an even number or an
+     * odd number one greater than the value we want, so unconditionally
+     * masking out the ones bit yields the desired result.
+     */
+ return y & ~1;
+ }
+
+ return y;
+}
+
+namespace {
+
+static TypedArrayObject* NewTypedArrayObject(JSContext* cx,
+ const JSClass* clasp,
+ HandleObject proto,
+ gc::AllocKind allocKind,
+ gc::Heap heap) {
+ MOZ_ASSERT(proto);
+
+ MOZ_ASSERT(CanChangeToBackgroundAllocKind(allocKind, clasp));
+ allocKind = ForegroundToBackgroundAllocKind(allocKind);
+
+  // Typed arrays can store data inline, so we only use fixed slots to cover
+  // the reserved slots, ignoring the AllocKind.
+ MOZ_ASSERT(ClassCanHaveFixedData(clasp));
+ constexpr size_t nfixed = TypedArrayObject::RESERVED_SLOTS;
+ static_assert(nfixed <= NativeObject::MAX_FIXED_SLOTS);
+ static_assert(nfixed == TypedArrayObject::FIXED_DATA_START);
+
+ Rooted<SharedShape*> shape(
+ cx,
+ SharedShape::getInitialShape(cx, clasp, cx->realm(), AsTaggedProto(proto),
+ nfixed, ObjectFlags()));
+ if (!shape) {
+ return nullptr;
+ }
+
+ NativeObject* obj = NativeObject::create(cx, allocKind, heap, shape);
+ if (!obj) {
+ return nullptr;
+ }
+
+ return &obj->as<TypedArrayObject>();
+}
+
+template <typename NativeType>
+class TypedArrayObjectTemplate : public TypedArrayObject {
+ friend class TypedArrayObject;
+
+ public:
+ static constexpr Scalar::Type ArrayTypeID() {
+ return TypeIDOfType<NativeType>::id;
+ }
+ static constexpr JSProtoKey protoKey() {
+ return TypeIDOfType<NativeType>::protoKey;
+ }
+
+ static constexpr bool ArrayTypeIsUnsigned() {
+ return TypeIsUnsigned<NativeType>();
+ }
+ static constexpr bool ArrayTypeIsFloatingPoint() {
+ return TypeIsFloatingPoint<NativeType>();
+ }
+
+ static constexpr size_t BYTES_PER_ELEMENT = sizeof(NativeType);
+
+ static JSObject* createPrototype(JSContext* cx, JSProtoKey key) {
+ Handle<GlobalObject*> global = cx->global();
+ RootedObject typedArrayProto(
+ cx, GlobalObject::getOrCreateTypedArrayPrototype(cx, global));
+ if (!typedArrayProto) {
+ return nullptr;
+ }
+
+ const JSClass* clasp = TypedArrayObject::protoClassForType(ArrayTypeID());
+ return GlobalObject::createBlankPrototypeInheriting(cx, clasp,
+ typedArrayProto);
+ }
+
+ static JSObject* createConstructor(JSContext* cx, JSProtoKey key) {
+ Handle<GlobalObject*> global = cx->global();
+ RootedFunction ctorProto(
+ cx, GlobalObject::getOrCreateTypedArrayConstructor(cx, global));
+ if (!ctorProto) {
+ return nullptr;
+ }
+
+ JSFunction* fun = NewFunctionWithProto(
+ cx, class_constructor, 3, FunctionFlags::NATIVE_CTOR, nullptr,
+ ClassName(key, cx), ctorProto, gc::AllocKind::FUNCTION, TenuredObject);
+
+ if (fun) {
+ fun->setJitInfo(&jit::JitInfo_TypedArrayConstructor);
+ }
+
+ return fun;
+ }
+
+ static inline const JSClass* instanceClass() {
+ return TypedArrayObject::classForType(ArrayTypeID());
+ }
+
+ static bool is(HandleValue v) {
+ return v.isObject() && v.toObject().hasClass(instanceClass());
+ }
+
+ static bool convertValue(JSContext* cx, HandleValue v, NativeType* result);
+
+ static TypedArrayObject* newBuiltinClassInstance(JSContext* cx,
+ gc::AllocKind allocKind,
+ gc::Heap heap) {
+ RootedObject proto(cx, GlobalObject::getOrCreatePrototype(cx, protoKey()));
+ if (!proto) {
+ return nullptr;
+ }
+ return NewTypedArrayObject(cx, instanceClass(), proto, allocKind, heap);
+ }
+
+ static TypedArrayObject* makeProtoInstance(JSContext* cx, HandleObject proto,
+ gc::AllocKind allocKind) {
+ MOZ_ASSERT(proto);
+ return NewTypedArrayObject(cx, instanceClass(), proto, allocKind,
+ gc::Heap::Default);
+ }
+
+ static TypedArrayObject* makeInstance(
+ JSContext* cx, Handle<ArrayBufferObjectMaybeShared*> buffer,
+ size_t byteOffset, size_t len, HandleObject proto,
+ gc::Heap heap = gc::Heap::Default) {
+ MOZ_ASSERT(len <= MaxByteLength / BYTES_PER_ELEMENT);
+
+ gc::AllocKind allocKind =
+ buffer ? gc::GetGCObjectKind(instanceClass())
+ : AllocKindForLazyBuffer(len * BYTES_PER_ELEMENT);
+
+ AutoSetNewObjectMetadata metadata(cx);
+ Rooted<TypedArrayObject*> obj(cx);
+ if (proto) {
+ obj = makeProtoInstance(cx, proto, allocKind);
+ } else {
+ obj = newBuiltinClassInstance(cx, allocKind, heap);
+ }
+ if (!obj || !obj->init(cx, buffer, byteOffset, len, BYTES_PER_ELEMENT)) {
+ return nullptr;
+ }
+
+ return obj;
+ }
+
+ static TypedArrayObject* makeTemplateObject(JSContext* cx, int32_t len) {
+ MOZ_ASSERT(len >= 0);
+ size_t nbytes;
+ MOZ_ALWAYS_TRUE(CalculateAllocSize<NativeType>(len, &nbytes));
+ bool fitsInline = nbytes <= INLINE_BUFFER_LIMIT;
+ gc::AllocKind allocKind = !fitsInline ? gc::GetGCObjectKind(instanceClass())
+ : AllocKindForLazyBuffer(nbytes);
+ MOZ_ASSERT(allocKind >= gc::GetGCObjectKind(instanceClass()));
+
+ AutoSetNewObjectMetadata metadata(cx);
+
+ Rooted<TypedArrayObject*> tarray(
+ cx, newBuiltinClassInstance(cx, allocKind, gc::Heap::Tenured));
+ if (!tarray) {
+ return nullptr;
+ }
+
+ initTypedArraySlots(tarray, len);
+
+ // Template objects don't need memory for their elements, since there
+ // won't be any elements to store.
+ MOZ_ASSERT(tarray->getReservedSlot(DATA_SLOT).isUndefined());
+
+ return tarray;
+ }
+
+ static void initTypedArraySlots(TypedArrayObject* tarray, int32_t len) {
+ MOZ_ASSERT(len >= 0);
+ tarray->initFixedSlot(TypedArrayObject::BUFFER_SLOT, NullValue());
+ tarray->initFixedSlot(TypedArrayObject::LENGTH_SLOT, PrivateValue(len));
+ tarray->initFixedSlot(TypedArrayObject::BYTEOFFSET_SLOT,
+ PrivateValue(size_t(0)));
+
+#ifdef DEBUG
+ if (len == 0) {
+ uint8_t* output = tarray->fixedData(TypedArrayObject::FIXED_DATA_START);
+ output[0] = TypedArrayObject::ZeroLengthArrayData;
+ }
+#endif
+ }
+
+ static void initTypedArrayData(TypedArrayObject* tarray, void* buf,
+ size_t nbytes, gc::AllocKind allocKind) {
+ if (buf) {
+ InitReservedSlot(tarray, TypedArrayObject::DATA_SLOT, buf, nbytes,
+ MemoryUse::TypedArrayElements);
+ } else {
+#ifdef DEBUG
+ constexpr size_t dataOffset = ArrayBufferViewObject::dataOffset();
+ constexpr size_t offset = dataOffset + sizeof(HeapSlot);
+ MOZ_ASSERT(offset + nbytes <= GetGCKindBytes(allocKind));
+#endif
+
+ void* data = tarray->fixedData(FIXED_DATA_START);
+ tarray->initReservedSlot(DATA_SLOT, PrivateValue(data));
+ memset(data, 0, nbytes);
+ }
+ }
+
+ static TypedArrayObject* makeTypedArrayWithTemplate(
+ JSContext* cx, TypedArrayObject* templateObj, int32_t len) {
+ if (len < 0 || size_t(len) > MaxByteLength / BYTES_PER_ELEMENT) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return nullptr;
+ }
+
+ size_t nbytes = size_t(len) * BYTES_PER_ELEMENT;
+ MOZ_ASSERT(nbytes <= MaxByteLength);
+
+ bool fitsInline = nbytes <= INLINE_BUFFER_LIMIT;
+
+ AutoSetNewObjectMetadata metadata(cx);
+
+ gc::AllocKind allocKind = !fitsInline ? gc::GetGCObjectKind(instanceClass())
+ : AllocKindForLazyBuffer(nbytes);
+ MOZ_ASSERT(templateObj->getClass() == instanceClass());
+
+ RootedObject proto(cx, templateObj->staticPrototype());
+ TypedArrayObject* obj = makeProtoInstance(cx, proto, allocKind);
+ if (!obj) {
+ return nullptr;
+ }
+
+ initTypedArraySlots(obj, len);
+
+ void* buf = nullptr;
+ if (!fitsInline) {
+ MOZ_ASSERT(len > 0);
+
+ nbytes = RoundUp(nbytes, sizeof(Value));
+ buf = cx->nursery().allocateZeroedBuffer(obj, nbytes,
+ js::ArrayBufferContentsArena);
+ if (!buf) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ }
+
+ initTypedArrayData(obj, buf, nbytes, allocKind);
+
+ return obj;
+ }
+
+ static TypedArrayObject* makeTypedArrayWithTemplate(
+ JSContext* cx, TypedArrayObject* templateObj, HandleObject array) {
+ MOZ_ASSERT(!IsWrapper(array));
+ MOZ_ASSERT(!array->is<ArrayBufferObjectMaybeShared>());
+
+ return fromArray(cx, array);
+ }
+
+ static TypedArrayObject* makeTypedArrayWithTemplate(
+ JSContext* cx, TypedArrayObject* templateObj, HandleObject arrayBuffer,
+ HandleValue byteOffsetValue, HandleValue lengthValue) {
+ MOZ_ASSERT(!IsWrapper(arrayBuffer));
+ MOZ_ASSERT(arrayBuffer->is<ArrayBufferObjectMaybeShared>());
+
+ uint64_t byteOffset, length;
+ if (!byteOffsetAndLength(cx, byteOffsetValue, lengthValue, &byteOffset,
+ &length)) {
+ return nullptr;
+ }
+
+ return fromBufferSameCompartment(
+ cx, arrayBuffer.as<ArrayBufferObjectMaybeShared>(), byteOffset, length,
+ nullptr);
+ }
+
+ // ES2023 draft rev cf86f1cdc28e809170733d74ea64fd0f3dd79f78
+ // 23.2.5.1 TypedArray ( ...args )
+ static bool class_constructor(JSContext* cx, unsigned argc, Value* vp) {
+ AutoJSConstructorProfilerEntry pseudoFrame(cx, "[TypedArray]");
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Step 1.
+ if (!ThrowIfNotConstructing(cx, args, "typed array")) {
+ return false;
+ }
+
+ // Steps 2-6.
+ JSObject* obj = create(cx, args);
+ if (!obj) {
+ return false;
+ }
+ args.rval().setObject(*obj);
+ return true;
+ }
+
+ private:
+ static JSObject* create(JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(args.isConstructing());
+
+ // Steps 5 and 6.c.
+ if (args.length() == 0 || !args[0].isObject()) {
+ // Step 6.c.ii.
+ uint64_t len;
+ if (!ToIndex(cx, args.get(0), JSMSG_BAD_ARRAY_LENGTH, &len)) {
+ return nullptr;
+ }
+
+ // Steps 5.a and 6.c.iii.
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, protoKey(), &proto)) {
+ return nullptr;
+ }
+
+ return fromLength(cx, len, proto);
+ }
+
+ RootedObject dataObj(cx, &args[0].toObject());
+
+ // Step 6.b.i.
+ // 23.2.5.1.1 AllocateTypedArray, step 1.
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, protoKey(), &proto)) {
+ return nullptr;
+ }
+
+ // Steps 6.b.ii and 6.b.iv.
+ if (!UncheckedUnwrap(dataObj)->is<ArrayBufferObjectMaybeShared>()) {
+ return fromArray(cx, dataObj, proto);
+ }
+
+ // Steps 6.b.iii.1-2.
+ // 23.2.5.1.3 InitializeTypedArrayFromArrayBuffer, steps 2 and 4.
+ uint64_t byteOffset, length;
+ if (!byteOffsetAndLength(cx, args.get(1), args.get(2), &byteOffset,
+ &length)) {
+ return nullptr;
+ }
+
+ // Step 6.b.iii.3.
+ if (dataObj->is<ArrayBufferObjectMaybeShared>()) {
+ HandleArrayBufferObjectMaybeShared buffer =
+ dataObj.as<ArrayBufferObjectMaybeShared>();
+ return fromBufferSameCompartment(cx, buffer, byteOffset, length, proto);
+ }
+ return fromBufferWrapped(cx, dataObj, byteOffset, length, proto);
+ }
+
+ // ES2023 draft rev cf86f1cdc28e809170733d74ea64fd0f3dd79f78
+ // 23.2.5.1.3 InitializeTypedArrayFromArrayBuffer ( O, buffer, byteOffset,
+  // length ) Steps 2-4.
+ static bool byteOffsetAndLength(JSContext* cx, HandleValue byteOffsetValue,
+ HandleValue lengthValue, uint64_t* byteOffset,
+ uint64_t* length) {
+ // Step 2.
+ *byteOffset = 0;
+ if (!byteOffsetValue.isUndefined()) {
+ if (!ToIndex(cx, byteOffsetValue, byteOffset)) {
+ return false;
+ }
+
+      // Step 3.
+ if (*byteOffset % BYTES_PER_ELEMENT != 0) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_CONSTRUCT_OFFSET_BOUNDS,
+ Scalar::name(ArrayTypeID()),
+ Scalar::byteSizeString(ArrayTypeID()));
+ return false;
+ }
+ }
+
+ // Step 4.
+ *length = UINT64_MAX;
+ if (!lengthValue.isUndefined()) {
+ if (!ToIndex(cx, lengthValue, length)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
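+
+  // E.g. |new Int32Array(new ArrayBuffer(8), 2)| fails the alignment check
+  // above and throws a RangeError, because 2 % 4 != 0.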
+
+ // ES2023 draft rev cf86f1cdc28e809170733d74ea64fd0f3dd79f78
+ // 23.2.5.1.3 InitializeTypedArrayFromArrayBuffer ( O, buffer, byteOffset,
+ // length ) Steps 5-8.
+ static bool computeAndCheckLength(
+ JSContext* cx, HandleArrayBufferObjectMaybeShared bufferMaybeUnwrapped,
+ uint64_t byteOffset, uint64_t lengthIndex, size_t* length) {
+ MOZ_ASSERT(byteOffset % BYTES_PER_ELEMENT == 0);
+ MOZ_ASSERT(byteOffset < uint64_t(DOUBLE_INTEGRAL_PRECISION_LIMIT));
+ MOZ_ASSERT_IF(lengthIndex != UINT64_MAX,
+ lengthIndex < uint64_t(DOUBLE_INTEGRAL_PRECISION_LIMIT));
+
+ // Step 5.
+ if (bufferMaybeUnwrapped->isDetached()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return false;
+ }
+
+ // Step 6.
+ size_t bufferByteLength = bufferMaybeUnwrapped->byteLength();
+
+ size_t len;
+ if (lengthIndex == UINT64_MAX) {
+ // Steps 7.a and 7.c.
+ if (bufferByteLength % BYTES_PER_ELEMENT != 0) {
+ // The given byte array doesn't map exactly to
+ // |BYTES_PER_ELEMENT * N|
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_CONSTRUCT_OFFSET_MISALIGNED,
+ Scalar::name(ArrayTypeID()),
+ Scalar::byteSizeString(ArrayTypeID()));
+ return false;
+ }
+
+ if (byteOffset > bufferByteLength) {
+ // |byteOffset| is invalid.
+ JS_ReportErrorNumberASCII(
+ cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_CONSTRUCT_OFFSET_LENGTH_BOUNDS,
+ Scalar::name(ArrayTypeID()));
+ return false;
+ }
+
+ // Step 7.b.
+ size_t newByteLength = bufferByteLength - size_t(byteOffset);
+ len = newByteLength / BYTES_PER_ELEMENT;
+ } else {
+ // Step 8.a.
+ uint64_t newByteLength = lengthIndex * BYTES_PER_ELEMENT;
+
+ // Step 8.b.
+ if (byteOffset + newByteLength > bufferByteLength) {
+ // |byteOffset + newByteLength| is too big for the arraybuffer
+ JS_ReportErrorNumberASCII(
+ cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_CONSTRUCT_ARRAY_LENGTH_BOUNDS,
+ Scalar::name(ArrayTypeID()));
+ return false;
+ }
+
+ len = size_t(lengthIndex);
+ }
+
+ if (len > MaxByteLength / BYTES_PER_ELEMENT) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_CONSTRUCT_TOO_LARGE,
+ Scalar::name(ArrayTypeID()));
+ return false;
+ }
+
+ MOZ_ASSERT(len < SIZE_MAX);
+ *length = len;
+ return true;
+ }
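+
+  // For example, with a 6-byte buffer, |new Int32Array(buf)| throws per
+  // step 7.a (6 is not a multiple of 4), and |new Int32Array(buf, 0, 2)|
+  // throws per step 8.b (0 + 2 * 4 = 8 > 6).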
+
+ // ES2023 draft rev cf86f1cdc28e809170733d74ea64fd0f3dd79f78
+ // 23.2.5.1.3 InitializeTypedArrayFromArrayBuffer ( O, buffer, byteOffset,
+ // length ) Steps 5-13.
+ static TypedArrayObject* fromBufferSameCompartment(
+ JSContext* cx, HandleArrayBufferObjectMaybeShared buffer,
+ uint64_t byteOffset, uint64_t lengthIndex, HandleObject proto) {
+ // Steps 5-8.
+ size_t length = 0;
+ if (!computeAndCheckLength(cx, buffer, byteOffset, lengthIndex, &length)) {
+ return nullptr;
+ }
+
+ // Steps 9-13.
+ return makeInstance(cx, buffer, byteOffset, length, proto);
+ }
+
+ // Create a TypedArray object in another compartment.
+ //
+ // ES6 supports creating a TypedArray in global A (using global A's
+ // TypedArray constructor) backed by an ArrayBuffer created in global B.
+ //
+ // Our TypedArrayObject implementation doesn't support a TypedArray in
+ // compartment A backed by an ArrayBuffer in compartment B. So in this
+ // case, we create the TypedArray in B (!) and return a cross-compartment
+ // wrapper.
+ //
+ // Extra twist: the spec says the new TypedArray's [[Prototype]] must be
+ // A's TypedArray.prototype. So even though we're creating the TypedArray
+ // in B, its [[Prototype]] must be (a cross-compartment wrapper for) the
+ // TypedArray.prototype in A.
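+  //
+  // Illustrative JS (with |g| some other global object):
+  //
+  //   var buf = new g.ArrayBuffer(8);
+  //   var ta = new Int32Array(buf);  // allocated in buf's compartment,
+  //                                  // returned through a wrapper
+  //
+  // where |ta|'s [[Prototype]] chain starts at (a wrapper for) this
+  // global's Int32Array.prototype.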
+ static JSObject* fromBufferWrapped(JSContext* cx, HandleObject bufobj,
+ uint64_t byteOffset, uint64_t lengthIndex,
+ HandleObject proto) {
+ JSObject* unwrapped = CheckedUnwrapStatic(bufobj);
+ if (!unwrapped) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+
+ if (!unwrapped->is<ArrayBufferObjectMaybeShared>()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_BAD_ARGS);
+ return nullptr;
+ }
+
+ RootedArrayBufferObjectMaybeShared unwrappedBuffer(cx);
+ unwrappedBuffer = &unwrapped->as<ArrayBufferObjectMaybeShared>();
+
+ size_t length = 0;
+ if (!computeAndCheckLength(cx, unwrappedBuffer, byteOffset, lengthIndex,
+ &length)) {
+ return nullptr;
+ }
+
+ // Make sure to get the [[Prototype]] for the created typed array from
+ // this compartment.
+ RootedObject protoRoot(cx, proto);
+ if (!protoRoot) {
+ protoRoot = GlobalObject::getOrCreatePrototype(cx, protoKey());
+ if (!protoRoot) {
+ return nullptr;
+ }
+ }
+
+ RootedObject typedArray(cx);
+ {
+ JSAutoRealm ar(cx, unwrappedBuffer);
+
+ RootedObject wrappedProto(cx, protoRoot);
+ if (!cx->compartment()->wrap(cx, &wrappedProto)) {
+ return nullptr;
+ }
+
+ typedArray =
+ makeInstance(cx, unwrappedBuffer, byteOffset, length, wrappedProto);
+ if (!typedArray) {
+ return nullptr;
+ }
+ }
+
+ if (!cx->compartment()->wrap(cx, &typedArray)) {
+ return nullptr;
+ }
+
+ return typedArray;
+ }
+
+ public:
+ static JSObject* fromBuffer(JSContext* cx, HandleObject bufobj,
+ size_t byteOffset, int64_t lengthInt) {
+ if (byteOffset % BYTES_PER_ELEMENT != 0) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_CONSTRUCT_OFFSET_BOUNDS,
+ Scalar::name(ArrayTypeID()),
+ Scalar::byteSizeString(ArrayTypeID()));
+ return nullptr; // invalid byteOffset
+ }
+
+ uint64_t lengthIndex = lengthInt >= 0 ? uint64_t(lengthInt) : UINT64_MAX;
+ if (bufobj->is<ArrayBufferObjectMaybeShared>()) {
+ HandleArrayBufferObjectMaybeShared buffer =
+ bufobj.as<ArrayBufferObjectMaybeShared>();
+ return fromBufferSameCompartment(cx, buffer, byteOffset, lengthIndex,
+ nullptr);
+ }
+ return fromBufferWrapped(cx, bufobj, byteOffset, lengthIndex, nullptr);
+ }
+
+ static bool maybeCreateArrayBuffer(JSContext* cx, uint64_t count,
+ MutableHandle<ArrayBufferObject*> buffer) {
+ if (count > MaxByteLength / BYTES_PER_ELEMENT) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return false;
+ }
+ size_t byteLength = count * BYTES_PER_ELEMENT;
+
+ MOZ_ASSERT(byteLength <= MaxByteLength);
+ static_assert(INLINE_BUFFER_LIMIT % BYTES_PER_ELEMENT == 0,
+ "ArrayBuffer inline storage shouldn't waste any space");
+
+ if (byteLength <= INLINE_BUFFER_LIMIT) {
+ // The array's data can be inline, and the buffer created lazily.
+ return true;
+ }
+
+ ArrayBufferObject* buf = ArrayBufferObject::createZeroed(cx, byteLength);
+ if (!buf) {
+ return false;
+ }
+
+ buffer.set(buf);
+ return true;
+ }
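+
+  // As a result, small typed arrays such as |new Float64Array(4)| keep
+  // their elements inline in the object and only materialize an ArrayBuffer
+  // if one is actually requested (see TypedArrayObject::ensureHasBuffer).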
+
+ // ES2023 draft rev cf86f1cdc28e809170733d74ea64fd0f3dd79f78
+ // 23.2.5.1.1 AllocateTypedArray ( constructorName, newTarget, defaultProto [
+ // , length ] )
+ static TypedArrayObject* fromLength(JSContext* cx, uint64_t nelements,
+ HandleObject proto = nullptr,
+ gc::Heap heap = gc::Heap::Default) {
+ Rooted<ArrayBufferObject*> buffer(cx);
+ if (!maybeCreateArrayBuffer(cx, nelements, &buffer)) {
+ return nullptr;
+ }
+
+ return makeInstance(cx, buffer, 0, nelements, proto, heap);
+ }
+
+ static TypedArrayObject* fromArray(JSContext* cx, HandleObject other,
+ HandleObject proto = nullptr);
+
+ static TypedArrayObject* fromTypedArray(JSContext* cx, HandleObject other,
+ bool isWrapped, HandleObject proto);
+
+ static TypedArrayObject* fromObject(JSContext* cx, HandleObject other,
+ HandleObject proto);
+
+ static const NativeType getIndex(TypedArrayObject* tarray, size_t index) {
+ MOZ_ASSERT(index < tarray->length());
+ return jit::AtomicOperations::loadSafeWhenRacy(
+ tarray->dataPointerEither().cast<NativeType*>() + index);
+ }
+
+ static void setIndex(TypedArrayObject& tarray, size_t index, NativeType val) {
+ MOZ_ASSERT(index < tarray.length());
+ jit::AtomicOperations::storeSafeWhenRacy(
+ tarray.dataPointerEither().cast<NativeType*>() + index, val);
+ }
+
+ static bool getElement(JSContext* cx, TypedArrayObject* tarray, size_t index,
+ MutableHandleValue val);
+ static bool getElementPure(TypedArrayObject* tarray, size_t index, Value* vp);
+
+ static bool setElement(JSContext* cx, Handle<TypedArrayObject*> obj,
+ uint64_t index, HandleValue v, ObjectOpResult& result);
+};
+
+template <typename NativeType>
+bool TypedArrayObjectTemplate<NativeType>::convertValue(JSContext* cx,
+ HandleValue v,
+ NativeType* result) {
+ double d;
+ if (!ToNumber(cx, v, &d)) {
+ return false;
+ }
+
+ if (js::SupportDifferentialTesting()) {
+ // See the comment in ElementSpecific::doubleToNative.
+ d = JS::CanonicalizeNaN(d);
+ }
+
+ // Assign based on characteristics of the destination type
+ if constexpr (ArrayTypeIsFloatingPoint()) {
+ *result = NativeType(d);
+ } else if constexpr (ArrayTypeIsUnsigned()) {
+ static_assert(sizeof(NativeType) <= 4);
+ uint32_t n = ToUint32(d);
+ *result = NativeType(n);
+ } else if constexpr (ArrayTypeID() == Scalar::Uint8Clamped) {
+ // The uint8_clamped type has a special rounding converter
+ // for doubles.
+ *result = NativeType(d);
+ } else {
+ static_assert(sizeof(NativeType) <= 4);
+ int32_t n = ToInt32(d);
+ *result = NativeType(n);
+ }
+ return true;
+}
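+
+// For example, |ta[0] = 1e12| stores a type-dependent result:
+//
+//   Float64Array: 1e12        (representable exactly)
+//   Uint32Array:  3567587328  (ToUint32: 1e12 modulo 2^32)
+//   Int32Array:   -727379968  (ToInt32: wrapped into int32 range)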
+
+template <>
+bool TypedArrayObjectTemplate<int64_t>::convertValue(JSContext* cx,
+ HandleValue v,
+ int64_t* result) {
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigInt64(cx, v));
+ return true;
+}
+
+template <>
+bool TypedArrayObjectTemplate<uint64_t>::convertValue(JSContext* cx,
+ HandleValue v,
+ uint64_t* result) {
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, *result, ToBigUint64(cx, v));
+ return true;
+}
+
+// https://tc39.github.io/proposal-bigint/#sec-integerindexedelementset
+// 9.4.5.11 IntegerIndexedElementSet ( O, index, value )
+template <typename NativeType>
+/* static */ bool TypedArrayObjectTemplate<NativeType>::setElement(
+ JSContext* cx, Handle<TypedArrayObject*> obj, uint64_t index, HandleValue v,
+ ObjectOpResult& result) {
+ MOZ_ASSERT(!obj->hasDetachedBuffer());
+ MOZ_ASSERT(index < obj->length());
+
+ // Step 1 is enforced by the caller.
+
+ // Steps 2-3.
+ NativeType nativeValue;
+ if (!convertValue(cx, v, &nativeValue)) {
+ return false;
+ }
+
+ // Step 4.
+ if (index < obj->length()) {
+ MOZ_ASSERT(!obj->hasDetachedBuffer(),
+ "detaching an array buffer sets the length to zero");
+ TypedArrayObjectTemplate<NativeType>::setIndex(*obj, index, nativeValue);
+ }
+
+ // Step 5.
+ return result.succeed();
+}
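+
+// The re-check of |obj->length()| in step 4 exists because the conversion in
+// steps 2-3 can run arbitrary code. Illustrative:
+//
+//   var ta = new Int32Array(4);
+//   ta[0] = { valueOf() { /* code that detaches ta's buffer */ return 1; } };
+//
+// If the buffer was detached during valueOf, the length is now zero, the
+// write is silently dropped, and the assignment still succeeds.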
+
+#define CREATE_TYPE_FOR_TYPED_ARRAY(_, T, N) \
+ typedef TypedArrayObjectTemplate<T> N##Array;
+JS_FOR_EACH_TYPED_ARRAY(CREATE_TYPE_FOR_TYPED_ARRAY)
+#undef CREATE_TYPE_FOR_TYPED_ARRAY
+
+} /* anonymous namespace */
+
+TypedArrayObject* js::NewTypedArrayWithTemplateAndLength(
+ JSContext* cx, HandleObject templateObj, int32_t len) {
+ MOZ_ASSERT(templateObj->is<TypedArrayObject>());
+ TypedArrayObject* tobj = &templateObj->as<TypedArrayObject>();
+
+ switch (tobj->type()) {
+#define CREATE_TYPED_ARRAY(_, T, N) \
+ case Scalar::N: \
+ return TypedArrayObjectTemplate<T>::makeTypedArrayWithTemplate(cx, tobj, \
+ len);
+ JS_FOR_EACH_TYPED_ARRAY(CREATE_TYPED_ARRAY)
+#undef CREATE_TYPED_ARRAY
+ default:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+}
+
+TypedArrayObject* js::NewTypedArrayWithTemplateAndArray(
+ JSContext* cx, HandleObject templateObj, HandleObject array) {
+ MOZ_ASSERT(templateObj->is<TypedArrayObject>());
+ TypedArrayObject* tobj = &templateObj->as<TypedArrayObject>();
+
+ switch (tobj->type()) {
+#define CREATE_TYPED_ARRAY(_, T, N) \
+ case Scalar::N: \
+ return TypedArrayObjectTemplate<T>::makeTypedArrayWithTemplate(cx, tobj, \
+ array);
+ JS_FOR_EACH_TYPED_ARRAY(CREATE_TYPED_ARRAY)
+#undef CREATE_TYPED_ARRAY
+ default:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+}
+
+TypedArrayObject* js::NewTypedArrayWithTemplateAndBuffer(
+ JSContext* cx, HandleObject templateObj, HandleObject arrayBuffer,
+ HandleValue byteOffset, HandleValue length) {
+ MOZ_ASSERT(templateObj->is<TypedArrayObject>());
+ TypedArrayObject* tobj = &templateObj->as<TypedArrayObject>();
+
+ switch (tobj->type()) {
+#define CREATE_TYPED_ARRAY(_, T, N) \
+ case Scalar::N: \
+ return TypedArrayObjectTemplate<T>::makeTypedArrayWithTemplate( \
+ cx, tobj, arrayBuffer, byteOffset, length);
+ JS_FOR_EACH_TYPED_ARRAY(CREATE_TYPED_ARRAY)
+#undef CREATE_TYPED_ARRAY
+ default:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+}
+
+TypedArrayObject* js::NewUint8ArrayWithLength(JSContext* cx, int32_t len,
+ gc::Heap heap) {
+ return TypedArrayObjectTemplate<uint8_t>::fromLength(cx, len, nullptr, heap);
+}
+
+template <typename T>
+/* static */ TypedArrayObject* TypedArrayObjectTemplate<T>::fromArray(
+ JSContext* cx, HandleObject other, HandleObject proto /* = nullptr */) {
+ // Allow nullptr proto for FriendAPI methods, which don't care about
+ // subclassing.
+ if (other->is<TypedArrayObject>()) {
+ return fromTypedArray(cx, other, /* wrapped= */ false, proto);
+ }
+
+ if (other->is<WrapperObject>() &&
+ UncheckedUnwrap(other)->is<TypedArrayObject>()) {
+ return fromTypedArray(cx, other, /* wrapped= */ true, proto);
+ }
+
+ return fromObject(cx, other, proto);
+}
+
+// ES2023 draft rev cf86f1cdc28e809170733d74ea64fd0f3dd79f78
+// 23.2.5.1 TypedArray ( ...args )
+// 23.2.5.1.2 InitializeTypedArrayFromTypedArray ( O, srcArray )
+template <typename T>
+/* static */ TypedArrayObject* TypedArrayObjectTemplate<T>::fromTypedArray(
+ JSContext* cx, HandleObject other, bool isWrapped, HandleObject proto) {
+ MOZ_ASSERT_IF(!isWrapped, other->is<TypedArrayObject>());
+ MOZ_ASSERT_IF(isWrapped, other->is<WrapperObject>() &&
+ UncheckedUnwrap(other)->is<TypedArrayObject>());
+
+ Rooted<TypedArrayObject*> srcArray(cx);
+ if (!isWrapped) {
+ srcArray = &other->as<TypedArrayObject>();
+ } else {
+ srcArray = other->maybeUnwrapAs<TypedArrayObject>();
+ if (!srcArray) {
+ ReportAccessDenied(cx);
+ return nullptr;
+ }
+ }
+
+ // InitializeTypedArrayFromTypedArray, step 1. (Skipped)
+
+ // InitializeTypedArrayFromTypedArray, step 2.
+ if (srcArray->hasDetachedBuffer()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return nullptr;
+ }
+
+ // InitializeTypedArrayFromTypedArray, steps 3-7. (Skipped)
+
+ // InitializeTypedArrayFromTypedArray, step 8.
+ size_t elementLength = srcArray->length();
+
+ // InitializeTypedArrayFromTypedArray, step 9. (Skipped)
+
+ // InitializeTypedArrayFromTypedArray, step 10.a. (Partial)
+ // InitializeTypedArrayFromTypedArray, step 11.a.
+ Rooted<ArrayBufferObject*> buffer(cx);
+ if (!maybeCreateArrayBuffer(cx, elementLength, &buffer)) {
+ return nullptr;
+ }
+
+ // InitializeTypedArrayFromTypedArray, step 11.b.
+ if (Scalar::isBigIntType(ArrayTypeID()) !=
+ Scalar::isBigIntType(srcArray->type())) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_NOT_COMPATIBLE,
+ srcArray->getClass()->name,
+ TypedArrayObject::classes[ArrayTypeID()].name);
+ return nullptr;
+ }
+
+ // Step 6.b.i.
+ // InitializeTypedArrayFromTypedArray, steps 12-15.
+ Rooted<TypedArrayObject*> obj(
+ cx, makeInstance(cx, buffer, 0, elementLength, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(!srcArray->hasDetachedBuffer());
+
+ // InitializeTypedArrayFromTypedArray, steps 10.a. (Remaining parts)
+ // InitializeTypedArrayFromTypedArray, steps 11.c-f.
+ MOZ_ASSERT(!obj->isSharedMemory());
+ if (srcArray->isSharedMemory()) {
+ if (!ElementSpecific<T, SharedOps>::setFromTypedArray(obj, srcArray, 0)) {
+ return nullptr;
+ }
+ } else {
+ if (!ElementSpecific<T, UnsharedOps>::setFromTypedArray(obj, srcArray, 0)) {
+ return nullptr;
+ }
+ }
+
+ // Step 6.b.v.
+ return obj;
+}
+
+static MOZ_ALWAYS_INLINE bool IsOptimizableInit(JSContext* cx,
+ HandleObject iterable,
+ bool* optimized) {
+ MOZ_ASSERT(!*optimized);
+
+ if (!IsPackedArray(iterable)) {
+ return true;
+ }
+
+ ForOfPIC::Chain* stubChain = ForOfPIC::getOrCreate(cx);
+ if (!stubChain) {
+ return false;
+ }
+
+ return stubChain->tryOptimizeArray(cx, iterable.as<ArrayObject>(), optimized);
+}
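+
+// The optimized path applies to e.g. |new Float64Array([1, 2, 3])| as long
+// as Array.prototype[Symbol.iterator] and %ArrayIteratorPrototype%.next are
+// in their default state; modifying either falls back to the generic
+// iteration protocol below.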
+
+// ES2023 draft rev cf86f1cdc28e809170733d74ea64fd0f3dd79f78
+// 23.2.5.1 TypedArray ( ...args )
+// 23.2.5.1.4 InitializeTypedArrayFromList ( O, values )
+// 23.2.5.1.5 InitializeTypedArrayFromArrayLike ( O, arrayLike )
+template <typename T>
+/* static */ TypedArrayObject* TypedArrayObjectTemplate<T>::fromObject(
+ JSContext* cx, HandleObject other, HandleObject proto) {
+ // Steps 1-4 and 6.a (Already performed in caller).
+
+ // Steps 6.b.i (Allocation deferred until later).
+
+ // Steps 6.b.ii-iii. (Not applicable)
+
+ // Step 6.b.iv.
+
+ bool optimized = false;
+ if (!IsOptimizableInit(cx, other, &optimized)) {
+ return nullptr;
+ }
+
+ // Fast path when iterable is a packed array using the default iterator.
+ if (optimized) {
+ // Steps 6.b.iv.2-3. (We don't need to call IterableToList for the fast
+ // path).
+ Handle<ArrayObject*> array = other.as<ArrayObject>();
+
+ // InitializeTypedArrayFromList, step 1.
+ size_t len = array->getDenseInitializedLength();
+
+ // InitializeTypedArrayFromList, step 2.
+ Rooted<ArrayBufferObject*> buffer(cx);
+ if (!maybeCreateArrayBuffer(cx, len, &buffer)) {
+ return nullptr;
+ }
+
+ // Steps 6.b.i.
+ Rooted<TypedArrayObject*> obj(cx, makeInstance(cx, buffer, 0, len, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ // InitializeTypedArrayFromList, steps 3-4.
+ MOZ_ASSERT(!obj->isSharedMemory());
+ if (!ElementSpecific<T, UnsharedOps>::initFromIterablePackedArray(cx, obj,
+ array)) {
+ return nullptr;
+ }
+
+ // InitializeTypedArrayFromList, step 5. (The assertion isn't applicable for
+ // the fast path).
+
+ // Step 6.b.v.
+ return obj;
+ }
+
+ // Step 6.b.iv.1 (Assertion; implicit in our implementation).
+
+ // Step 6.b.iv.2.
+ RootedValue callee(cx);
+ RootedId iteratorId(cx, PropertyKey::Symbol(cx->wellKnownSymbols().iterator));
+ if (!GetProperty(cx, other, other, iteratorId, &callee)) {
+ return nullptr;
+ }
+
+ // Steps 6.b.iv.3-4.
+ RootedObject arrayLike(cx);
+ if (!callee.isNullOrUndefined()) {
+ // Throw if other[Symbol.iterator] isn't callable.
+ if (!callee.isObject() || !callee.toObject().isCallable()) {
+ RootedValue otherVal(cx, ObjectValue(*other));
+ UniqueChars bytes =
+ DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, otherVal, nullptr);
+ if (!bytes) {
+ return nullptr;
+ }
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_NOT_ITERABLE,
+ bytes.get());
+ return nullptr;
+ }
+
+ FixedInvokeArgs<2> args2(cx);
+ args2[0].setObject(*other);
+ args2[1].set(callee);
+
+ // Step 6.b.iv.3.a.
+ RootedValue rval(cx);
+ if (!CallSelfHostedFunction(cx, cx->names().IterableToList,
+ UndefinedHandleValue, args2, &rval)) {
+ return nullptr;
+ }
+
+ // Step 6.b.iv.3.b (Implemented below).
+ arrayLike = &rval.toObject();
+ } else {
+ // Step 4.a is an assertion: object is not an Iterator. Testing this is
+ // literally the very last thing we did, so we don't assert here.
+
+ // Step 4.b (Implemented below).
+ arrayLike = other;
+ }
+
+ // We implement InitializeTypedArrayFromList in terms of
+ // InitializeTypedArrayFromArrayLike.
+
+ // InitializeTypedArrayFromArrayLike, step 1.
+ uint64_t len;
+ if (!GetLengthProperty(cx, arrayLike, &len)) {
+ return nullptr;
+ }
+
+ // InitializeTypedArrayFromArrayLike, step 2.
+ Rooted<ArrayBufferObject*> buffer(cx);
+ if (!maybeCreateArrayBuffer(cx, len, &buffer)) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(len <= MaxByteLength / BYTES_PER_ELEMENT);
+
+ // Steps 6.b.i.
+ Rooted<TypedArrayObject*> obj(cx, makeInstance(cx, buffer, 0, len, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ // InitializeTypedArrayFromArrayLike, steps 3-4.
+ MOZ_ASSERT(!obj->isSharedMemory());
+ if (!ElementSpecific<T, UnsharedOps>::setFromNonTypedArray(cx, obj, arrayLike,
+ len)) {
+ return nullptr;
+ }
+
+ // Step 6.b.v.
+ return obj;
+}
+
+bool TypedArrayConstructor(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_CALL_OR_CONSTRUCT,
+ args.isConstructing() ? "construct" : "call");
+ return false;
+}
+
+template <typename T>
+static bool GetTemplateObjectForNative(JSContext* cx,
+ const JS::HandleValueArray args,
+ MutableHandleObject res) {
+ if (args.length() == 0) {
+ return true;
+ }
+
+ HandleValue arg = args[0];
+ if (arg.isInt32()) {
+ uint32_t len = 0;
+ if (arg.toInt32() >= 0) {
+ len = arg.toInt32();
+ }
+
+ size_t nbytes;
+ if (!js::CalculateAllocSize<T>(len, &nbytes) ||
+ nbytes > TypedArrayObject::MaxByteLength) {
+ return true;
+ }
+
+ res.set(TypedArrayObjectTemplate<T>::makeTemplateObject(cx, len));
+ return !!res;
+ }
+
+  // We don't support wrappers because of the complicated interaction between
+  // wrapped ArrayBuffers and TypedArrays; see |fromBufferWrapped()|.
+ if (arg.isObject() && !IsWrapper(&arg.toObject())) {
+ // We don't use the template's length in the object case, so we can create
+ // the template typed array with an initial length of zero.
+ uint32_t len = 0;
+ res.set(TypedArrayObjectTemplate<T>::makeTemplateObject(cx, len));
+ return !!res;
+ }
+
+ return true;
+}
+
+/* static */ bool TypedArrayObject::GetTemplateObjectForNative(
+ JSContext* cx, Native native, const JS::HandleValueArray args,
+ MutableHandleObject res) {
+ MOZ_ASSERT(!res);
+#define CHECK_TYPED_ARRAY_CONSTRUCTOR(_, T, N) \
+ if (native == &TypedArrayObjectTemplate<T>::class_constructor) { \
+ return ::GetTemplateObjectForNative<T>(cx, args, res); \
+ }
+ JS_FOR_EACH_TYPED_ARRAY(CHECK_TYPED_ARRAY_CONSTRUCTOR)
+#undef CHECK_TYPED_ARRAY_CONSTRUCTOR
+ return true;
+}
+
+static bool LengthGetterImpl(JSContext* cx, const CallArgs& args) {
+ auto* tarr = &args.thisv().toObject().as<TypedArrayObject>();
+ args.rval().set(tarr->lengthValue());
+ return true;
+}
+
+static bool TypedArray_lengthGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<TypedArrayObject::is, LengthGetterImpl>(cx, args);
+}
+
+static bool ByteOffsetGetterImpl(JSContext* cx, const CallArgs& args) {
+ auto* tarr = &args.thisv().toObject().as<TypedArrayObject>();
+ args.rval().set(tarr->byteOffsetValue());
+ return true;
+}
+
+static bool TypedArray_byteOffsetGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<TypedArrayObject::is, ByteOffsetGetterImpl>(cx,
+ args);
+}
+
+static bool ByteLengthGetterImpl(JSContext* cx, const CallArgs& args) {
+ auto* tarr = &args.thisv().toObject().as<TypedArrayObject>();
+ args.rval().set(tarr->byteLengthValue());
+ return true;
+}
+
+static bool TypedArray_byteLengthGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<TypedArrayObject::is, ByteLengthGetterImpl>(cx,
+ args);
+}
+
+static bool BufferGetterImpl(JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(TypedArrayObject::is(args.thisv()));
+ Rooted<TypedArrayObject*> tarray(
+ cx, &args.thisv().toObject().as<TypedArrayObject>());
+ if (!TypedArrayObject::ensureHasBuffer(cx, tarray)) {
+ return false;
+ }
+ args.rval().set(tarray->bufferValue());
+ return true;
+}
+
+static bool TypedArray_bufferGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<TypedArrayObject::is, BufferGetterImpl>(cx, args);
+}
+
+// ES2019 draft rev fc9ecdcd74294d0ca3146d4b274e2a8e79565dc3
+// 22.2.3.32 get %TypedArray%.prototype [ @@toStringTag ]
+static bool TypedArray_toStringTagGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // Steps 1-2.
+ if (!args.thisv().isObject()) {
+ args.rval().setUndefined();
+ return true;
+ }
+
+ JSObject* obj = CheckedUnwrapStatic(&args.thisv().toObject());
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ // Step 3.
+ if (!obj->is<TypedArrayObject>()) {
+ args.rval().setUndefined();
+ return true;
+ }
+
+ // Steps 4-6.
+ JSProtoKey protoKey = StandardProtoKeyOrNull(obj);
+ MOZ_ASSERT(protoKey);
+
+ args.rval().setString(ClassName(protoKey, cx));
+ return true;
+}
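+
+// E.g. |Object.prototype.toString.call(new Uint8Array())| evaluates to
+// "[object Uint8Array]" because this getter supplies the class name, whereas
+// a non-typed-array receiver yields undefined (steps 1-3) instead of
+// throwing.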
+
+/* static */ const JSPropertySpec TypedArrayObject::protoAccessors[] = {
+ JS_PSG("length", TypedArray_lengthGetter, 0),
+ JS_PSG("buffer", TypedArray_bufferGetter, 0),
+ JS_PSG("byteLength", TypedArray_byteLengthGetter, 0),
+ JS_PSG("byteOffset", TypedArray_byteOffsetGetter, 0),
+ JS_SYM_GET(toStringTag, TypedArray_toStringTagGetter, 0),
+ JS_PS_END};
+
+template <typename T>
+static inline bool SetFromTypedArray(Handle<TypedArrayObject*> target,
+ Handle<TypedArrayObject*> source,
+ size_t offset) {
+ // WARNING: |source| may be an unwrapped typed array from a different
+ // compartment. Proceed with caution!
+
+ if (target->isSharedMemory() || source->isSharedMemory()) {
+ return ElementSpecific<T, SharedOps>::setFromTypedArray(target, source,
+ offset);
+ }
+ return ElementSpecific<T, UnsharedOps>::setFromTypedArray(target, source,
+ offset);
+}
+
+template <typename T>
+static inline bool SetFromNonTypedArray(JSContext* cx,
+ Handle<TypedArrayObject*> target,
+ HandleObject source, size_t len,
+ size_t offset) {
+ MOZ_ASSERT(!source->is<TypedArrayObject>(), "use SetFromTypedArray");
+
+ if (target->isSharedMemory()) {
+ return ElementSpecific<T, SharedOps>::setFromNonTypedArray(
+ cx, target, source, len, offset);
+ }
+ return ElementSpecific<T, UnsharedOps>::setFromNonTypedArray(
+ cx, target, source, len, offset);
+}
+
+// ES2023 draft rev 22cc56ab08fcab92a865978c0aa5c6f1d8ce250f
+// 23.2.3.24.1 SetTypedArrayFromTypedArray ( target, targetOffset, source )
+static bool SetTypedArrayFromTypedArray(JSContext* cx,
+ Handle<TypedArrayObject*> target,
+ double targetOffset,
+ Handle<TypedArrayObject*> source) {
+ // WARNING: |source| may be an unwrapped typed array from a different
+ // compartment. Proceed with caution!
+
+ MOZ_ASSERT(targetOffset >= 0);
+
+ // Steps 1-2. (Performed in caller.)
+ MOZ_ASSERT(!target->hasDetachedBuffer());
+
+ // Steps 4-5.
+ if (source->hasDetachedBuffer()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return false;
+ }
+
+ // Step 3 (Reordered).
+ size_t targetLength = target->length();
+
+ // Steps 13-14 (Split into two checks to provide better error messages).
+ if (targetOffset > targetLength) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
+ return false;
+ }
+
+ // Step 14 (Cont'd).
+ size_t offset = size_t(targetOffset);
+ if (source->length() > targetLength - offset) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SOURCE_ARRAY_TOO_LONG);
+ return false;
+ }
+
+ // Step 15.
+ if (Scalar::isBigIntType(target->type()) !=
+ Scalar::isBigIntType(source->type())) {
+ JS_ReportErrorNumberASCII(
+ cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_NOT_COMPATIBLE,
+ source->getClass()->name, target->getClass()->name);
+ return false;
+ }
+
+ // Steps 6-12, 16-24.
+ switch (target->type()) {
+#define SET_FROM_TYPED_ARRAY(_, T, N) \
+ case Scalar::N: \
+ if (!SetFromTypedArray<T>(target, source, offset)) return false; \
+ break;
+ JS_FOR_EACH_TYPED_ARRAY(SET_FROM_TYPED_ARRAY)
+#undef SET_FROM_TYPED_ARRAY
+ default:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+
+ return true;
+}
+
+// ES2023 draft rev 22cc56ab08fcab92a865978c0aa5c6f1d8ce250f
+// 23.2.3.24.2 SetTypedArrayFromArrayLike ( target, targetOffset, source )
+static bool SetTypedArrayFromArrayLike(JSContext* cx,
+ Handle<TypedArrayObject*> target,
+ double targetOffset, HandleObject src) {
+ MOZ_ASSERT(targetOffset >= 0);
+
+ // Steps 1-2. (Performed in caller.)
+ MOZ_ASSERT(!target->hasDetachedBuffer());
+
+ // Step 3.
+ // We can't reorder this step because side-effects in step 5 can detach the
+ // underlying array buffer from the typed array.
+ size_t targetLength = target->length();
+
+ // Step 4. (Performed in caller.)
+
+ // Step 5.
+ uint64_t srcLength;
+ if (!GetLengthProperty(cx, src, &srcLength)) {
+ return false;
+ }
+
+ // Steps 6-7 (Split into two checks to provide better error messages).
+ if (targetOffset > targetLength) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
+ return false;
+ }
+
+ // Step 7 (Cont'd).
+ size_t offset = size_t(targetOffset);
+ if (srcLength > targetLength - offset) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SOURCE_ARRAY_TOO_LONG);
+ return false;
+ }
+
+ MOZ_ASSERT(srcLength <= targetLength);
+
+ // Steps 8-9.
+ if (srcLength > 0) {
+ switch (target->type()) {
+#define SET_FROM_NON_TYPED_ARRAY(_, T, N) \
+ case Scalar::N: \
+ if (!SetFromNonTypedArray<T>(cx, target, src, srcLength, offset)) \
+ return false; \
+ break;
+ JS_FOR_EACH_TYPED_ARRAY(SET_FROM_NON_TYPED_ARRAY)
+#undef SET_FROM_NON_TYPED_ARRAY
+ default:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+ }
+
+ // Step 10.
+ return true;
+}
+
+// ES2023 draft rev 22cc56ab08fcab92a865978c0aa5c6f1d8ce250f
+// 23.2.3.24 %TypedArray%.prototype.set ( source [ , offset ] )
+// 23.2.3.24.1 SetTypedArrayFromTypedArray ( target, targetOffset, source )
+// 23.2.3.24.2 SetTypedArrayFromArrayLike ( target, targetOffset, source )
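+//
+// Example (JS, illustrative): |new Uint8Array(4).set([1, 2], 1)| takes the
+// SetTypedArrayFromArrayLike path, while passing another typed array as the
+// source takes the SetTypedArrayFromTypedArray path instead.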
+/* static */
+bool TypedArrayObject::set_impl(JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(TypedArrayObject::is(args.thisv()));
+
+ // Steps 1-3 (Validation performed as part of CallNonGenericMethod).
+ Rooted<TypedArrayObject*> target(
+ cx, &args.thisv().toObject().as<TypedArrayObject>());
+
+ // Steps 4-5.
+ double targetOffset = 0;
+ if (args.length() > 1) {
+ // Step 4.
+ if (!ToInteger(cx, args[1], &targetOffset)) {
+ return false;
+ }
+
+ // Step 5.
+ if (targetOffset < 0) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
+ return false;
+ }
+ }
+
+ // 23.2.3.24.1, steps 1-2.
+ // 23.2.3.24.2, steps 1-2.
+ if (target->hasDetachedBuffer()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return false;
+ }
+
+  // 23.2.3.24.2, step 4. (23.2.3.24.1 only applies if args[0] is a typed
+  // array, so applying ToObject here makes no difference in that case.)
+ RootedObject src(cx, ToObject(cx, args.get(0)));
+ if (!src) {
+ return false;
+ }
+
+ Rooted<TypedArrayObject*> srcTypedArray(cx);
+ {
+ JSObject* obj = CheckedUnwrapStatic(src);
+ if (!obj) {
+ ReportAccessDenied(cx);
+ return false;
+ }
+
+ if (obj->is<TypedArrayObject>()) {
+ srcTypedArray = &obj->as<TypedArrayObject>();
+ }
+ }
+
+ // Steps 6-7.
+ if (srcTypedArray) {
+ if (!SetTypedArrayFromTypedArray(cx, target, targetOffset, srcTypedArray)) {
+ return false;
+ }
+ } else {
+ if (!SetTypedArrayFromArrayLike(cx, target, targetOffset, src)) {
+ return false;
+ }
+ }
+
+ // Step 8.
+ args.rval().setUndefined();
+ return true;
+}
+
+/* static */
+bool TypedArrayObject::set(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<TypedArrayObject::is, TypedArrayObject::set_impl>(
+ cx, args);
+}
+
+// ES2020 draft rev dc1e21c454bd316810be1c0e7af0131a2d7f38e9
+// 22.2.3.5 %TypedArray%.prototype.copyWithin ( target, start [ , end ] )
+/* static */
+bool TypedArrayObject::copyWithin_impl(JSContext* cx, const CallArgs& args) {
+ MOZ_ASSERT(TypedArrayObject::is(args.thisv()));
+
+ // Steps 1-2.
+ Rooted<TypedArrayObject*> tarray(
+ cx, &args.thisv().toObject().as<TypedArrayObject>());
+ if (tarray->hasDetachedBuffer()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return false;
+ }
+
+ // Step 3.
+ size_t len = tarray->length();
+
+ // Step 4.
+ double relativeTarget;
+ if (!ToInteger(cx, args.get(0), &relativeTarget)) {
+ return false;
+ }
+
+ // Step 5.
+ uint64_t to;
+ if (relativeTarget < 0) {
+ to = std::max(len + relativeTarget, 0.0);
+ } else {
+ to = std::min(relativeTarget, double(len));
+ }
+
+ // Step 6.
+ double relativeStart;
+ if (!ToInteger(cx, args.get(1), &relativeStart)) {
+ return false;
+ }
+
+ // Step 7.
+ uint64_t from;
+ if (relativeStart < 0) {
+ from = std::max(len + relativeStart, 0.0);
+ } else {
+ from = std::min(relativeStart, double(len));
+ }
+
+ // Step 8.
+ double relativeEnd;
+ if (!args.hasDefined(2)) {
+ relativeEnd = len;
+ } else {
+ if (!ToInteger(cx, args[2], &relativeEnd)) {
+ return false;
+ }
+ }
+
+ // Step 9.
+ uint64_t final_;
+ if (relativeEnd < 0) {
+ final_ = std::max(len + relativeEnd, 0.0);
+ } else {
+ final_ = std::min(relativeEnd, double(len));
+ }
+
+ // Step 10.
+ MOZ_ASSERT(to <= len);
+ uint64_t count;
+ if (from <= final_) {
+ count = std::min(final_ - from, len - to);
+ } else {
+ count = 0;
+ }
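+
+  // For example (illustrative): on a length-10 array, copyWithin(-3, 1, 4)
+  // clamps to |to = 7|, |from = 1|, |final_ = 4|, giving |count = 3|.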
+
+ // Step 11.
+ //
+ // Note that getting or setting a typed array element must throw if the
+ // underlying buffer is detached, so the code below checks for detachment.
+ // This happens *only* if a get/set occurs, i.e. when |count > 0|.
+ //
+ // Also note that this copies elements effectively by memmove, *not* in
+ // step 11's specified order. This is unobservable, even when the underlying
+ // buffer is a SharedArrayBuffer instance, because the access is unordered and
+ // therefore is allowed to have data races.
+
+ if (count == 0) {
+ args.rval().setObject(*tarray);
+ return true;
+ }
+
+ if (tarray->hasDetachedBuffer()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPED_ARRAY_DETACHED);
+ return false;
+ }
+
+ // Don't multiply by |tarray->bytesPerElement()| in case the compiler can't
+ // strength-reduce multiplication by 1/2/4/8 into the equivalent shift.
+ const size_t ElementShift = TypedArrayShift(tarray->type());
+
+ MOZ_ASSERT((SIZE_MAX >> ElementShift) > to);
+ size_t byteDest = to << ElementShift;
+
+ MOZ_ASSERT((SIZE_MAX >> ElementShift) > from);
+ size_t byteSrc = from << ElementShift;
+
+ MOZ_ASSERT((SIZE_MAX >> ElementShift) >= count);
+ size_t byteSize = count << ElementShift;
+
+#ifdef DEBUG
+ {
+ size_t viewByteLength = tarray->byteLength();
+ MOZ_ASSERT(byteSize <= viewByteLength);
+ MOZ_ASSERT(byteDest < viewByteLength);
+ MOZ_ASSERT(byteSrc < viewByteLength);
+ MOZ_ASSERT(byteDest <= viewByteLength - byteSize);
+ MOZ_ASSERT(byteSrc <= viewByteLength - byteSize);
+ }
+#endif
+
+ SharedMem<uint8_t*> data = tarray->dataPointerEither().cast<uint8_t*>();
+ if (tarray->isSharedMemory()) {
+ jit::AtomicOperations::memmoveSafeWhenRacy(data + byteDest, data + byteSrc,
+ byteSize);
+ } else {
+ memmove(data.unwrapUnshared() + byteDest, data.unwrapUnshared() + byteSrc,
+ byteSize);
+ }
+
+ args.rval().setObject(*tarray);
+ return true;
+}
+
+/* static */
+bool TypedArrayObject::copyWithin(JSContext* cx, unsigned argc, Value* vp) {
+ AutoJSMethodProfilerEntry pseudoFrame(cx, "[TypedArray].prototype",
+ "copyWithin");
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<TypedArrayObject::is,
+ TypedArrayObject::copyWithin_impl>(cx, args);
+}
+
+/* static */ const JSFunctionSpec TypedArrayObject::protoFunctions[] = {
+ JS_SELF_HOSTED_FN("subarray", "TypedArraySubarray", 2, 0),
+ JS_FN("set", TypedArrayObject::set, 1, 0),
+ JS_FN("copyWithin", TypedArrayObject::copyWithin, 2, 0),
+ JS_SELF_HOSTED_FN("every", "TypedArrayEvery", 1, 0),
+ JS_SELF_HOSTED_FN("fill", "TypedArrayFill", 3, 0),
+ JS_SELF_HOSTED_FN("filter", "TypedArrayFilter", 1, 0),
+ JS_SELF_HOSTED_FN("find", "TypedArrayFind", 1, 0),
+ JS_SELF_HOSTED_FN("findIndex", "TypedArrayFindIndex", 1, 0),
+ JS_SELF_HOSTED_FN("findLast", "TypedArrayFindLast", 1, 0),
+ JS_SELF_HOSTED_FN("findLastIndex", "TypedArrayFindLastIndex", 1, 0),
+ JS_SELF_HOSTED_FN("forEach", "TypedArrayForEach", 1, 0),
+ JS_SELF_HOSTED_FN("indexOf", "TypedArrayIndexOf", 2, 0),
+ JS_SELF_HOSTED_FN("join", "TypedArrayJoin", 1, 0),
+ JS_SELF_HOSTED_FN("lastIndexOf", "TypedArrayLastIndexOf", 1, 0),
+ JS_SELF_HOSTED_FN("map", "TypedArrayMap", 1, 0),
+ JS_SELF_HOSTED_FN("reduce", "TypedArrayReduce", 1, 0),
+ JS_SELF_HOSTED_FN("reduceRight", "TypedArrayReduceRight", 1, 0),
+ JS_SELF_HOSTED_FN("reverse", "TypedArrayReverse", 0, 0),
+ JS_SELF_HOSTED_FN("slice", "TypedArraySlice", 2, 0),
+ JS_SELF_HOSTED_FN("some", "TypedArraySome", 1, 0),
+ JS_SELF_HOSTED_FN("sort", "TypedArraySort", 1, 0),
+ JS_SELF_HOSTED_FN("entries", "TypedArrayEntries", 0, 0),
+ JS_SELF_HOSTED_FN("keys", "TypedArrayKeys", 0, 0),
+ JS_SELF_HOSTED_FN("values", "$TypedArrayValues", 0, 0),
+ JS_SELF_HOSTED_SYM_FN(iterator, "$TypedArrayValues", 0, 0),
+ JS_SELF_HOSTED_FN("includes", "TypedArrayIncludes", 2, 0),
+ JS_SELF_HOSTED_FN("toString", "ArrayToString", 0, 0),
+ JS_SELF_HOSTED_FN("toLocaleString", "TypedArrayToLocaleString", 2, 0),
+ JS_SELF_HOSTED_FN("at", "TypedArrayAt", 1, 0),
+ JS_SELF_HOSTED_FN("toReversed", "TypedArrayToReversed", 0, 0),
+ JS_SELF_HOSTED_FN("toSorted", "TypedArrayToSorted", 1, 0),
+ JS_SELF_HOSTED_FN("with", "TypedArrayWith", 2, 0),
+ JS_FS_END,
+};
+
+/* static */ const JSFunctionSpec TypedArrayObject::staticFunctions[] = {
+ JS_SELF_HOSTED_FN("from", "TypedArrayStaticFrom", 3, 0),
+ JS_SELF_HOSTED_FN("of", "TypedArrayStaticOf", 0, 0),
+ JS_FS_END,
+};
+
+/* static */ const JSPropertySpec TypedArrayObject::staticProperties[] = {
+ JS_SELF_HOSTED_SYM_GET(species, "$TypedArraySpecies", 0),
+ JS_PS_END,
+};
+
+static JSObject* CreateSharedTypedArrayPrototype(JSContext* cx,
+ JSProtoKey key) {
+ return GlobalObject::createBlankPrototype(
+ cx, cx->global(), &TypedArrayObject::sharedTypedArrayPrototypeClass);
+}
+
+static const ClassSpec TypedArrayObjectSharedTypedArrayPrototypeClassSpec = {
+ GenericCreateConstructor<TypedArrayConstructor, 0, gc::AllocKind::FUNCTION>,
+ CreateSharedTypedArrayPrototype,
+ TypedArrayObject::staticFunctions,
+ TypedArrayObject::staticProperties,
+ TypedArrayObject::protoFunctions,
+ TypedArrayObject::protoAccessors,
+ nullptr,
+ ClassSpec::DontDefineConstructor,
+};
+
+/* static */ const JSClass TypedArrayObject::sharedTypedArrayPrototypeClass = {
+ "TypedArrayPrototype",
+ JSCLASS_HAS_CACHED_PROTO(JSProto_TypedArray),
+ JS_NULL_CLASS_OPS,
+ &TypedArrayObjectSharedTypedArrayPrototypeClassSpec,
+};
+
+namespace {
+
+// This default implementation is only valid for integer types smaller than
+// 32 bits.
+template <typename NativeType>
+bool TypedArrayObjectTemplate<NativeType>::getElementPure(
+ TypedArrayObject* tarray, size_t index, Value* vp) {
+ static_assert(sizeof(NativeType) < 4,
+ "this method must only handle NativeType values that are "
+ "always exact int32_t values");
+
+ *vp = Int32Value(getIndex(tarray, index));
+ return true;
+}
+
+// We need to specialize for 32-bit integers, floating point types, and
+// 64-bit (BigInt) integer types.
+template <>
+bool TypedArrayObjectTemplate<int32_t>::getElementPure(TypedArrayObject* tarray,
+ size_t index,
+ Value* vp) {
+ *vp = Int32Value(getIndex(tarray, index));
+ return true;
+}
+
+template <>
+bool TypedArrayObjectTemplate<uint32_t>::getElementPure(
+ TypedArrayObject* tarray, size_t index, Value* vp) {
+ uint32_t val = getIndex(tarray, index);
+ *vp = NumberValue(val);
+ return true;
+}
+
+template <>
+bool TypedArrayObjectTemplate<float>::getElementPure(TypedArrayObject* tarray,
+ size_t index, Value* vp) {
+ float val = getIndex(tarray, index);
+ double dval = val;
+
+ /*
+ * Doubles in typed arrays could be typed-punned arrays of integers. This
+ * could allow user code to break the engine-wide invariant that only
+ * canonical nans are stored into jsvals, which means user code could
+ * confuse the engine into interpreting a double-typed jsval as an
+ * object-typed jsval.
+ *
+ * This could be removed for platforms/compilers known to convert a 32-bit
+ * non-canonical nan to a 64-bit canonical nan.
+ */
+ *vp = JS::CanonicalizedDoubleValue(dval);
+ return true;
+}
+
+template <>
+bool TypedArrayObjectTemplate<double>::getElementPure(TypedArrayObject* tarray,
+ size_t index, Value* vp) {
+ double val = getIndex(tarray, index);
+
+ /*
+ * Doubles in typed arrays could be typed-punned arrays of integers. This
+ * could allow user code to break the engine-wide invariant that only
+ * canonical nans are stored into jsvals, which means user code could
+ * confuse the engine into interpreting a double-typed jsval as an
+ * object-typed jsval.
+ */
+ *vp = JS::CanonicalizedDoubleValue(val);
+ return true;
+}
+
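+// There is no pure (infallible, non-allocating) path for BigInt64/BigUint64
+// elements: reading one requires allocating a BigInt, which can GC, so these
+// specializations return false and callers must use getElement() instead.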
+template <>
+bool TypedArrayObjectTemplate<int64_t>::getElementPure(TypedArrayObject* tarray,
+ size_t index,
+ Value* vp) {
+ return false;
+}
+
+template <>
+bool TypedArrayObjectTemplate<uint64_t>::getElementPure(
+ TypedArrayObject* tarray, size_t index, Value* vp) {
+ return false;
+}
+} /* anonymous namespace */
+
+namespace {
+
+template <typename NativeType>
+bool TypedArrayObjectTemplate<NativeType>::getElement(JSContext* cx,
+ TypedArrayObject* tarray,
+ size_t index,
+ MutableHandleValue val) {
+ MOZ_ALWAYS_TRUE(getElementPure(tarray, index, val.address()));
+ return true;
+}
+
+template <>
+bool TypedArrayObjectTemplate<int64_t>::getElement(JSContext* cx,
+ TypedArrayObject* tarray,
+ size_t index,
+ MutableHandleValue val) {
+ int64_t n = getIndex(tarray, index);
+ BigInt* res = BigInt::createFromInt64(cx, n);
+ if (!res) {
+ return false;
+ }
+ val.setBigInt(res);
+ return true;
+}
+
+template <>
+bool TypedArrayObjectTemplate<uint64_t>::getElement(JSContext* cx,
+ TypedArrayObject* tarray,
+ size_t index,
+ MutableHandleValue val) {
+ uint64_t n = getIndex(tarray, index);
+ BigInt* res = BigInt::createFromUint64(cx, n);
+ if (!res) {
+ return false;
+ }
+ val.setBigInt(res);
+ return true;
+}
+} /* anonymous namespace */
+
+namespace js {
+
+template <>
+bool TypedArrayObject::getElement<CanGC>(JSContext* cx, size_t index,
+ MutableHandleValue val) {
+ switch (type()) {
+#define GET_ELEMENT(_, T, N) \
+ case Scalar::N: \
+ return N##Array::getElement(cx, this, index, val);
+ JS_FOR_EACH_TYPED_ARRAY(GET_ELEMENT)
+#undef GET_ELEMENT
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ break;
+ }
+
+ MOZ_CRASH("Unknown TypedArray type");
+}
+
+template <>
+bool TypedArrayObject::getElement<NoGC>(
+ JSContext* cx, size_t index,
+ typename MaybeRooted<Value, NoGC>::MutableHandleType vp) {
+ return getElementPure(index, vp.address());
+}
+
+} // namespace js
+
+bool TypedArrayObject::getElementPure(size_t index, Value* vp) {
+ switch (type()) {
+#define GET_ELEMENT_PURE(_, T, N) \
+ case Scalar::N: \
+ return N##Array::getElementPure(this, index, vp);
+ JS_FOR_EACH_TYPED_ARRAY(GET_ELEMENT_PURE)
+#undef GET_ELEMENT_PURE
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ break;
+ }
+
+ MOZ_CRASH("Unknown TypedArray type");
+}
+
+/* static */
+bool TypedArrayObject::getElements(JSContext* cx,
+ Handle<TypedArrayObject*> tarray,
+ Value* vp) {
+ size_t length = tarray->length();
+ MOZ_ASSERT_IF(length > 0, !tarray->hasDetachedBuffer());
+
+ switch (tarray->type()) {
+#define GET_ELEMENTS(_, T, N) \
+ case Scalar::N: \
+ for (size_t i = 0; i < length; ++i, ++vp) { \
+ if (!N##Array::getElement(cx, tarray, i, \
+ MutableHandleValue::fromMarkedLocation(vp))) { \
+ return false; \
+ } \
+ } \
+ return true;
+ JS_FOR_EACH_TYPED_ARRAY(GET_ELEMENTS)
+#undef GET_ELEMENTS
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ break;
+ }
+
+ MOZ_CRASH("Unknown TypedArray type");
+}
+
+/***
+ *** JS impl
+ ***/
+
+/*
+ * TypedArrayObject boilerplate
+ */
+
+static const JSClassOps TypedArrayClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ TypedArrayObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ ArrayBufferViewObject::trace, // trace
+};
+
+static const ClassExtension TypedArrayClassExtension = {
+ TypedArrayObject::objectMoved, // objectMovedOp
+};
+
+static const JSPropertySpec
+ static_prototype_properties[Scalar::MaxTypedArrayViewType][2] = {
+#define IMPL_TYPED_ARRAY_PROPERTIES(ExternalType, NativeType, Name) \
+ {JS_INT32_PS("BYTES_PER_ELEMENT", Name##Array::BYTES_PER_ELEMENT, \
+ JSPROP_READONLY | JSPROP_PERMANENT), \
+ JS_PS_END},
+
+ JS_FOR_EACH_TYPED_ARRAY(IMPL_TYPED_ARRAY_PROPERTIES)
+#undef IMPL_TYPED_ARRAY_PROPERTIES
+};
+
+static const ClassSpec
+ TypedArrayObjectClassSpecs[Scalar::MaxTypedArrayViewType] = {
+#define IMPL_TYPED_ARRAY_CLASS_SPEC(ExternalType, NativeType, Name) \
+ {Name##Array::createConstructor, \
+ Name##Array::createPrototype, \
+ nullptr, \
+ static_prototype_properties[Scalar::Type::Name], \
+ nullptr, \
+ static_prototype_properties[Scalar::Type::Name], \
+ nullptr, \
+ JSProto_TypedArray},
+
+ JS_FOR_EACH_TYPED_ARRAY(IMPL_TYPED_ARRAY_CLASS_SPEC)
+#undef IMPL_TYPED_ARRAY_CLASS_SPEC
+};
+
+const JSClass TypedArrayObject::classes[Scalar::MaxTypedArrayViewType] = {
+#define IMPL_TYPED_ARRAY_CLASS(ExternalType, NativeType, Name) \
+ {#Name "Array", \
+ JSCLASS_HAS_RESERVED_SLOTS(TypedArrayObject::RESERVED_SLOTS) | \
+ JSCLASS_HAS_CACHED_PROTO(JSProto_##Name##Array) | \
+ JSCLASS_DELAY_METADATA_BUILDER | JSCLASS_SKIP_NURSERY_FINALIZE | \
+ JSCLASS_BACKGROUND_FINALIZE, \
+ &TypedArrayClassOps, &TypedArrayObjectClassSpecs[Scalar::Type::Name], \
+ &TypedArrayClassExtension},
+
+ JS_FOR_EACH_TYPED_ARRAY(IMPL_TYPED_ARRAY_CLASS)
+#undef IMPL_TYPED_ARRAY_CLASS
+};
+
+// The various typed array prototypes are supposed to 1) be normal objects,
+// 2) stringify to "[object <name of constructor>]", and 3) (Gecko-specific)
+// be xrayable. The first and second requirements mandate (in the absence of
+// @@toStringTag) a custom class. The third requirement mandates that each
+// prototype's class have the relevant typed array's cached JSProtoKey in them.
+// Thus we need one class with cached prototype per kind of typed array, with a
+// delegated ClassSpec.
+//
+// Actually ({}).toString.call(Uint8Array.prototype) should throw, because
+// Uint8Array.prototype lacks the typed array internal slots. (Same as
+// with %TypedArray%.prototype.) It's not clear this is desirable (see
+// above), but it's what we've always done, so keep doing it till we
+// implement @@toStringTag or ES6 changes.
+const JSClass TypedArrayObject::protoClasses[Scalar::MaxTypedArrayViewType] = {
+#define IMPL_TYPED_ARRAY_PROTO_CLASS(ExternalType, NativeType, Name) \
+ {#Name "Array.prototype", JSCLASS_HAS_CACHED_PROTO(JSProto_##Name##Array), \
+ JS_NULL_CLASS_OPS, &TypedArrayObjectClassSpecs[Scalar::Type::Name]},
+
+ JS_FOR_EACH_TYPED_ARRAY(IMPL_TYPED_ARRAY_PROTO_CLASS)
+#undef IMPL_TYPED_ARRAY_PROTO_CLASS
+};
+
+/* static */
+bool TypedArrayObject::isOriginalLengthGetter(Native native) {
+ return native == TypedArray_lengthGetter;
+}
+
+/* static */
+bool TypedArrayObject::isOriginalByteOffsetGetter(Native native) {
+ return native == TypedArray_byteOffsetGetter;
+}
+
+/* static */
+bool TypedArrayObject::isOriginalByteLengthGetter(Native native) {
+ return native == TypedArray_byteLengthGetter;
+}
+
+bool js::IsTypedArrayConstructor(const JSObject* obj) {
+#define CHECK_TYPED_ARRAY_CONSTRUCTOR(_, T, N) \
+ if (IsNativeFunction(obj, N##Array::class_constructor)) { \
+ return true; \
+ }
+ JS_FOR_EACH_TYPED_ARRAY(CHECK_TYPED_ARRAY_CONSTRUCTOR)
+#undef CHECK_TYPED_ARRAY_CONSTRUCTOR
+ return false;
+}
+
+bool js::IsTypedArrayConstructor(HandleValue v, Scalar::Type type) {
+ return IsNativeFunction(v, TypedArrayConstructorNative(type));
+}
+
+JSNative js::TypedArrayConstructorNative(Scalar::Type type) {
+#define TYPED_ARRAY_CONSTRUCTOR_NATIVE(_, T, N) \
+ if (type == Scalar::N) { \
+ return N##Array::class_constructor; \
+ }
+ JS_FOR_EACH_TYPED_ARRAY(TYPED_ARRAY_CONSTRUCTOR_NATIVE)
+#undef TYPED_ARRAY_CONSTRUCTOR_NATIVE
+
+ MOZ_CRASH("unexpected typed array type");
+}
+
+bool js::IsBufferSource(JSObject* object, SharedMem<uint8_t*>* dataPointer,
+ size_t* byteLength) {
+ if (object->is<TypedArrayObject>()) {
+ TypedArrayObject& view = object->as<TypedArrayObject>();
+ *dataPointer = view.dataPointerEither().cast<uint8_t*>();
+ *byteLength = view.byteLength();
+ return true;
+ }
+
+ if (object->is<DataViewObject>()) {
+ DataViewObject& view = object->as<DataViewObject>();
+ *dataPointer = view.dataPointerEither().cast<uint8_t*>();
+ *byteLength = view.byteLength();
+ return true;
+ }
+
+ if (object->is<ArrayBufferObject>()) {
+ ArrayBufferObject& buffer = object->as<ArrayBufferObject>();
+ *dataPointer = buffer.dataPointerShared();
+ *byteLength = buffer.byteLength();
+ return true;
+ }
+
+ if (object->is<SharedArrayBufferObject>()) {
+ SharedArrayBufferObject& buffer = object->as<SharedArrayBufferObject>();
+ *dataPointer = buffer.dataPointerShared();
+ *byteLength = buffer.byteLength();
+ return true;
+ }
+
+ return false;
+}
+
+template <typename CharT>
+static inline bool StringIsInfinity(mozilla::Range<const CharT> s) {
+ static constexpr std::string_view Infinity = "Infinity";
+
+  // Compilers optimize this to a single |cmp| instruction on x64 (two on
+  // x86) when the input is a Latin-1 string, because the string "Infinity"
+  // is exactly eight characters long and can therefore be compared as a
+  // single uint64 value.
+ return s.length() == Infinity.length() &&
+ EqualChars(s.begin().get(), Infinity.data(), Infinity.length());
+}
+
+template <typename CharT>
+static inline bool StringIsNaN(mozilla::Range<const CharT> s) {
+ static constexpr std::string_view NaN = "NaN";
+
+ // "NaN" is not as nicely optimizable as "Infinity", but oh well.
+ return s.length() == NaN.length() &&
+ EqualChars(s.begin().get(), NaN.data(), NaN.length());
+}
+
+template <typename CharT>
+static mozilla::Maybe<uint64_t> StringToTypedArrayIndexSlow(
+ mozilla::Range<const CharT> s) {
+ const mozilla::RangedPtr<const CharT> start = s.begin();
+ const mozilla::RangedPtr<const CharT> end = s.end();
+
+ const CharT* actualEnd;
+ double result = js_strtod(start.get(), end.get(), &actualEnd);
+
+ // The complete string must have been parsed.
+ if (actualEnd != end.get()) {
+ return mozilla::Nothing();
+ }
+
+ // Now convert it back to a string.
+ ToCStringBuf cbuf;
+ size_t cstrlen;
+ const char* cstr = js::NumberToCString(&cbuf, result, &cstrlen);
+ MOZ_ASSERT(cstr);
+
+ // Both strings must be equal for a canonical numeric index string.
+ if (s.length() != cstrlen || !EqualChars(start.get(), cstr, cstrlen)) {
+ return mozilla::Nothing();
+ }
+
+ // Directly perform IsInteger() check and encode negative and non-integer
+ // indices as OOB.
+ // See 9.4.5.2 [[HasProperty]], steps 3.b.iii and 3.b.v.
+ // See 9.4.5.3 [[DefineOwnProperty]], steps 3.b.i and 3.b.iii.
+ // See 9.4.5.8 IntegerIndexedElementGet, steps 5 and 8.
+ // See 9.4.5.9 IntegerIndexedElementSet, steps 6 and 9.
+ if (result < 0 || !IsInteger(result)) {
+ return mozilla::Some(UINT64_MAX);
+ }
+
+  // Anything equal to or larger than 2^53 is definitely OOB; encode it
+  // accordingly so that the cast to uint64_t is well defined.
+ if (result >= DOUBLE_INTEGRAL_PRECISION_LIMIT) {
+ return mozilla::Some(UINT64_MAX);
+ }
+
+ // The string is an actual canonical numeric index.
+ return mozilla::Some(result);
+}
+
+template <typename CharT>
+mozilla::Maybe<uint64_t> js::StringToTypedArrayIndex(
+ mozilla::Range<const CharT> s) {
+ mozilla::RangedPtr<const CharT> cp = s.begin();
+ const mozilla::RangedPtr<const CharT> end = s.end();
+
+ MOZ_ASSERT(cp < end, "caller must check for empty strings");
+
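+  // Examples (illustrative): "0" and "101" yield in-range indices; "-0",
+  // "0.5", and "NaN" are canonical but never valid indices (encoded as
+  // UINT64_MAX); "01" and "1e2" are not canonical numeric index strings.
+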
+ bool negative = false;
+ if (*cp == '-') {
+ negative = true;
+ if (++cp == end) {
+ return mozilla::Nothing();
+ }
+ }
+
+ if (!IsAsciiDigit(*cp)) {
+ // Check for "NaN", "Infinity", or "-Infinity".
+ if ((!negative && StringIsNaN<CharT>({cp, end})) ||
+ StringIsInfinity<CharT>({cp, end})) {
+ return mozilla::Some(UINT64_MAX);
+ }
+ return mozilla::Nothing();
+ }
+
+ uint32_t digit = AsciiDigitToNumber(*cp++);
+
+ // Don't allow leading zeros.
+ if (digit == 0 && cp != end) {
+ // The string may be of the form "0.xyz". The exponent form isn't possible
+ // when the string starts with "0".
+ if (*cp == '.') {
+ return StringToTypedArrayIndexSlow(s);
+ }
+ return mozilla::Nothing();
+ }
+
+ uint64_t index = digit;
+
+ for (; cp < end; cp++) {
+ if (!IsAsciiDigit(*cp)) {
+ // Take the slow path when the string has fractional parts or an exponent.
+ if (*cp == '.' || *cp == 'e') {
+ return StringToTypedArrayIndexSlow(s);
+ }
+ return mozilla::Nothing();
+ }
+
+ digit = AsciiDigitToNumber(*cp);
+
+ static_assert(
+ uint64_t(DOUBLE_INTEGRAL_PRECISION_LIMIT) < (UINT64_MAX - 10) / 10,
+ "2^53 is way below UINT64_MAX, so |10 * index + digit| can't overflow");
+
+ index = 10 * index + digit;
+
+    // Also take the slow path when the index is equal to or larger than 2^53.
+ if (index >= uint64_t(DOUBLE_INTEGRAL_PRECISION_LIMIT)) {
+ return StringToTypedArrayIndexSlow(s);
+ }
+ }
+
+ if (negative) {
+ return mozilla::Some(UINT64_MAX);
+ }
+ return mozilla::Some(index);
+}
+
+template mozilla::Maybe<uint64_t> js::StringToTypedArrayIndex(
+ mozilla::Range<const char16_t> s);
+
+template mozilla::Maybe<uint64_t> js::StringToTypedArrayIndex(
+ mozilla::Range<const Latin1Char> s);
+
+bool js::SetTypedArrayElement(JSContext* cx, Handle<TypedArrayObject*> obj,
+ uint64_t index, HandleValue v,
+ ObjectOpResult& result) {
+ TypedArrayObject* tobj = &obj->as<TypedArrayObject>();
+
+ switch (tobj->type()) {
+#define SET_TYPED_ARRAY_ELEMENT(_, T, N) \
+ case Scalar::N: \
+ return TypedArrayObjectTemplate<T>::setElement(cx, obj, index, v, result);
+ JS_FOR_EACH_TYPED_ARRAY(SET_TYPED_ARRAY_ELEMENT)
+#undef SET_TYPED_ARRAY_ELEMENT
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ break;
+ }
+
+ MOZ_CRASH("Unsupported TypedArray type");
+}
+
+// ES2021 draft rev b3f9b5089bcc3ddd8486379015cd11eb1427a5eb
+// 9.4.5.3 [[DefineOwnProperty]], step 3.b.
+bool js::DefineTypedArrayElement(JSContext* cx, Handle<TypedArrayObject*> obj,
+ uint64_t index,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result) {
+ // These are all substeps of 3.b.
+
+ // Step i.
+ if (index >= obj->length()) {
+ if (obj->hasDetachedBuffer()) {
+ return result.fail(JSMSG_TYPED_ARRAY_DETACHED);
+ }
+ return result.fail(JSMSG_DEFINE_BAD_INDEX);
+ }
+
+ // Step ii.
+ if (desc.isAccessorDescriptor()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+
+ // Step iii.
+ if (desc.hasConfigurable() && !desc.configurable()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+
+ // Step iv.
+ if (desc.hasEnumerable() && !desc.enumerable()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+
+ // Step v.
+ if (desc.hasWritable() && !desc.writable()) {
+ return result.fail(JSMSG_CANT_REDEFINE_PROP);
+ }
+
+ // Step vi.
+ if (desc.hasValue()) {
+ switch (obj->type()) {
+#define DEFINE_TYPED_ARRAY_ELEMENT(_, T, N) \
+ case Scalar::N: \
+ return TypedArrayObjectTemplate<T>::setElement(cx, obj, index, \
+ desc.value(), result);
+ JS_FOR_EACH_TYPED_ARRAY(DEFINE_TYPED_ARRAY_ELEMENT)
+#undef DEFINE_TYPED_ARRAY_ELEMENT
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::Int64:
+ case Scalar::Simd128:
+ break;
+ }
+
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+
+ // Step vii.
+ return result.succeed();
+}
+
+template <typename T, typename U>
+static constexpr typename std::enable_if_t<std::is_unsigned_v<T>, U>
+UnsignedSortValue(U val) {
+ return val;
+}
+
+template <typename T, typename U>
+static constexpr
+ typename std::enable_if_t<std::is_integral_v<T> && std::is_signed_v<T>, U>
+ UnsignedSortValue(U val) {
+ // Flip sign bit.
+ return val ^ static_cast<U>(std::numeric_limits<T>::min());
+}
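+
+// For example (illustrative), for int8: -1 (0xFF) maps to 0x7F and 0 (0x00)
+// maps to 0x80, so an unsigned comparison orders -1 before 0.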
+
+template <typename T, typename UnsignedT>
+static constexpr
+ typename std::enable_if_t<std::is_floating_point_v<T>, UnsignedT>
+ UnsignedSortValue(UnsignedT val) {
+ // Flip sign bit for positive numbers; flip all bits for negative numbers,
+ // except negative NaNs.
+ using FloatingPoint = mozilla::FloatingPoint<T>;
+ static_assert(std::is_same_v<typename FloatingPoint::Bits, UnsignedT>,
+ "FloatingPoint::Bits matches the unsigned int representation");
+
+  // FF80'0000 is negative infinity, and (FF80'0000, FFFF'FFFF] are all NaNs
+  // with the sign bit set (the equivalent holds for double values). So any
+  // bit pattern larger than that of negative infinity encodes a negative NaN.
+ constexpr UnsignedT NegativeInfinity =
+ FloatingPoint::kSignBit | FloatingPoint::kExponentBits;
+ if (val > NegativeInfinity) {
+ return val;
+ }
+ if (val & FloatingPoint::kSignBit) {
+ return ~val;
+ }
+ return val ^ FloatingPoint::kSignBit;
+}
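+
+// For example (illustrative), for float: -1.0f (0xBF80'0000) maps to
+// 0x407F'FFFF, +0.0f (0x0000'0000) to 0x8000'0000, and +1.0f (0x3F80'0000)
+// to 0xBF80'0000, so an unsigned comparison yields -1.0 < +0.0 < +1.0.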
+
+template <typename T>
+static typename std::enable_if_t<std::is_integral_v<T> ||
+ std::is_same_v<T, uint8_clamped>>
+TypedArrayStdSort(SharedMem<void*> data, size_t length) {
+ T* unwrapped = data.cast<T*>().unwrapUnshared();
+ std::sort(unwrapped, unwrapped + length);
+}
+
+template <typename T>
+static typename std::enable_if_t<std::is_floating_point_v<T>> TypedArrayStdSort(
+ SharedMem<void*> data, size_t length) {
+ // Sort on the unsigned representation for performance reasons.
+ using UnsignedT =
+ typename mozilla::UnsignedStdintTypeForSize<sizeof(T)>::Type;
+ UnsignedT* unwrapped = data.cast<UnsignedT*>().unwrapUnshared();
+ std::sort(unwrapped, unwrapped + length, [](UnsignedT x, UnsignedT y) {
+ constexpr auto SortValue = UnsignedSortValue<T, UnsignedT>;
+ return SortValue(x) < SortValue(y);
+ });
+}
+
+template <typename T, typename Ops>
+static typename std::enable_if_t<std::is_same_v<Ops, UnsharedOps>, bool>
+TypedArrayStdSort(JSContext* cx, TypedArrayObject* typedArray) {
+ TypedArrayStdSort<T>(typedArray->dataPointerEither(), typedArray->length());
+ return true;
+}
+
+template <typename T, typename Ops>
+static typename std::enable_if_t<std::is_same_v<Ops, SharedOps>, bool>
+TypedArrayStdSort(JSContext* cx, TypedArrayObject* typedArray) {
+  // Always create a copy when sorting shared-memory-backed typed arrays to
+  // ensure concurrent write accesses don't lead to UB when calling std::sort.
+ size_t length = typedArray->length();
+ auto ptr = cx->make_pod_array<T>(length);
+ if (!ptr) {
+ return false;
+ }
+ SharedMem<T*> unshared = SharedMem<T*>::unshared(ptr.get());
+ SharedMem<T*> data = typedArray->dataPointerShared().cast<T*>();
+
+ Ops::podCopy(unshared, data, length);
+
+ TypedArrayStdSort<T>(unshared.template cast<void*>(), length);
+
+ Ops::podCopy(data, unshared, length);
+
+ return true;
+}
+
+template <typename T, typename Ops>
+static bool TypedArrayCountingSort(JSContext* cx,
+ TypedArrayObject* typedArray) {
+ static_assert(std::is_integral_v<T> || std::is_same_v<T, uint8_clamped>,
+ "Counting sort expects integral array elements");
+
+ size_t length = typedArray->length();
+
+ // Determined by performance testing.
+ if (length <= 64) {
+ return TypedArrayStdSort<T, Ops>(cx, typedArray);
+ }
+
+ // Map signed values onto the unsigned range when storing in buffer.
+ using UnsignedT =
+ typename mozilla::UnsignedStdintTypeForSize<sizeof(T)>::Type;
+ constexpr T min = std::numeric_limits<T>::min();
+
+ constexpr size_t InlineStorage = sizeof(T) == 1 ? 256 : 0;
+ Vector<size_t, InlineStorage> buffer(cx);
+ if (!buffer.resize(size_t(std::numeric_limits<UnsignedT>::max()) + 1)) {
+ return false;
+ }
+
+ SharedMem<T*> data = typedArray->dataPointerEither().cast<T*>();
+
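+  // Example (illustrative): for int8 input [3, -1, 3], the population loop
+  // below increments buffer[127] once (for -1) and buffer[131] twice (for 3);
+  // the write-back loop then emits [-1, 3, 3].
+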
+ // Populate the buffer.
+ for (size_t i = 0; i < length; i++) {
+ T val = Ops::load(data + i);
+ buffer[UnsignedT(val - min)]++;
+ }
+
+ // Traverse the buffer in order and write back elements to array.
+ UnsignedT val = UnsignedT(-1); // intentional overflow on first increment
+ for (size_t i = 0; i < length;) {
+ // Invariant: sum(buffer[val:]) == length-i
+ size_t j;
+ do {
+ j = buffer[++val];
+ } while (j == 0);
+
+ for (; j > 0; j--) {
+ Ops::store(data + i++, T(val + min));
+ }
+ }
+
+ return true;
+}
+
+template <typename T, typename U, typename Ops>
+static void SortByColumn(SharedMem<U*> data, size_t length, SharedMem<U*> aux,
+ uint8_t col) {
+ static_assert(std::is_unsigned_v<U>, "SortByColumn sorts on unsigned values");
+ static_assert(std::is_same_v<Ops, UnsharedOps>,
+ "SortByColumn only works on unshared data");
+
+ // |counts| is used to compute the starting index position for each key.
+ // Letting counts[0] always be 0, simplifies the transform step below.
+ // Example:
+ //
+ // Computing frequency counts for the input [1 2 1] gives:
+ // 0 1 2 3 ... (keys)
+ // 0 0 2 1 (frequencies)
+ //
+ // Transforming frequencies to indexes gives:
+ // 0 1 2 3 ... (keys)
+ // 0 0 2 3 (indexes)
+
+ constexpr size_t R = 256;
+
+ // Initialize all entries to zero.
+ size_t counts[R + 1] = {};
+
+ const auto ByteAtCol = [col](U x) {
+ U y = UnsignedSortValue<T, U>(x);
+ return static_cast<uint8_t>(y >> (col * 8));
+ };
+
+ // Compute frequency counts.
+ for (size_t i = 0; i < length; i++) {
+ U val = Ops::load(data + i);
+ uint8_t b = ByteAtCol(val);
+ counts[b + 1]++;
+ }
+
+ // Transform counts to indices.
+ std::partial_sum(std::begin(counts), std::end(counts), std::begin(counts));
+
+ // Distribute
+ for (size_t i = 0; i < length; i++) {
+ U val = Ops::load(data + i);
+ uint8_t b = ByteAtCol(val);
+ size_t j = counts[b]++;
+ MOZ_ASSERT(j < length,
+ "index is in bounds when |data| can't be modified concurrently");
+ UnsharedOps::store(aux + j, val);
+ }
+
+ // Copy back
+ Ops::podCopy(data, aux, length);
+}
+
+template <typename T, typename Ops>
+static bool TypedArrayRadixSort(JSContext* cx, TypedArrayObject* typedArray) {
+ size_t length = typedArray->length();
+
+ // Determined by performance testing.
+ constexpr size_t StdSortMinCutoff = sizeof(T) == 2 ? 64 : 256;
+
+ // Radix sort uses O(n) additional space, limit this space to 64 MB.
+ constexpr size_t StdSortMaxCutoff = (64 * 1024 * 1024) / sizeof(T);
+
+ if (length <= StdSortMinCutoff || length >= StdSortMaxCutoff) {
+ return TypedArrayStdSort<T, Ops>(cx, typedArray);
+ }
+
+ if constexpr (sizeof(T) == 2) {
+ // Radix sort uses O(n) additional space, so when |n| reaches 2^16, switch
+ // over to counting sort to limit the additional space needed to 2^16.
+ constexpr size_t CountingSortMaxCutoff = 65536;
+
+ if (length >= CountingSortMaxCutoff) {
+ return TypedArrayCountingSort<T, Ops>(cx, typedArray);
+ }
+ }
+
+ using UnsignedT =
+ typename mozilla::UnsignedStdintTypeForSize<sizeof(T)>::Type;
+
+ auto ptr = cx->make_zeroed_pod_array<UnsignedT>(length);
+ if (!ptr) {
+ return false;
+ }
+ SharedMem<UnsignedT*> aux = SharedMem<UnsignedT*>::unshared(ptr.get());
+
+ SharedMem<UnsignedT*> data =
+ typedArray->dataPointerEither().cast<UnsignedT*>();
+
+ // Always create a copy when sorting shared memory backed typed arrays to
+ // ensure concurrent write accesses don't lead to computing bad indices.
+ SharedMem<UnsignedT*> unshared;
+ SharedMem<UnsignedT*> shared;
+ UniquePtr<UnsignedT[], JS::FreePolicy> ptrUnshared;
+ if constexpr (std::is_same_v<Ops, SharedOps>) {
+ ptrUnshared = cx->make_pod_array<UnsignedT>(length);
+ if (!ptrUnshared) {
+ return false;
+ }
+ unshared = SharedMem<UnsignedT*>::unshared(ptrUnshared.get());
+ shared = data;
+
+ Ops::podCopy(unshared, shared, length);
+
+ data = unshared;
+ }
+
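+  // LSD radix sort: process one byte column at a time, least-significant
+  // first. Each pass is a stable counting sort, so ordering established by
+  // earlier passes is preserved.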
+ for (uint8_t col = 0; col < sizeof(UnsignedT); col++) {
+ SortByColumn<T, UnsignedT, UnsharedOps>(data, length, aux, col);
+ }
+
+ if constexpr (std::is_same_v<Ops, SharedOps>) {
+ Ops::podCopy(shared, unshared, length);
+ }
+
+ return true;
+}
+
+using TypedArraySortFn = bool (*)(JSContext*, TypedArrayObject*);
+
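+// Select the sort algorithm by element width: counting sort for 1-byte
+// element types, radix sort for 2- and 4-byte types, and std::sort for
+// 8-byte types.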
+template <typename T, typename Ops>
+static constexpr typename std::enable_if_t<sizeof(T) == 1, TypedArraySortFn>
+TypedArraySort() {
+ return TypedArrayCountingSort<T, Ops>;
+}
+
+template <typename T, typename Ops>
+static constexpr typename std::enable_if_t<sizeof(T) == 2 || sizeof(T) == 4,
+ TypedArraySortFn>
+TypedArraySort() {
+ return TypedArrayRadixSort<T, Ops>;
+}
+
+template <typename T, typename Ops>
+static constexpr typename std::enable_if_t<sizeof(T) == 8, TypedArraySortFn>
+TypedArraySort() {
+ return TypedArrayStdSort<T, Ops>;
+}
+
+bool js::intrinsic_TypedArrayNativeSort(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ MOZ_ASSERT(args.length() == 1);
+
+ TypedArrayObject* typedArray =
+ UnwrapAndDowncastValue<TypedArrayObject>(cx, args[0]);
+ if (!typedArray) {
+ return false;
+ }
+
+ MOZ_RELEASE_ASSERT(!typedArray->hasDetachedBuffer());
+
+ bool isShared = typedArray->isSharedMemory();
+ switch (typedArray->type()) {
+#define SORT(_, T, N) \
+ case Scalar::N: \
+ if (isShared) { \
+ if (!TypedArraySort<T, SharedOps>()(cx, typedArray)) { \
+ return false; \
+ } \
+ } else { \
+ if (!TypedArraySort<T, UnsharedOps>()(cx, typedArray)) { \
+ return false; \
+ } \
+ } \
+ break;
+ JS_FOR_EACH_TYPED_ARRAY(SORT)
+#undef SORT
+ default:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+
+ args.rval().set(args[0]);
+ return true;
+}
+
+/* JS Public API */
+
+#define IMPL_TYPED_ARRAY_JSAPI_CONSTRUCTORS(ExternalType, NativeType, Name) \
+ JS_PUBLIC_API JSObject* JS_New##Name##Array(JSContext* cx, \
+ size_t nelements) { \
+ return TypedArrayObjectTemplate<NativeType>::fromLength(cx, nelements); \
+ } \
+ \
+ JS_PUBLIC_API JSObject* JS_New##Name##ArrayFromArray(JSContext* cx, \
+ HandleObject other) { \
+ return TypedArrayObjectTemplate<NativeType>::fromArray(cx, other); \
+ } \
+ \
+ JS_PUBLIC_API JSObject* JS_New##Name##ArrayWithBuffer( \
+ JSContext* cx, HandleObject arrayBuffer, size_t byteOffset, \
+ int64_t length) { \
+ return TypedArrayObjectTemplate<NativeType>::fromBuffer( \
+ cx, arrayBuffer, byteOffset, length); \
+ } \
+ \
+ JS_PUBLIC_API JSObject* js::Unwrap##Name##Array(JSObject* obj) { \
+ obj = obj->maybeUnwrapIf<TypedArrayObject>(); \
+ if (!obj) { \
+ return nullptr; \
+ } \
+ const JSClass* clasp = obj->getClass(); \
+ if (clasp != TypedArrayObjectTemplate<NativeType>::instanceClass()) { \
+ return nullptr; \
+ } \
+ return obj; \
+ } \
+ \
+ JS_PUBLIC_API ExternalType* JS_Get##Name##ArrayLengthAndData( \
+ JSObject* obj, size_t* length, bool* isSharedMemory, \
+ const JS::AutoRequireNoGC& nogc) { \
+ TypedArrayObject* tarr = obj->maybeUnwrapAs<TypedArrayObject>(); \
+ if (!tarr) { \
+ return nullptr; \
+ } \
+ return JS::TypedArray<JS::Scalar::Name>::fromObject(tarr) \
+ .getLengthAndData(length, isSharedMemory, nogc); \
+ } \
+ \
+ JS_PUBLIC_API ExternalType* JS_Get##Name##ArrayData( \
+ JSObject* obj, bool* isSharedMemory, const JS::AutoRequireNoGC& nogc) { \
+ size_t length; \
+ return JS_Get##Name##ArrayLengthAndData(obj, &length, isSharedMemory, \
+ nogc); \
+ } \
+ JS_PUBLIC_API JSObject* JS_GetObjectAs##Name##Array( \
+ JSObject* obj, size_t* length, bool* isShared, ExternalType** data) { \
+ obj = js::Unwrap##Name##Array(obj); \
+ if (!obj) { \
+ return nullptr; \
+ } \
+ TypedArrayObject* tarr = &obj->as<TypedArrayObject>(); \
+ *length = tarr->length(); \
+ *isShared = tarr->isSharedMemory(); \
+ *data = static_cast<ExternalType*>(tarr->dataPointerEither().unwrap( \
+ /*safe - caller sees isShared flag*/)); \
+ return obj; \
+ }
+
+JS_FOR_EACH_TYPED_ARRAY(IMPL_TYPED_ARRAY_JSAPI_CONSTRUCTORS)
+#undef IMPL_TYPED_ARRAY_JSAPI_CONSTRUCTORS
+
+JS_PUBLIC_API bool JS_IsTypedArrayObject(JSObject* obj) {
+ return obj->canUnwrapAs<TypedArrayObject>();
+}
+
+JS_PUBLIC_API size_t JS_GetTypedArrayLength(JSObject* obj) {
+ TypedArrayObject* tarr = obj->maybeUnwrapAs<TypedArrayObject>();
+ if (!tarr) {
+ return 0;
+ }
+ return tarr->length();
+}
+
+JS_PUBLIC_API size_t JS_GetTypedArrayByteOffset(JSObject* obj) {
+ TypedArrayObject* tarr = obj->maybeUnwrapAs<TypedArrayObject>();
+ if (!tarr) {
+ return 0;
+ }
+ return tarr->byteOffset();
+}
+
+JS_PUBLIC_API size_t JS_GetTypedArrayByteLength(JSObject* obj) {
+ TypedArrayObject* tarr = obj->maybeUnwrapAs<TypedArrayObject>();
+ if (!tarr) {
+ return 0;
+ }
+ return tarr->byteLength();
+}
+
+JS_PUBLIC_API bool JS_GetTypedArraySharedness(JSObject* obj) {
+ TypedArrayObject* tarr = obj->maybeUnwrapAs<TypedArrayObject>();
+ if (!tarr) {
+ return false;
+ }
+ return tarr->isSharedMemory();
+}
+
+JS_PUBLIC_API JS::Scalar::Type JS_GetArrayBufferViewType(JSObject* obj) {
+ ArrayBufferViewObject* view = obj->maybeUnwrapAs<ArrayBufferViewObject>();
+ if (!view) {
+ return Scalar::MaxTypedArrayViewType;
+ }
+
+ if (view->is<TypedArrayObject>()) {
+ return view->as<TypedArrayObject>().type();
+ }
+ if (view->is<DataViewObject>()) {
+ return Scalar::MaxTypedArrayViewType;
+ }
+ MOZ_CRASH("invalid ArrayBufferView type");
+}
+
+JS_PUBLIC_API size_t JS_MaxMovableTypedArraySize() {
+ return TypedArrayObject::INLINE_BUFFER_LIMIT;
+}
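+
+// Example embedder usage (illustrative sketch; assumes |obj| may be a
+// wrapped Uint8Array):
+//
+//   bool isShared;
+//   JS::AutoCheckCannotGC nogc(cx);
+//   if (uint8_t* data = JS_GetUint8ArrayData(obj, &isShared, nogc)) {
+//     size_t len = JS_GetTypedArrayLength(obj);
+//     // |data| points at |len| elements; honor |isShared| before writing.
+//   }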
+
+namespace JS {
+
+const JSClass* const TypedArray_base::classes = TypedArrayObject::classes;
+
+#define INSTANTIATE(ExternalType, NativeType, Name) \
+ template class TypedArray<JS::Scalar::Name>;
+JS_FOR_EACH_TYPED_ARRAY(INSTANTIATE)
+#undef INSTANTIATE
+
+JS::ArrayBufferOrView JS::ArrayBufferOrView::unwrap(JSObject* maybeWrapped) {
+ if (!maybeWrapped) {
+ return JS::ArrayBufferOrView(nullptr);
+ }
+ auto* ab = maybeWrapped->maybeUnwrapIf<ArrayBufferObjectMaybeShared>();
+ if (ab) {
+ return ArrayBufferOrView::fromObject(ab);
+ }
+
+ return ArrayBufferView::unwrap(maybeWrapped);
+}
+
+bool JS::ArrayBufferOrView::isDetached() const {
+ MOZ_ASSERT(obj);
+ if (obj->is<ArrayBufferObject>()) {
+ return obj->as<ArrayBufferObject>().isDetached();
+ } else {
+ return obj->as<ArrayBufferViewObject>().hasDetachedBuffer();
+ }
+}
+
+JS::TypedArray_base JS::TypedArray_base::fromObject(JSObject* unwrapped) {
+ if (unwrapped && unwrapped->is<TypedArrayObject>()) {
+ return TypedArray_base(unwrapped);
+ }
+ return TypedArray_base(nullptr);
+}
+
+// Template getLengthAndData function for TypedArrays, implemented here because
+// it requires internal APIs.
+template <JS::Scalar::Type EType>
+typename TypedArray<EType>::DataType* TypedArray<EType>::getLengthAndData(
+ size_t* length, bool* isSharedMemory, const AutoRequireNoGC&) {
+ using ExternalType = TypedArray<EType>::DataType;
+ if (!obj) {
+ return nullptr;
+ }
+ TypedArrayObject* tarr = &obj->as<TypedArrayObject>();
+ MOZ_ASSERT(tarr);
+ *length = tarr->length();
+ *isSharedMemory = tarr->isSharedMemory();
+ return static_cast<ExternalType*>(
+ tarr->dataPointerEither().unwrap(/*safe - caller sees isShared*/));
+}
+
+// Force the method defined above to actually be instantiated in this
+// compilation unit and emitted into the object file, since otherwise a binary
+// could include the header file and emit an undefined symbol that would not be
+// satisfied by the linker. (This happens with opt gtest, at least. In a DEBUG
+// build, the header contains a call to this function so it will always be
+// emitted.)
+#define INSTANTIATE_GET_DATA(a, b, Name) \
+ template typename TypedArray<JS::Scalar::Name>::DataType* \
+ TypedArray<JS::Scalar::Name>::getLengthAndData( \
+ size_t* length, bool* isSharedMemory, const AutoRequireNoGC&);
+JS_FOR_EACH_TYPED_ARRAY(INSTANTIATE_GET_DATA)
+#undef INSTANTIATE_GET_DATA
+
+} /* namespace JS */
diff --git a/js/src/vm/TypedArrayObject.h b/js/src/vm/TypedArrayObject.h
new file mode 100644
index 0000000000..93f7706a91
--- /dev/null
+++ b/js/src/vm/TypedArrayObject.h
@@ -0,0 +1,301 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_TypedArrayObject_h
+#define vm_TypedArrayObject_h
+
+#include "mozilla/Maybe.h"
+#include "mozilla/TextUtils.h"
+
+#include "gc/AllocKind.h"
+#include "gc/MaybeRooted.h"
+#include "js/Class.h"
+#include "js/experimental/TypedData.h" // js::detail::TypedArrayLengthSlot
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "vm/ArrayBufferObject.h"
+#include "vm/ArrayBufferViewObject.h"
+#include "vm/JSObject.h"
+#include "vm/SharedArrayObject.h"
+
+namespace js {
+
+/*
+ * TypedArrayObject
+ *
+ * The non-templated base class for the specific typed implementations.
+ * This class holds all the member variables that are used by
+ * the subclasses.
+ */
+
+class TypedArrayObject : public ArrayBufferViewObject {
+ public:
+ static_assert(js::detail::TypedArrayLengthSlot == LENGTH_SLOT,
+ "bad inlined constant in TypedData.h");
+ static_assert(js::detail::TypedArrayDataSlot == DATA_SLOT,
+ "bad inlined constant in TypedData.h");
+
+ static bool sameBuffer(Handle<TypedArrayObject*> a,
+ Handle<TypedArrayObject*> b) {
+ // Inline buffers.
+ if (!a->hasBuffer() || !b->hasBuffer()) {
+ return a.get() == b.get();
+ }
+
+ // Shared buffers.
+ if (a->isSharedMemory() && b->isSharedMemory()) {
+ return a->bufferShared()->globalID() == b->bufferShared()->globalID();
+ }
+
+ return a->bufferEither() == b->bufferEither();
+ }
+
+ static const JSClass classes[Scalar::MaxTypedArrayViewType];
+ static const JSClass protoClasses[Scalar::MaxTypedArrayViewType];
+ static const JSClass sharedTypedArrayPrototypeClass;
+
+ static const JSClass* classForType(Scalar::Type type) {
+ MOZ_ASSERT(type < Scalar::MaxTypedArrayViewType);
+ return &classes[type];
+ }
+
+ static const JSClass* protoClassForType(Scalar::Type type) {
+ MOZ_ASSERT(type < Scalar::MaxTypedArrayViewType);
+ return &protoClasses[type];
+ }
+
+ static constexpr size_t FIXED_DATA_START = RESERVED_SLOTS;
+
+ // For typed arrays which can store their data inline, the array buffer
+ // object is created lazily.
+ static constexpr uint32_t INLINE_BUFFER_LIMIT =
+ (NativeObject::MAX_FIXED_SLOTS - FIXED_DATA_START) * sizeof(Value);
+
+ static inline gc::AllocKind AllocKindForLazyBuffer(size_t nbytes);
+
+ inline Scalar::Type type() const;
+ inline size_t bytesPerElement() const;
+
+ static bool ensureHasBuffer(JSContext* cx, Handle<TypedArrayObject*> tarray);
+
+ size_t byteLength() const { return length() * bytesPerElement(); }
+
+ size_t length() const {
+ return size_t(getFixedSlot(LENGTH_SLOT).toPrivate());
+ }
+
+ Value byteLengthValue() const {
+ size_t len = byteLength();
+ return NumberValue(len);
+ }
+
+ Value lengthValue() const {
+ size_t len = length();
+ return NumberValue(len);
+ }
+
+ bool hasInlineElements() const;
+ void setInlineElements();
+ uint8_t* elementsRaw() const {
+ return maybePtrFromReservedSlot<uint8_t>(DATA_SLOT);
+ }
+ uint8_t* elements() const {
+ assertZeroLengthArrayData();
+ return elementsRaw();
+ }
+
+#ifdef DEBUG
+ void assertZeroLengthArrayData() const;
+#else
+  void assertZeroLengthArrayData() const {}
+#endif
+
+ template <AllowGC allowGC>
+ bool getElement(JSContext* cx, size_t index,
+ typename MaybeRooted<Value, allowGC>::MutableHandleType val);
+ bool getElementPure(size_t index, Value* vp);
+
+ /*
+ * Copy all elements from this typed array to vp. vp must point to rooted
+ * memory.
+ */
+ static bool getElements(JSContext* cx, Handle<TypedArrayObject*> tarray,
+ Value* vp);
+
+ static bool GetTemplateObjectForNative(JSContext* cx, Native native,
+ const JS::HandleValueArray args,
+ MutableHandleObject res);
+
+ // Maximum allowed byte length for any typed array.
+ static constexpr size_t MaxByteLength = ArrayBufferObject::MaxByteLength;
+
+ static bool isOriginalLengthGetter(Native native);
+
+ static bool isOriginalByteOffsetGetter(Native native);
+
+ static bool isOriginalByteLengthGetter(Native native);
+
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ static size_t objectMoved(JSObject* obj, JSObject* old);
+
+ /* Initialization bits */
+
+ static const JSFunctionSpec protoFunctions[];
+ static const JSPropertySpec protoAccessors[];
+ static const JSFunctionSpec staticFunctions[];
+ static const JSPropertySpec staticProperties[];
+
+ /* Accessors and functions */
+
+ static bool is(HandleValue v);
+
+ static bool set(JSContext* cx, unsigned argc, Value* vp);
+ static bool copyWithin(JSContext* cx, unsigned argc, Value* vp);
+
+ bool convertForSideEffect(JSContext* cx, HandleValue v) const;
+
+ private:
+ static bool set_impl(JSContext* cx, const CallArgs& args);
+ static bool copyWithin_impl(JSContext* cx, const CallArgs& args);
+};
+
+extern TypedArrayObject* NewTypedArrayWithTemplateAndLength(
+ JSContext* cx, HandleObject templateObj, int32_t len);
+
+extern TypedArrayObject* NewTypedArrayWithTemplateAndArray(
+ JSContext* cx, HandleObject templateObj, HandleObject array);
+
+extern TypedArrayObject* NewTypedArrayWithTemplateAndBuffer(
+ JSContext* cx, HandleObject templateObj, HandleObject arrayBuffer,
+ HandleValue byteOffset, HandleValue length);
+
+extern TypedArrayObject* NewUint8ArrayWithLength(
+ JSContext* cx, int32_t len, gc::Heap heap = gc::Heap::Default);
+
+inline bool IsTypedArrayClass(const JSClass* clasp) {
+ return &TypedArrayObject::classes[0] <= clasp &&
+ clasp < &TypedArrayObject::classes[Scalar::MaxTypedArrayViewType];
+}
+
+inline Scalar::Type GetTypedArrayClassType(const JSClass* clasp) {
+ MOZ_ASSERT(IsTypedArrayClass(clasp));
+ return static_cast<Scalar::Type>(clasp - &TypedArrayObject::classes[0]);
+}
+
+bool IsTypedArrayConstructor(const JSObject* obj);
+
+bool IsTypedArrayConstructor(HandleValue v, Scalar::Type type);
+
+JSNative TypedArrayConstructorNative(Scalar::Type type);
+
+// In WebIDL terminology, a BufferSource is either an ArrayBuffer or a typed
+// array view. In either case, extract the dataPointer/byteLength.
+bool IsBufferSource(JSObject* object, SharedMem<uint8_t*>* dataPointer,
+ size_t* byteLength);
+
+inline Scalar::Type TypedArrayObject::type() const {
+ return GetTypedArrayClassType(getClass());
+}
+
+inline size_t TypedArrayObject::bytesPerElement() const {
+ return Scalar::byteSize(type());
+}
+
+// ES2020 draft rev a5375bdad264c8aa264d9c44f57408087761069e
+// 7.1.16 CanonicalNumericIndexString
+//
+// Checks whether or not the string is a canonical numeric index string. If the
+// string is a canonical numeric index which is not representable as a uint64_t,
+// the returned index is UINT64_MAX.
+template <typename CharT>
+mozilla::Maybe<uint64_t> StringToTypedArrayIndex(mozilla::Range<const CharT> s);
+
+// A string |s| is a TypedArray index (or: canonical numeric index string) iff
+// |s| is "-0" or |SameValue(ToString(ToNumber(s)), s)| is true. So check for
+// any characters which can start the string representation of a number,
+// including "NaN" and "Infinity".
+template <typename CharT>
+inline bool CanStartTypedArrayIndex(CharT ch) {
+ return mozilla::IsAsciiDigit(ch) || ch == '-' || ch == 'N' || ch == 'I';
+}
+
+[[nodiscard]] inline mozilla::Maybe<uint64_t> ToTypedArrayIndex(jsid id) {
+ if (id.isInt()) {
+ int32_t i = id.toInt();
+ MOZ_ASSERT(i >= 0);
+ return mozilla::Some(i);
+ }
+
+ if (MOZ_UNLIKELY(!id.isString())) {
+ return mozilla::Nothing();
+ }
+
+ JS::AutoCheckCannotGC nogc;
+ JSAtom* atom = id.toAtom();
+
+ if (atom->empty() || !CanStartTypedArrayIndex(atom->latin1OrTwoByteChar(0))) {
+ return mozilla::Nothing();
+ }
+
+ if (atom->hasLatin1Chars()) {
+ mozilla::Range<const Latin1Char> chars = atom->latin1Range(nogc);
+ return StringToTypedArrayIndex(chars);
+ }
+
+ mozilla::Range<const char16_t> chars = atom->twoByteRange(nogc);
+ return StringToTypedArrayIndex(chars);
+}
+
+bool SetTypedArrayElement(JSContext* cx, Handle<TypedArrayObject*> obj,
+ uint64_t index, HandleValue v,
+ ObjectOpResult& result);
+
+/*
+ * Implements [[DefineOwnProperty]] for TypedArrays when the property
+ * key is a TypedArray index.
+ */
+bool DefineTypedArrayElement(JSContext* cx, Handle<TypedArrayObject*> obj,
+ uint64_t index, Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result);
+
+// Sort a typed array in ascending order. The typed array may be wrapped, but
+// must not be detached.
+bool intrinsic_TypedArrayNativeSort(JSContext* cx, unsigned argc, Value* vp);
+
+static inline constexpr unsigned TypedArrayShift(Scalar::Type viewType) {
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Uint8Clamped:
+ return 0;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ return 1;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ return 2;
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::Int64:
+ case Scalar::Float64:
+ return 3;
+ default:
+ MOZ_CRASH("Unexpected array type");
+ }
+}
+
+static inline constexpr unsigned TypedArrayElemSize(Scalar::Type viewType) {
+ return 1u << TypedArrayShift(viewType);
+}
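+
+// For example, TypedArrayShift(Scalar::Float64) is 3, so
+// TypedArrayElemSize(Scalar::Float64) is 1 << 3 == 8 bytes per element.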
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<js::TypedArrayObject>() const {
+ return js::IsTypedArrayClass(getClass());
+}
+
+#endif /* vm_TypedArrayObject_h */
diff --git a/js/src/vm/UbiNode.cpp b/js/src/vm/UbiNode.cpp
new file mode 100644
index 0000000000..c541e933de
--- /dev/null
+++ b/js/src/vm/UbiNode.cpp
@@ -0,0 +1,527 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/UbiNode.h"
+
+#include "mozilla/Assertions.h"
+
+#include <algorithm>
+
+#include "debugger/Debugger.h"
+#include "gc/GC.h"
+#include "jit/JitCode.h"
+#include "js/Debug.h"
+#include "js/TracingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/UbiNodeUtils.h"
+#include "js/Utility.h"
+#include "util/Text.h"
+#include "vm/BigIntType.h"
+#include "vm/Compartment.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/GetterSetter.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/JSScript.h"
+#include "vm/PropMap.h"
+#include "vm/Scope.h"
+#include "vm/Shape.h"
+#include "vm/StringType.h"
+#include "vm/SymbolType.h"
+
+#include "debugger/Debugger-inl.h"
+#include "gc/StableCellHasher-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+
+using JS::ApplyGCThingTyped;
+using JS::HandleValue;
+using JS::Value;
+using JS::ZoneSet;
+using JS::ubi::AtomOrTwoByteChars;
+using JS::ubi::CoarseType;
+using JS::ubi::Concrete;
+using JS::ubi::Edge;
+using JS::ubi::EdgeRange;
+using JS::ubi::EdgeVector;
+using JS::ubi::Node;
+using JS::ubi::StackFrame;
+using JS::ubi::TracerConcrete;
+using JS::ubi::TracerConcreteWithRealm;
+using mozilla::RangedPtr;
+
+struct CopyToBufferMatcher {
+ RangedPtr<char16_t> destination;
+ size_t maxLength;
+
+ CopyToBufferMatcher(RangedPtr<char16_t> destination, size_t maxLength)
+ : destination(destination), maxLength(maxLength) {}
+
+ template <typename CharT>
+ static size_t copyToBufferHelper(const CharT* src, RangedPtr<char16_t> dest,
+ size_t length) {
+ size_t i = 0;
+ for (; i < length; i++) {
+ dest[i] = src[i];
+ }
+ return i;
+ }
+
+ size_t operator()(JSAtom* atom) {
+ if (!atom) {
+ return 0;
+ }
+
+ size_t length = std::min(atom->length(), maxLength);
+ JS::AutoCheckCannotGC noGC;
+ return atom->hasTwoByteChars()
+ ? copyToBufferHelper(atom->twoByteChars(noGC), destination,
+ length)
+ : copyToBufferHelper(atom->latin1Chars(noGC), destination,
+ length);
+ }
+
+ size_t operator()(const char16_t* chars) {
+ if (!chars) {
+ return 0;
+ }
+
+ size_t length = std::min(js_strlen(chars), maxLength);
+ return copyToBufferHelper(chars, destination, length);
+ }
+};
+
+size_t JS::ubi::AtomOrTwoByteChars::copyToBuffer(
+ RangedPtr<char16_t> destination, size_t length) {
+ CopyToBufferMatcher m(destination, length);
+ return match(m);
+}
+
+struct LengthMatcher {
+ size_t operator()(JSAtom* atom) { return atom ? atom->length() : 0; }
+
+ size_t operator()(const char16_t* chars) {
+ return chars ? js_strlen(chars) : 0;
+ }
+};
+
+size_t JS::ubi::AtomOrTwoByteChars::length() {
+ LengthMatcher m;
+ return match(m);
+}
+
+size_t StackFrame::source(RangedPtr<char16_t> destination,
+ size_t length) const {
+ auto s = source();
+ return s.copyToBuffer(destination, length);
+}
+
+size_t StackFrame::functionDisplayName(RangedPtr<char16_t> destination,
+ size_t length) const {
+ auto name = functionDisplayName();
+ return name.copyToBuffer(destination, length);
+}
+
+size_t StackFrame::sourceLength() { return source().length(); }
+
+size_t StackFrame::functionDisplayNameLength() {
+ return functionDisplayName().length();
+}
+
+// All operations on null ubi::Nodes crash.
+CoarseType Concrete<void>::coarseType() const { MOZ_CRASH("null ubi::Node"); }
+const char16_t* Concrete<void>::typeName() const {
+ MOZ_CRASH("null ubi::Node");
+}
+JS::Zone* Concrete<void>::zone() const { MOZ_CRASH("null ubi::Node"); }
+JS::Compartment* Concrete<void>::compartment() const {
+ MOZ_CRASH("null ubi::Node");
+}
+JS::Realm* Concrete<void>::realm() const { MOZ_CRASH("null ubi::Node"); }
+
+UniquePtr<EdgeRange> Concrete<void>::edges(JSContext*, bool) const {
+ MOZ_CRASH("null ubi::Node");
+}
+
+Node::Size Concrete<void>::size(mozilla::MallocSizeOf mallocSizeof) const {
+ MOZ_CRASH("null ubi::Node");
+}
+
+Node::Node(JS::GCCellPtr thing) {
+ ApplyGCThingTyped(thing, [this](auto t) { this->construct(t); });
+}
+
+Node::Node(HandleValue value) {
+ if (!ApplyGCThingTyped(value, [this](auto t) { this->construct(t); })) {
+ construct<void>(nullptr);
+ }
+}
+
+static bool IsSafeToExposeToJS(JSObject* obj) {
+ if (obj->is<js::EnvironmentObject>() || obj->is<js::ScriptSourceObject>() ||
+ obj->is<js::DebugEnvironmentProxy>()) {
+ return false;
+ }
+ if (obj->is<JSFunction>() && js::IsInternalFunctionObject(*obj)) {
+ return false;
+ }
+ return true;
+}
+
+Value Node::exposeToJS() const {
+ Value v;
+
+ if (is<JSObject>()) {
+ JSObject* obj = as<JSObject>();
+ if (IsSafeToExposeToJS(obj)) {
+ v.setObject(*obj);
+ } else {
+ v.setUndefined();
+ }
+ } else if (is<JSString>()) {
+ v.setString(as<JSString>());
+ } else if (is<JS::Symbol>()) {
+ v.setSymbol(as<JS::Symbol>());
+ } else if (is<BigInt>()) {
+ v.setBigInt(as<BigInt>());
+ } else {
+ v.setUndefined();
+ }
+
+ ExposeValueToActiveJS(v);
+
+ return v;
+}
+
+// A JS::CallbackTracer subclass that adds an Edge to a Vector for each
+// edge on which it is invoked.
+class EdgeVectorTracer final : public JS::CallbackTracer {
+ // The vector to which we add Edges.
+ EdgeVector* vec;
+
+ // True if we should populate the edge's names.
+ bool wantNames;
+
+ void onChild(JS::GCCellPtr thing, const char* name) override {
+ if (!okay) {
+ return;
+ }
+
+ // Don't trace permanent atoms and well-known symbols that are owned by
+ // a parent JSRuntime.
+ if (thing.is<JSString>() && thing.as<JSString>().isPermanentAtom()) {
+ return;
+ }
+ if (thing.is<JS::Symbol>() && thing.as<JS::Symbol>().isWellKnownSymbol()) {
+ return;
+ }
+
+ char16_t* name16 = nullptr;
+ if (wantNames) {
+ // Ask the tracer to compute an edge name for us.
+ char buffer[1024];
+ context().getEdgeName(name, buffer, sizeof(buffer));
+ name = buffer;
+
+ // Convert the name to char16_t characters.
+ name16 = js_pod_malloc<char16_t>(strlen(name) + 1);
+ if (!name16) {
+ okay = false;
+ return;
+ }
+
+ size_t i;
+ for (i = 0; name[i]; i++) {
+ name16[i] = name[i];
+ }
+ name16[i] = '\0';
+ }
+
+    // The simplest code is correct! The temporary Edge takes
+    // ownership of name16; if the append succeeds, the vector element
+    // then takes ownership; if the append fails, then the temporary
+    // retains it, and its destructor will free it.
+ if (!vec->append(Edge(name16, Node(thing)))) {
+ okay = false;
+ return;
+ }
+ }
+
+ public:
+ // True if no errors (OOM, say) have yet occurred.
+ bool okay;
+
+ EdgeVectorTracer(JSRuntime* rt, EdgeVector* vec, bool wantNames)
+ : JS::CallbackTracer(rt), vec(vec), wantNames(wantNames), okay(true) {}
+};
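+
+// A usage sketch (illustrative; SimpleEdgeRange::addTracerEdges below does
+// exactly this for a single cell):
+//
+//   EdgeVector edges;
+//   EdgeVectorTracer tracer(rt, &edges, /* wantNames = */ true);
+//   JS::TraceChildren(&tracer, cell);
+//   if (!tracer.okay) {
+//     // An OOM occurred while recording edges; `edges` is incomplete.
+//   }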
+
+template <typename Referent>
+JS::Zone* TracerConcrete<Referent>::zone() const {
+ return get().zoneFromAnyThread();
+}
+
+template JS::Zone* TracerConcrete<js::BaseScript>::zone() const;
+template JS::Zone* TracerConcrete<js::Shape>::zone() const;
+template JS::Zone* TracerConcrete<js::BaseShape>::zone() const;
+template JS::Zone* TracerConcrete<js::GetterSetter>::zone() const;
+template JS::Zone* TracerConcrete<js::PropMap>::zone() const;
+template JS::Zone* TracerConcrete<js::RegExpShared>::zone() const;
+template JS::Zone* TracerConcrete<js::Scope>::zone() const;
+template JS::Zone* TracerConcrete<JS::Symbol>::zone() const;
+template JS::Zone* TracerConcrete<BigInt>::zone() const;
+template JS::Zone* TracerConcrete<JSString>::zone() const;
+
+template <typename Referent>
+UniquePtr<EdgeRange> TracerConcrete<Referent>::edges(JSContext* cx,
+ bool wantNames) const {
+ auto range = js::MakeUnique<SimpleEdgeRange>();
+ if (!range) {
+ return nullptr;
+ }
+
+ if (!range->addTracerEdges(cx->runtime(), ptr,
+ JS::MapTypeToTraceKind<Referent>::kind,
+ wantNames)) {
+ return nullptr;
+ }
+
+  // Note: Clang 3.8 (or older) requires an explicit construction of the
+  // target UniquePtr type. Once we no longer need to support those Clang
+  // versions, the return statement can be simplified to |return range;|.
+ return UniquePtr<EdgeRange>(range.release());
+}
+
+template UniquePtr<EdgeRange> TracerConcrete<js::BaseScript>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<js::Shape>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<js::BaseShape>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<js::GetterSetter>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<js::PropMap>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<js::RegExpShared>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<js::Scope>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<JS::Symbol>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<BigInt>::edges(
+ JSContext* cx, bool wantNames) const;
+template UniquePtr<EdgeRange> TracerConcrete<JSString>::edges(
+ JSContext* cx, bool wantNames) const;
+
+template <typename Referent>
+JS::Compartment* TracerConcreteWithRealm<Referent>::compartment() const {
+ return TracerBase::get().compartment();
+}
+
+template <typename Referent>
+Realm* TracerConcreteWithRealm<Referent>::realm() const {
+ return TracerBase::get().realm();
+}
+
+template Realm* TracerConcreteWithRealm<js::BaseScript>::realm() const;
+template JS::Compartment* TracerConcreteWithRealm<js::BaseScript>::compartment()
+ const;
+
+bool Concrete<JSObject>::hasAllocationStack() const {
+ return !!js::Debugger::getObjectAllocationSite(get());
+}
+
+StackFrame Concrete<JSObject>::allocationStack() const {
+ MOZ_ASSERT(hasAllocationStack());
+ return StackFrame(js::Debugger::getObjectAllocationSite(get()));
+}
+
+const char* Concrete<JSObject>::jsObjectClassName() const {
+ return Concrete::get().getClass()->name;
+}
+
+JS::Compartment* Concrete<JSObject>::compartment() const {
+ return Concrete::get().compartment();
+}
+
+Realm* Concrete<JSObject>::realm() const {
+ // Cross-compartment wrappers are shared by all realms in the compartment,
+ // so we return nullptr in that case.
+ return JS::GetObjectRealmOrNull(&Concrete::get());
+}
+
+const char16_t Concrete<JS::Symbol>::concreteTypeName[] = u"JS::Symbol";
+const char16_t Concrete<BigInt>::concreteTypeName[] = u"JS::BigInt";
+const char16_t Concrete<js::BaseScript>::concreteTypeName[] = u"js::BaseScript";
+const char16_t Concrete<js::jit::JitCode>::concreteTypeName[] =
+ u"js::jit::JitCode";
+const char16_t Concrete<js::Shape>::concreteTypeName[] = u"js::Shape";
+const char16_t Concrete<js::BaseShape>::concreteTypeName[] = u"js::BaseShape";
+const char16_t Concrete<js::GetterSetter>::concreteTypeName[] =
+ u"js::GetterSetter";
+const char16_t Concrete<js::PropMap>::concreteTypeName[] = u"js::PropMap";
+const char16_t Concrete<js::Scope>::concreteTypeName[] = u"js::Scope";
+const char16_t Concrete<js::RegExpShared>::concreteTypeName[] =
+ u"js::RegExpShared";
+
+namespace JS {
+namespace ubi {
+
+RootList::RootList(JSContext* cx, bool wantNames /* = false */)
+ : cx(cx), edges(), wantNames(wantNames), inited(false) {}
+
+std::pair<bool, JS::AutoCheckCannotGC> RootList::init() {
+ EdgeVectorTracer tracer(cx->runtime(), &edges, wantNames);
+ js::TraceRuntime(&tracer);
+ inited = tracer.okay;
+ return {tracer.okay, JS::AutoCheckCannotGC(cx)};
+}
+
+std::pair<bool, JS::AutoCheckCannotGC> RootList::init(
+ CompartmentSet& debuggees) {
+ EdgeVector allRootEdges;
+ EdgeVectorTracer tracer(cx->runtime(), &allRootEdges, wantNames);
+
+ ZoneSet debuggeeZones;
+ for (auto range = debuggees.all(); !range.empty(); range.popFront()) {
+ if (!debuggeeZones.put(range.front()->zone())) {
+ return {false, JS::AutoCheckCannotGC(cx)};
+ }
+ }
+
+ js::TraceRuntime(&tracer);
+ if (!tracer.okay) {
+ return {false, JS::AutoCheckCannotGC(cx)};
+ }
+ js::gc::TraceIncomingCCWs(&tracer, debuggees);
+ if (!tracer.okay) {
+ return {false, JS::AutoCheckCannotGC(cx)};
+ }
+
+ for (EdgeVector::Range r = allRootEdges.all(); !r.empty(); r.popFront()) {
+ Edge& edge = r.front();
+
+ JS::Compartment* compartment = edge.referent.compartment();
+ if (compartment && !debuggees.has(compartment)) {
+ continue;
+ }
+
+ Zone* zone = edge.referent.zone();
+ if (zone && !debuggeeZones.has(zone)) {
+ continue;
+ }
+
+ if (!edges.append(std::move(edge))) {
+ return {false, JS::AutoCheckCannotGC(cx)};
+ }
+ }
+
+ inited = true;
+ return {true, JS::AutoCheckCannotGC(cx)};
+}
+
+std::pair<bool, JS::AutoCheckCannotGC> RootList::init(HandleObject debuggees) {
+ MOZ_ASSERT(debuggees && JS::dbg::IsDebugger(*debuggees));
+ js::Debugger* dbg = js::Debugger::fromJSObject(debuggees.get());
+
+ CompartmentSet debuggeeCompartments;
+
+ for (js::WeakGlobalObjectSet::Range r = dbg->allDebuggees(); !r.empty();
+ r.popFront()) {
+ if (!debuggeeCompartments.put(r.front()->compartment())) {
+ return {false, JS::AutoCheckCannotGC(cx)};
+ }
+ }
+
+ auto [ok, nogc] = init(debuggeeCompartments);
+ if (!ok) {
+ return {false, nogc};
+ }
+
+ // Ensure that each of our debuggee globals are in the root list.
+ for (js::WeakGlobalObjectSet::Range r = dbg->allDebuggees(); !r.empty();
+ r.popFront()) {
+ if (!addRoot(JS::ubi::Node(static_cast<JSObject*>(r.front())),
+ u"debuggee global")) {
+ return {false, nogc};
+ }
+ }
+
+ inited = true;
+ return {true, nogc};
+}
+
+bool RootList::addRoot(Node node, const char16_t* edgeName) {
+ MOZ_ASSERT_IF(wantNames, edgeName);
+
+ UniqueTwoByteChars name;
+ if (edgeName) {
+ name = js::DuplicateString(edgeName);
+ if (!name) {
+ return false;
+ }
+ }
+
+ return edges.append(Edge(name.release(), node));
+}
+
+const char16_t Concrete<RootList>::concreteTypeName[] = u"JS::ubi::RootList";
+
+UniquePtr<EdgeRange> Concrete<RootList>::edges(JSContext* cx,
+ bool wantNames) const {
+ MOZ_ASSERT_IF(wantNames, get().wantNames);
+ return js::MakeUnique<PreComputedEdgeRange>(get().edges);
+}
+
+bool SimpleEdgeRange::addTracerEdges(JSRuntime* rt, void* thing,
+ JS::TraceKind kind, bool wantNames) {
+ EdgeVectorTracer tracer(rt, &edges, wantNames);
+ JS::TraceChildren(&tracer, JS::GCCellPtr(thing, kind));
+ settle();
+ return tracer.okay;
+}
+
+void Concrete<JSObject>::construct(void* storage, JSObject* ptr) {
+ if (ptr) {
+ auto clasp = ptr->getClass();
+ auto callback = ptr->compartment()
+ ->runtimeFromMainThread()
+ ->constructUbiNodeForDOMObjectCallback;
+ if (clasp->isDOMClass() && callback) {
+ AutoSuppressGCAnalysis suppress;
+ callback(storage, ptr);
+ return;
+ }
+ }
+ new (storage) Concrete(ptr);
+}
+
+void SetConstructUbiNodeForDOMObjectCallback(JSContext* cx,
+ void (*callback)(void*,
+ JSObject*)) {
+ cx->runtime()->constructUbiNodeForDOMObjectCallback = callback;
+}
+
+JS_PUBLIC_API const char* CoarseTypeToString(CoarseType type) {
+ switch (type) {
+ case CoarseType::Other:
+ return "Other";
+ case CoarseType::Object:
+ return "Object";
+ case CoarseType::Script:
+ return "Script";
+ case CoarseType::String:
+ return "String";
+ case CoarseType::DOMNode:
+ return "DOMNode";
+ default:
+ return "Unknown";
+ }
+}
+
+} // namespace ubi
+} // namespace JS
diff --git a/js/src/vm/UbiNodeCensus.cpp b/js/src/vm/UbiNodeCensus.cpp
new file mode 100644
index 0000000000..a313852f59
--- /dev/null
+++ b/js/src/vm/UbiNodeCensus.cpp
@@ -0,0 +1,1323 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/UbiNodeCensus.h"
+
+#include "builtin/MapObject.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Printer.h"
+#include "util/Text.h"
+#include "vm/Compartment.h"
+#include "vm/JSContext.h"
+#include "vm/PlainObject.h" // js::PlainObject
+
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+
+namespace JS {
+namespace ubi {
+
+JS_PUBLIC_API void CountDeleter::operator()(CountBase* ptr) {
+ if (!ptr) {
+ return;
+ }
+
+ // Downcast to our true type and destruct, as guided by our CountType
+ // pointer.
+ ptr->destruct();
+ js_free(ptr);
+}
+
+/*** Count Types ************************************************************/
+
+// The simplest type: just count everything.
+class SimpleCount : public CountType {
+ struct Count : CountBase {
+ size_t totalBytes_;
+
+ explicit Count(SimpleCount& count) : CountBase(count), totalBytes_(0) {}
+ };
+
+ UniqueTwoByteChars label;
+ bool reportCount : 1;
+ bool reportBytes : 1;
+
+ public:
+ explicit SimpleCount(UniqueTwoByteChars& label, bool reportCount = true,
+ bool reportBytes = true)
+ : CountType(),
+ label(std::move(label)),
+ reportCount(reportCount),
+ reportBytes(reportBytes) {}
+
+ explicit SimpleCount()
+ : CountType(), label(nullptr), reportCount(true), reportBytes(true) {}
+
+ void destructCount(CountBase& countBase) override {
+ Count& count = static_cast<Count&>(countBase);
+ count.~Count();
+ }
+
+ CountBasePtr makeCount() override {
+ return CountBasePtr(js_new<Count>(*this));
+ }
+ void traceCount(CountBase& countBase, JSTracer* trc) override {}
+ bool count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) override;
+ bool report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) override;
+};
+
+bool SimpleCount::count(CountBase& countBase,
+ mozilla::MallocSizeOf mallocSizeOf, const Node& node) {
+ Count& count = static_cast<Count&>(countBase);
+ if (reportBytes) {
+ count.totalBytes_ += node.size(mallocSizeOf);
+ }
+ return true;
+}
+
+bool SimpleCount::report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) {
+ Count& count = static_cast<Count&>(countBase);
+
+ Rooted<PlainObject*> obj(cx, NewPlainObject(cx));
+ if (!obj) {
+ return false;
+ }
+
+ RootedValue countValue(cx, NumberValue(count.total_));
+ if (reportCount &&
+ !DefineDataProperty(cx, obj, cx->names().count, countValue)) {
+ return false;
+ }
+
+ RootedValue bytesValue(cx, NumberValue(count.totalBytes_));
+ if (reportBytes &&
+ !DefineDataProperty(cx, obj, cx->names().bytes, bytesValue)) {
+ return false;
+ }
+
+ if (label) {
+ JSString* labelString = JS_NewUCStringCopyZ(cx, label.get());
+ if (!labelString) {
+ return false;
+ }
+ RootedValue labelValue(cx, StringValue(labelString));
+ if (!DefineDataProperty(cx, obj, cx->names().label, labelValue)) {
+ return false;
+ }
+ }
+
+ report.setObject(*obj);
+ return true;
+}
+
+// A count type that collects all matching nodes in a bucket.
+class BucketCount : public CountType {
+ struct Count : CountBase {
+ JS::ubi::Vector<JS::ubi::Node::Id> ids_;
+
+ explicit Count(BucketCount& count) : CountBase(count), ids_() {}
+ };
+
+ public:
+ explicit BucketCount() : CountType() {}
+
+ void destructCount(CountBase& countBase) override {
+ Count& count = static_cast<Count&>(countBase);
+ count.~Count();
+ }
+
+ CountBasePtr makeCount() override {
+ return CountBasePtr(js_new<Count>(*this));
+ }
+ void traceCount(CountBase& countBase, JSTracer* trc) final {}
+ bool count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) override;
+ bool report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) override;
+};
+
+bool BucketCount::count(CountBase& countBase,
+ mozilla::MallocSizeOf mallocSizeOf, const Node& node) {
+ Count& count = static_cast<Count&>(countBase);
+ return count.ids_.append(node.identifier());
+}
+
+bool BucketCount::report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) {
+ Count& count = static_cast<Count&>(countBase);
+
+ size_t length = count.ids_.length();
+ Rooted<ArrayObject*> arr(cx, NewDenseFullyAllocatedArray(cx, length));
+ if (!arr) {
+ return false;
+ }
+ arr->ensureDenseInitializedLength(0, length);
+
+ for (size_t i = 0; i < length; i++) {
+ arr->setDenseElement(i, NumberValue(count.ids_[i]));
+ }
+
+ report.setObject(*arr);
+ return true;
+}
+
+// A type that categorizes nodes by their JavaScript type -- 'objects',
+// 'strings', 'scripts', 'domNode', and 'other' -- and then passes the nodes to
+// child types.
+//
+// Implementation details of scripts like jitted code are counted under
+// 'scripts'.
+class ByCoarseType : public CountType {
+ CountTypePtr objects;
+ CountTypePtr scripts;
+ CountTypePtr strings;
+ CountTypePtr other;
+ CountTypePtr domNode;
+
+ struct Count : CountBase {
+ Count(CountType& type, CountBasePtr& objects, CountBasePtr& scripts,
+ CountBasePtr& strings, CountBasePtr& other, CountBasePtr& domNode)
+ : CountBase(type),
+ objects(std::move(objects)),
+ scripts(std::move(scripts)),
+ strings(std::move(strings)),
+ other(std::move(other)),
+ domNode(std::move(domNode)) {}
+
+ CountBasePtr objects;
+ CountBasePtr scripts;
+ CountBasePtr strings;
+ CountBasePtr other;
+ CountBasePtr domNode;
+ };
+
+ public:
+ ByCoarseType(CountTypePtr& objects, CountTypePtr& scripts,
+ CountTypePtr& strings, CountTypePtr& other,
+ CountTypePtr& domNode)
+ : CountType(),
+ objects(std::move(objects)),
+ scripts(std::move(scripts)),
+ strings(std::move(strings)),
+ other(std::move(other)),
+ domNode(std::move(domNode)) {}
+
+ void destructCount(CountBase& countBase) override {
+ Count& count = static_cast<Count&>(countBase);
+ count.~Count();
+ }
+
+ CountBasePtr makeCount() override;
+ void traceCount(CountBase& countBase, JSTracer* trc) override;
+ bool count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) override;
+ bool report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) override;
+};
+
+CountBasePtr ByCoarseType::makeCount() {
+ CountBasePtr objectsCount(objects->makeCount());
+ CountBasePtr scriptsCount(scripts->makeCount());
+ CountBasePtr stringsCount(strings->makeCount());
+ CountBasePtr otherCount(other->makeCount());
+ CountBasePtr domNodeCount(domNode->makeCount());
+
+ if (!objectsCount || !scriptsCount || !stringsCount || !otherCount ||
+ !domNodeCount) {
+ return CountBasePtr(nullptr);
+ }
+
+ return CountBasePtr(js_new<Count>(*this, objectsCount, scriptsCount,
+ stringsCount, otherCount, domNodeCount));
+}
+
+void ByCoarseType::traceCount(CountBase& countBase, JSTracer* trc) {
+ Count& count = static_cast<Count&>(countBase);
+ count.objects->trace(trc);
+ count.scripts->trace(trc);
+ count.strings->trace(trc);
+ count.other->trace(trc);
+ count.domNode->trace(trc);
+}
+
+bool ByCoarseType::count(CountBase& countBase,
+ mozilla::MallocSizeOf mallocSizeOf, const Node& node) {
+ Count& count = static_cast<Count&>(countBase);
+
+ switch (node.coarseType()) {
+ case JS::ubi::CoarseType::Object:
+ return count.objects->count(mallocSizeOf, node);
+ case JS::ubi::CoarseType::Script:
+ return count.scripts->count(mallocSizeOf, node);
+ case JS::ubi::CoarseType::String:
+ return count.strings->count(mallocSizeOf, node);
+ case JS::ubi::CoarseType::Other:
+ return count.other->count(mallocSizeOf, node);
+ case JS::ubi::CoarseType::DOMNode:
+ return count.domNode->count(mallocSizeOf, node);
+ default:
+ MOZ_CRASH("bad JS::ubi::CoarseType in JS::ubi::ByCoarseType::count");
+ return false;
+ }
+}
+
+bool ByCoarseType::report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) {
+ Count& count = static_cast<Count&>(countBase);
+
+ Rooted<PlainObject*> obj(cx, NewPlainObject(cx));
+ if (!obj) {
+ return false;
+ }
+
+ RootedValue objectsReport(cx);
+ if (!count.objects->report(cx, &objectsReport) ||
+ !DefineDataProperty(cx, obj, cx->names().objects, objectsReport))
+ return false;
+
+ RootedValue scriptsReport(cx);
+ if (!count.scripts->report(cx, &scriptsReport) ||
+ !DefineDataProperty(cx, obj, cx->names().scripts, scriptsReport))
+ return false;
+
+ RootedValue stringsReport(cx);
+ if (!count.strings->report(cx, &stringsReport) ||
+ !DefineDataProperty(cx, obj, cx->names().strings, stringsReport))
+ return false;
+
+ RootedValue otherReport(cx);
+ if (!count.other->report(cx, &otherReport) ||
+ !DefineDataProperty(cx, obj, cx->names().other, otherReport))
+ return false;
+ RootedValue domReport(cx);
+ if (!count.domNode->report(cx, &domReport) ||
+ !DefineDataProperty(cx, obj, cx->names().domNode, domReport))
+ return false;
+
+ report.setObject(*obj);
+ return true;
+}
+
+// Comparison function for sorting hash table entries by the smallest node ID
+// they counted. Node IDs are stable and unique, which ensures ordering of
+// results never depends on hash table placement or sort algorithm vagaries. The
+// arguments are doubly indirect: they're pointers to elements in an array of
+// pointers to table entries.
+template <typename Entry>
+static int compareEntries(const void* lhsVoid, const void* rhsVoid) {
+ auto lhs = (*static_cast<const Entry* const*>(lhsVoid))
+ ->value()
+ ->smallestNodeIdCounted_;
+ auto rhs = (*static_cast<const Entry* const*>(rhsVoid))
+ ->value()
+ ->smallestNodeIdCounted_;
+
+ // We don't want to just subtract the values, as they're unsigned.
+ if (lhs < rhs) {
+ return 1;
+ }
+ if (lhs > rhs) {
+ return -1;
+ }
+ return 0;
+}
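+
+// Worked example (illustrative): for entries whose smallest counted node
+// IDs are {0x10, 0x30, 0x20}, qsort with this comparator yields
+// {0x30, 0x20, 0x10} -- a lesser ID compares as "greater", so entries are
+// ordered by descending smallest node ID.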
+
+// A hash map mapping from C strings to counts.
+using CStringCountMap = HashMap<const char*, CountBasePtr,
+ mozilla::CStringHasher, SystemAllocPolicy>;
+
+// Convert a HashMap into an object with each key one of the entries from the
+// map and each value the associated count's report. For use during census
+// reporting.
+//
+// `Map` must be a `HashMap` from some key type to a `CountBasePtr`.
+//
+// `GetName` must be a callable type which takes `const Map::Key&` and returns
+// `JSAtom*`.
+template <class Map, class GetName>
+static PlainObject* countMapToObject(JSContext* cx, Map& map, GetName getName) {
+  // Build a vector of pointers to entries; sort by the smallest node ID
+  // each entry counted (see compareEntries above); and then use that to
+  // build the result object. This keeps the ordering of entries stable,
+  // independent of hash table placement.
+
+ JS::ubi::Vector<typename Map::Entry*> entries;
+ if (!entries.reserve(map.count())) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ for (auto r = map.all(); !r.empty(); r.popFront()) {
+ entries.infallibleAppend(&r.front());
+ }
+
+ if (entries.length()) {
+ qsort(entries.begin(), entries.length(), sizeof(*entries.begin()),
+ compareEntries<typename Map::Entry>);
+ }
+
+ Rooted<PlainObject*> obj(cx, NewPlainObject(cx));
+ if (!obj) {
+ return nullptr;
+ }
+
+ for (auto& entry : entries) {
+ CountBasePtr& thenCount = entry->value();
+ RootedValue thenReport(cx);
+ if (!thenCount->report(cx, &thenReport)) {
+ return nullptr;
+ }
+
+ JSAtom* atom = getName(entry->key());
+ if (!atom) {
+ return nullptr;
+ }
+
+ RootedId entryId(cx, AtomToId(atom));
+ if (!DefineDataProperty(cx, obj, entryId, thenReport)) {
+ return nullptr;
+ }
+ }
+
+ return obj;
+}
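+
+// A usage sketch (mirroring ByObjectClass::report below, where keys are
+// C strings):
+//
+//   Rooted<PlainObject*> obj(
+//       cx, countMapToObject(cx, count.table, [cx](const char* key) {
+//         return Atomize(cx, key, strlen(key));
+//       }));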
+
+// A type that categorizes nodes that are JSObjects by their class name,
+// and places all other nodes in an 'other' category.
+class ByObjectClass : public CountType {
+ // A table mapping class names to their counts. Note that we treat js::Class
+ // instances with the same name as equal keys. If you have several
+ // js::Classes with equal names (and we do; as of this writing there were
+ // six named "Object"), you will get several different js::Classes being
+ // counted in the same table entry.
+ using Table = CStringCountMap;
+ using Entry = Table::Entry;
+
+ struct Count : public CountBase {
+ Table table;
+ CountBasePtr other;
+
+ Count(CountType& type, CountBasePtr& other)
+ : CountBase(type), other(std::move(other)) {}
+ };
+
+ CountTypePtr classesType;
+ CountTypePtr otherType;
+
+ public:
+ ByObjectClass(CountTypePtr& classesType, CountTypePtr& otherType)
+ : CountType(),
+ classesType(std::move(classesType)),
+ otherType(std::move(otherType)) {}
+
+ void destructCount(CountBase& countBase) override {
+ Count& count = static_cast<Count&>(countBase);
+ count.~Count();
+ }
+
+ CountBasePtr makeCount() override;
+ void traceCount(CountBase& countBase, JSTracer* trc) override;
+ bool count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) override;
+ bool report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) override;
+};
+
+CountBasePtr ByObjectClass::makeCount() {
+ CountBasePtr otherCount(otherType->makeCount());
+ if (!otherCount) {
+ return nullptr;
+ }
+
+ auto count = js::MakeUnique<Count>(*this, otherCount);
+ if (!count) {
+ return nullptr;
+ }
+
+ return CountBasePtr(count.release());
+}
+
+void ByObjectClass::traceCount(CountBase& countBase, JSTracer* trc) {
+ Count& count = static_cast<Count&>(countBase);
+ for (Table::Range r = count.table.all(); !r.empty(); r.popFront()) {
+ r.front().value()->trace(trc);
+ }
+ count.other->trace(trc);
+}
+
+bool ByObjectClass::count(CountBase& countBase,
+ mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) {
+ Count& count = static_cast<Count&>(countBase);
+
+ const char* className = node.jsObjectClassName();
+ if (!className) {
+ return count.other->count(mallocSizeOf, node);
+ }
+
+ Table::AddPtr p = count.table.lookupForAdd(className);
+ if (!p) {
+ CountBasePtr classCount(classesType->makeCount());
+ if (!classCount || !count.table.add(p, className, std::move(classCount))) {
+ return false;
+ }
+ }
+ return p->value()->count(mallocSizeOf, node);
+}
+
+bool ByObjectClass::report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) {
+ Count& count = static_cast<Count&>(countBase);
+
+ Rooted<PlainObject*> obj(
+ cx, countMapToObject(cx, count.table, [cx](const char* key) {
+ MOZ_ASSERT(key);
+ return Atomize(cx, key, strlen(key));
+ }));
+ if (!obj) {
+ return false;
+ }
+
+ RootedValue otherReport(cx);
+ if (!count.other->report(cx, &otherReport) ||
+ !DefineDataProperty(cx, obj, cx->names().other, otherReport))
+ return false;
+
+ report.setObject(*obj);
+ return true;
+}
+
+class ByDomObjectClass : public CountType {
+ // A table mapping descriptive names to their counts.
+ using UniqueC16String = JS::UniqueTwoByteChars;
+
+ struct UniqueC16StringHasher {
+ using Lookup = UniqueC16String;
+
+ static js::HashNumber hash(const Lookup& lookup) {
+ return mozilla::HashString(lookup.get());
+ }
+
+ static bool match(const UniqueC16String& key, const Lookup& lookup) {
+ return CompareChars(key.get(), js_strlen(key.get()), lookup.get(),
+ js_strlen(lookup.get())) == 0;
+ }
+ };
+
+ using Table = HashMap<UniqueC16String, CountBasePtr, UniqueC16StringHasher,
+ SystemAllocPolicy>;
+ using Entry = Table::Entry;
+
+ struct Count : public CountBase {
+ Table table;
+
+ explicit Count(CountType& type) : CountBase(type) {}
+ };
+
+ CountTypePtr classesType;
+
+ public:
+ explicit ByDomObjectClass(CountTypePtr& classesType)
+ : CountType(), classesType(std::move(classesType)) {}
+
+ void destructCount(CountBase& countBase) override {
+ Count& count = static_cast<Count&>(countBase);
+ count.~Count();
+ }
+
+ CountBasePtr makeCount() override;
+ void traceCount(CountBase& countBase, JSTracer* trc) override;
+ bool count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) override;
+ bool report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) override;
+};
+
+CountBasePtr ByDomObjectClass::makeCount() {
+ auto count = js::MakeUnique<Count>(*this);
+ if (!count) {
+ return nullptr;
+ }
+
+ return CountBasePtr(count.release());
+}
+
+void ByDomObjectClass::traceCount(CountBase& countBase, JSTracer* trc) {
+ Count& count = static_cast<Count&>(countBase);
+ for (Table::Range r = count.table.all(); !r.empty(); r.popFront()) {
+ r.front().value()->trace(trc);
+ }
+}
+
+bool ByDomObjectClass::count(CountBase& countBase,
+ mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) {
+ Count& count = static_cast<Count&>(countBase);
+
+ const char16_t* nodeName = node.descriptiveTypeName();
+ if (!nodeName) {
+ return false;
+ }
+
+ UniqueC16String name = DuplicateString(nodeName);
+ if (!name) {
+ return false;
+ }
+
+ Table::AddPtr p = count.table.lookupForAdd(name);
+ if (!p) {
+ CountBasePtr classesCount(classesType->makeCount());
+ if (!classesCount ||
+ !count.table.add(p, std::move(name), std::move(classesCount))) {
+ return false;
+ }
+ }
+ return p->value()->count(mallocSizeOf, node);
+}
+
+bool ByDomObjectClass::report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) {
+ Count& count = static_cast<Count&>(countBase);
+
+ Rooted<PlainObject*> obj(
+ cx, countMapToObject(cx, count.table, [cx](const UniqueC16String& key) {
+ const char16_t* chars = key.get();
+ MOZ_ASSERT(chars);
+ return AtomizeChars(cx, chars, js_strlen(chars));
+ }));
+ if (!obj) {
+ return false;
+ }
+
+ report.setObject(*obj);
+ return true;
+}
+
+// A count type that categorizes nodes by their ubi::Node::typeName.
+class ByUbinodeType : public CountType {
+  // Note that, because ubi::Node::typeName promises to return a specific
+  // pointer, not just any string whose contents are correct, we can use
+  // the pointers themselves as hash table keys.
+ using Table = HashMap<const char16_t*, CountBasePtr,
+ DefaultHasher<const char16_t*>, SystemAllocPolicy>;
+ using Entry = Table::Entry;
+
+ struct Count : public CountBase {
+ Table table;
+
+ explicit Count(CountType& type) : CountBase(type) {}
+ };
+
+ CountTypePtr entryType;
+
+ public:
+ explicit ByUbinodeType(CountTypePtr& entryType)
+ : CountType(), entryType(std::move(entryType)) {}
+
+ void destructCount(CountBase& countBase) override {
+ Count& count = static_cast<Count&>(countBase);
+ count.~Count();
+ }
+
+ CountBasePtr makeCount() override;
+ void traceCount(CountBase& countBase, JSTracer* trc) override;
+ bool count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) override;
+ bool report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) override;
+};
+
+CountBasePtr ByUbinodeType::makeCount() {
+ auto count = js::MakeUnique<Count>(*this);
+ if (!count) {
+ return nullptr;
+ }
+
+ return CountBasePtr(count.release());
+}
+
+void ByUbinodeType::traceCount(CountBase& countBase, JSTracer* trc) {
+ Count& count = static_cast<Count&>(countBase);
+ for (Table::Range r = count.table.all(); !r.empty(); r.popFront()) {
+ r.front().value()->trace(trc);
+ }
+}
+
+bool ByUbinodeType::count(CountBase& countBase,
+ mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) {
+ Count& count = static_cast<Count&>(countBase);
+
+ const char16_t* key = node.typeName();
+ MOZ_ASSERT(key);
+ Table::AddPtr p = count.table.lookupForAdd(key);
+ if (!p) {
+ CountBasePtr typesCount(entryType->makeCount());
+ if (!typesCount || !count.table.add(p, key, std::move(typesCount))) {
+ return false;
+ }
+ }
+ return p->value()->count(mallocSizeOf, node);
+}
+
+bool ByUbinodeType::report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) {
+ Count& count = static_cast<Count&>(countBase);
+
+  // Build a vector of pointers to entries; sort by the smallest node ID
+  // each entry counted (see compareEntries above); and then use that to
+  // build the result object. This keeps the ordering of entries stable,
+  // independent of hash table placement.
+ JS::ubi::Vector<Entry*> entries;
+ if (!entries.reserve(count.table.count())) {
+ return false;
+ }
+ for (Table::Range r = count.table.all(); !r.empty(); r.popFront()) {
+ entries.infallibleAppend(&r.front());
+ }
+ if (entries.length()) {
+ qsort(entries.begin(), entries.length(), sizeof(*entries.begin()),
+ compareEntries<Entry>);
+ }
+
+ // Now build the result by iterating over the sorted vector.
+ Rooted<PlainObject*> obj(cx, NewPlainObject(cx));
+ if (!obj) {
+ return false;
+ }
+ for (Entry** entryPtr = entries.begin(); entryPtr < entries.end();
+ entryPtr++) {
+ Entry& entry = **entryPtr;
+ CountBasePtr& typeCount = entry.value();
+ RootedValue typeReport(cx);
+ if (!typeCount->report(cx, &typeReport)) {
+ return false;
+ }
+
+ const char16_t* name = entry.key();
+ MOZ_ASSERT(name);
+ JSAtom* atom = AtomizeChars(cx, name, js_strlen(name));
+ if (!atom) {
+ return false;
+ }
+ RootedId entryId(cx, AtomToId(atom));
+
+ if (!DefineDataProperty(cx, obj, entryId, typeReport)) {
+ return false;
+ }
+ }
+
+ report.setObject(*obj);
+ return true;
+}
+
+// A count type that categorizes nodes by the JS stack under which they were
+// allocated.
+class ByAllocationStack : public CountType {
+ using Table = HashMap<StackFrame, CountBasePtr, DefaultHasher<StackFrame>,
+ SystemAllocPolicy>;
+ using Entry = Table::Entry;
+
+ struct Count : public CountBase {
+ // NOTE: You may look up entries in this table by JS::ubi::StackFrame
+ // key only during traversal, NOT ONCE TRAVERSAL IS COMPLETE. Once
+ // traversal is complete, you may only iterate over it.
+ //
+ // In this hash table, keys are JSObjects (with some indirection), and
+ // we use JSObject identity (that is, address identity) as key
+ // identity. The normal way to support such a table is to make the trace
+ // function notice keys that have moved and re-key them in the
+ // table. However, our trace function does *not* rehash; the first GC
+ // may render the hash table unsearchable.
+ //
+ // This is as it should be:
+ //
+ // First, the heap traversal phase needs lookups by key to work. But no
+ // GC may ever occur during a traversal; this is enforced by the
+ // JS::ubi::BreadthFirst template. So the traceCount function doesn't
+ // need to do anything to help traversal; it never even runs then.
+ //
+ // Second, the report phase needs iteration over the table to work, but
+ // never looks up entries by key. GC may well occur during this phase:
+ // we allocate a Map object, and probably cross-compartment wrappers for
+ // SavedFrame instances as well. If a GC were to occur, it would call
+ // our traceCount function; if traceCount were to re-key, that would
+ // ruin the traversal in progress.
+ //
+ // So depending on the phase, we either don't need re-keying, or
+ // can't abide it.
+ Table table;
+ CountBasePtr noStack;
+
+ Count(CountType& type, CountBasePtr& noStack)
+ : CountBase(type), noStack(std::move(noStack)) {}
+ };
+
+ CountTypePtr entryType;
+ CountTypePtr noStackType;
+
+ public:
+ ByAllocationStack(CountTypePtr& entryType, CountTypePtr& noStackType)
+ : CountType(),
+ entryType(std::move(entryType)),
+ noStackType(std::move(noStackType)) {}
+
+ void destructCount(CountBase& countBase) override {
+ Count& count = static_cast<Count&>(countBase);
+ count.~Count();
+ }
+
+ CountBasePtr makeCount() override;
+ void traceCount(CountBase& countBase, JSTracer* trc) override;
+ bool count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) override;
+ bool report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) override;
+};
+
+CountBasePtr ByAllocationStack::makeCount() {
+ CountBasePtr noStackCount(noStackType->makeCount());
+ if (!noStackCount) {
+ return nullptr;
+ }
+
+ auto count = js::MakeUnique<Count>(*this, noStackCount);
+ if (!count) {
+ return nullptr;
+ }
+ return CountBasePtr(count.release());
+}
+
+void ByAllocationStack::traceCount(CountBase& countBase, JSTracer* trc) {
+ Count& count = static_cast<Count&>(countBase);
+ for (Table::Range r = count.table.all(); !r.empty(); r.popFront()) {
+ // Trace our child Counts.
+ r.front().value()->trace(trc);
+
+ // Trace the StackFrame that is this entry's key. Do not re-key if
+ // it has moved; see comments for ByAllocationStack::Count::table.
+ const StackFrame* key = &r.front().key();
+ auto& k = *const_cast<StackFrame*>(key);
+ k.trace(trc);
+ }
+ count.noStack->trace(trc);
+}
+
+bool ByAllocationStack::count(CountBase& countBase,
+ mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) {
+ Count& count = static_cast<Count&>(countBase);
+
+ // If we do have an allocation stack for this node, include it in the
+ // count for that stack.
+ if (node.hasAllocationStack()) {
+ auto allocationStack = node.allocationStack();
+ auto p = count.table.lookupForAdd(allocationStack);
+ if (!p) {
+ CountBasePtr stackCount(entryType->makeCount());
+ if (!stackCount ||
+ !count.table.add(p, allocationStack, std::move(stackCount))) {
+ return false;
+ }
+ }
+ MOZ_ASSERT(p);
+ return p->value()->count(mallocSizeOf, node);
+ }
+
+ // Otherwise, count it in the "no stack" category.
+ return count.noStack->count(mallocSizeOf, node);
+}
+
+bool ByAllocationStack::report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) {
+ Count& count = static_cast<Count&>(countBase);
+
+#ifdef DEBUG
+ // Check that nothing rehashes our table while we hold pointers into it.
+ mozilla::Generation generation = count.table.generation();
+#endif
+
+  // Build a vector of pointers to entries; sort by the smallest node ID
+  // each entry counted (see compareEntries above); and then use that to
+  // build the result object. This keeps the ordering of entries stable,
+  // independent of hash table placement.
+ JS::ubi::Vector<Entry*> entries;
+ if (!entries.reserve(count.table.count())) {
+ return false;
+ }
+ for (Table::Range r = count.table.all(); !r.empty(); r.popFront()) {
+ entries.infallibleAppend(&r.front());
+ }
+ if (entries.length()) {
+ qsort(entries.begin(), entries.length(), sizeof(*entries.begin()),
+ compareEntries<Entry>);
+ }
+
+ // Now build the result by iterating over the sorted vector.
+ Rooted<MapObject*> map(cx, MapObject::create(cx));
+ if (!map) {
+ return false;
+ }
+ for (Entry** entryPtr = entries.begin(); entryPtr < entries.end();
+ entryPtr++) {
+ Entry& entry = **entryPtr;
+ MOZ_ASSERT(entry.key());
+
+ RootedObject stack(cx);
+ if (!entry.key().constructSavedFrameStack(cx, &stack) ||
+ !cx->compartment()->wrap(cx, &stack)) {
+ return false;
+ }
+ RootedValue stackVal(cx, ObjectValue(*stack));
+
+ CountBasePtr& stackCount = entry.value();
+ RootedValue stackReport(cx);
+ if (!stackCount->report(cx, &stackReport)) {
+ return false;
+ }
+
+ if (!MapObject::set(cx, map, stackVal, stackReport)) {
+ return false;
+ }
+ }
+
+ if (count.noStack->total_ > 0) {
+ RootedValue noStackReport(cx);
+ if (!count.noStack->report(cx, &noStackReport)) {
+ return false;
+ }
+ RootedValue noStack(cx, StringValue(cx->names().noStack));
+ if (!MapObject::set(cx, map, noStack, noStackReport)) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(generation == count.table.generation());
+
+ report.setObject(*map);
+ return true;
+}
+
+// A count type that categorizes nodes by their script's filename.
+class ByFilename : public CountType {
+ using UniqueCString = JS::UniqueChars;
+
+ struct UniqueCStringHasher {
+ using Lookup = UniqueCString;
+
+ static js::HashNumber hash(const Lookup& lookup) {
+ return mozilla::CStringHasher::hash(lookup.get());
+ }
+
+ static bool match(const UniqueCString& key, const Lookup& lookup) {
+ return mozilla::CStringHasher::match(key.get(), lookup.get());
+ }
+ };
+
+ // A table mapping filenames to their counts. Note that we treat scripts
+ // with the same filename as equivalent. If you have several sources with
+ // the same filename, then all their scripts will get bucketed together.
+ using Table = HashMap<UniqueCString, CountBasePtr, UniqueCStringHasher,
+ SystemAllocPolicy>;
+ using Entry = Table::Entry;
+
+ struct Count : public CountBase {
+ Table table;
+ CountBasePtr then;
+ CountBasePtr noFilename;
+
+ Count(CountType& type, CountBasePtr&& then, CountBasePtr&& noFilename)
+ : CountBase(type),
+ then(std::move(then)),
+ noFilename(std::move(noFilename)) {}
+ };
+
+ CountTypePtr thenType;
+ CountTypePtr noFilenameType;
+
+ public:
+ ByFilename(CountTypePtr&& thenType, CountTypePtr&& noFilenameType)
+ : CountType(),
+ thenType(std::move(thenType)),
+ noFilenameType(std::move(noFilenameType)) {}
+
+ void destructCount(CountBase& countBase) override {
+ Count& count = static_cast<Count&>(countBase);
+ count.~Count();
+ }
+
+ CountBasePtr makeCount() override;
+ void traceCount(CountBase& countBase, JSTracer* trc) override;
+ bool count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) override;
+ bool report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) override;
+};
+
+CountBasePtr ByFilename::makeCount() {
+ CountBasePtr thenCount(thenType->makeCount());
+ if (!thenCount) {
+ return nullptr;
+ }
+
+ CountBasePtr noFilenameCount(noFilenameType->makeCount());
+ if (!noFilenameCount) {
+ return nullptr;
+ }
+
+ auto count = js::MakeUnique<Count>(*this, std::move(thenCount),
+ std::move(noFilenameCount));
+ if (!count) {
+ return nullptr;
+ }
+
+ return CountBasePtr(count.release());
+}
+
+void ByFilename::traceCount(CountBase& countBase, JSTracer* trc) {
+ Count& count = static_cast<Count&>(countBase);
+ for (Table::Range r = count.table.all(); !r.empty(); r.popFront()) {
+ r.front().value()->trace(trc);
+ }
+ count.noFilename->trace(trc);
+}
+
+bool ByFilename::count(CountBase& countBase, mozilla::MallocSizeOf mallocSizeOf,
+ const Node& node) {
+ Count& count = static_cast<Count&>(countBase);
+
+ const char* filename = node.scriptFilename();
+ if (!filename) {
+ return count.noFilename->count(mallocSizeOf, node);
+ }
+
+ UniqueCString myFilename = DuplicateString(filename);
+ if (!myFilename) {
+ return false;
+ }
+
+ Table::AddPtr p = count.table.lookupForAdd(myFilename);
+ if (!p) {
+ CountBasePtr thenCount(thenType->makeCount());
+ if (!thenCount ||
+ !count.table.add(p, std::move(myFilename), std::move(thenCount))) {
+ return false;
+ }
+ }
+ return p->value()->count(mallocSizeOf, node);
+}
+
+bool ByFilename::report(JSContext* cx, CountBase& countBase,
+ MutableHandleValue report) {
+ Count& count = static_cast<Count&>(countBase);
+
+ Rooted<PlainObject*> obj(
+ cx, countMapToObject(cx, count.table, [cx](const UniqueCString& key) {
+ const char* utf8chars = key.get();
+ return AtomizeUTF8Chars(cx, utf8chars, strlen(utf8chars));
+ }));
+ if (!obj) {
+ return false;
+ }
+
+ RootedValue noFilenameReport(cx);
+ if (!count.noFilename->report(cx, &noFilenameReport) ||
+ !DefineDataProperty(cx, obj, cx->names().noFilename, noFilenameReport)) {
+ return false;
+ }
+
+ report.setObject(*obj);
+ return true;
+}
+
+/*** Census Handler *********************************************************/
+
+JS_PUBLIC_API bool CensusHandler::operator()(
+ BreadthFirst<CensusHandler>& traversal, Node origin, const Edge& edge,
+ NodeData* referentData, bool first) {
+ // We're only interested in the first time we reach edge.referent, not
+ // in every edge arriving at that node.
+ if (!first) {
+ return true;
+ }
+
+  // Don't count nodes outside the debuggee zones. Do count things in the
+  // special atoms zone, but don't traverse their outgoing edges, on the
+  // assumption that they are shared resources that the debuggee is using.
+  // Symbols are always allocated in the atoms zone, even if they were
+  // created for exactly one compartment and never shared; this rule will
+  // include such nodes in the count.
+ const Node& referent = edge.referent;
+ Zone* zone = referent.zone();
+
+ if (census.targetZones.count() == 0 || census.targetZones.has(zone)) {
+ return rootCount->count(mallocSizeOf, referent);
+ }
+
+ if (zone && zone->isAtomsZone()) {
+ traversal.abandonReferent();
+ return rootCount->count(mallocSizeOf, referent);
+ }
+
+ traversal.abandonReferent();
+ return true;
+}
+
+/*** Parsing Breakdowns *****************************************************/
+
+static CountTypePtr ParseChildBreakdown(JSContext* cx, HandleObject breakdown,
+ PropertyName* prop) {
+ RootedValue v(cx);
+ if (!GetProperty(cx, breakdown, breakdown, prop, &v)) {
+ return nullptr;
+ }
+ return ParseBreakdown(cx, v);
+}
+
+JS_PUBLIC_API CountTypePtr ParseBreakdown(JSContext* cx,
+ HandleValue breakdownValue) {
+ if (breakdownValue.isUndefined()) {
+ // Construct the default type, { by: 'count' }
+ CountTypePtr simple(cx->new_<SimpleCount>());
+ return simple;
+ }
+
+ RootedObject breakdown(cx, ToObject(cx, breakdownValue));
+ if (!breakdown) {
+ return nullptr;
+ }
+
+ RootedValue byValue(cx);
+ if (!GetProperty(cx, breakdown, breakdown, cx->names().by, &byValue)) {
+ return nullptr;
+ }
+ RootedString byString(cx, ToString(cx, byValue));
+ if (!byString) {
+ return nullptr;
+ }
+ Rooted<JSLinearString*> by(cx, byString->ensureLinear(cx));
+ if (!by) {
+ return nullptr;
+ }
+
+ if (StringEqualsLiteral(by, "count")) {
+ RootedValue countValue(cx), bytesValue(cx);
+ if (!GetProperty(cx, breakdown, breakdown, cx->names().count,
+ &countValue) ||
+ !GetProperty(cx, breakdown, breakdown, cx->names().bytes, &bytesValue))
+ return nullptr;
+
+ // Both 'count' and 'bytes' default to true if omitted, but ToBoolean
+ // naturally treats 'undefined' as false; fix this up.
+    if (countValue.isUndefined()) {
+      countValue.setBoolean(true);
+    }
+    if (bytesValue.isUndefined()) {
+      bytesValue.setBoolean(true);
+    }
+
+ // Undocumented feature, for testing: { by: 'count' } breakdowns can have
+ // a 'label' property whose value is converted to a string and included as
+ // a 'label' property on the report object.
+ RootedValue label(cx);
+ if (!GetProperty(cx, breakdown, breakdown, cx->names().label, &label)) {
+ return nullptr;
+ }
+
+ UniqueTwoByteChars labelUnique(nullptr);
+ if (!label.isUndefined()) {
+ RootedString labelString(cx, ToString(cx, label));
+ if (!labelString) {
+ return nullptr;
+ }
+
+ labelUnique = JS_CopyStringCharsZ(cx, labelString);
+ if (!labelUnique) {
+ return nullptr;
+ }
+ }
+
+ CountTypePtr simple(cx->new_<SimpleCount>(
+ labelUnique, ToBoolean(countValue), ToBoolean(bytesValue)));
+ return simple;
+ }
+
+ if (StringEqualsLiteral(by, "bucket")) {
+ return CountTypePtr(cx->new_<BucketCount>());
+ }
+
+ if (StringEqualsLiteral(by, "objectClass")) {
+ CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ if (!thenType) {
+ return nullptr;
+ }
+
+ CountTypePtr otherType(
+ ParseChildBreakdown(cx, breakdown, cx->names().other));
+ if (!otherType) {
+ return nullptr;
+ }
+
+ return CountTypePtr(cx->new_<ByObjectClass>(thenType, otherType));
+ }
+
+ if (StringEqualsLiteral(by, "coarseType")) {
+ CountTypePtr objectsType(
+ ParseChildBreakdown(cx, breakdown, cx->names().objects));
+ if (!objectsType) {
+ return nullptr;
+ }
+ CountTypePtr scriptsType(
+ ParseChildBreakdown(cx, breakdown, cx->names().scripts));
+ if (!scriptsType) {
+ return nullptr;
+ }
+ CountTypePtr stringsType(
+ ParseChildBreakdown(cx, breakdown, cx->names().strings));
+ if (!stringsType) {
+ return nullptr;
+ }
+ CountTypePtr otherType(
+ ParseChildBreakdown(cx, breakdown, cx->names().other));
+ if (!otherType) {
+ return nullptr;
+ }
+ CountTypePtr domNodeType(
+ ParseChildBreakdown(cx, breakdown, cx->names().domNode));
+ if (!domNodeType) {
+ return nullptr;
+ }
+
+ return CountTypePtr(cx->new_<ByCoarseType>(
+ objectsType, scriptsType, stringsType, otherType, domNodeType));
+ }
+
+ if (StringEqualsLiteral(by, "internalType")) {
+ CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ if (!thenType) {
+ return nullptr;
+ }
+
+ return CountTypePtr(cx->new_<ByUbinodeType>(thenType));
+ }
+
+ if (StringEqualsLiteral(by, "descriptiveType")) {
+ CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ if (!thenType) {
+ return nullptr;
+ }
+ return CountTypePtr(cx->new_<ByDomObjectClass>(thenType));
+ }
+
+ if (StringEqualsLiteral(by, "allocationStack")) {
+ CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ if (!thenType) {
+ return nullptr;
+ }
+ CountTypePtr noStackType(
+ ParseChildBreakdown(cx, breakdown, cx->names().noStack));
+ if (!noStackType) {
+ return nullptr;
+ }
+
+ return CountTypePtr(cx->new_<ByAllocationStack>(thenType, noStackType));
+ }
+
+ if (StringEqualsLiteral(by, "filename")) {
+ CountTypePtr thenType(ParseChildBreakdown(cx, breakdown, cx->names().then));
+ if (!thenType) {
+ return nullptr;
+ }
+
+ CountTypePtr noFilenameType(
+ ParseChildBreakdown(cx, breakdown, cx->names().noFilename));
+ if (!noFilenameType) {
+ return nullptr;
+ }
+
+ return CountTypePtr(
+ cx->new_<ByFilename>(std::move(thenType), std::move(noFilenameType)));
+ }
+
+ // We didn't recognize the breakdown type; complain.
+ UniqueChars byBytes = QuoteString(cx, by, '"');
+ if (!byBytes) {
+ return nullptr;
+ }
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_DEBUG_CENSUS_BREAKDOWN, byBytes.get());
+ return nullptr;
+}
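+
+// For illustration (an example value, not taken from the sources above),
+// the parser accepts arbitrarily nested breakdowns such as:
+//
+//   { by: "coarseType",
+//     objects: { by: "objectClass", then: { by: "count" } },
+//     scripts: { by: "filename" },
+//     strings: { by: "count" },
+//     other: { by: "internalType" },
+//     domNode: { by: "descriptiveType" } }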
+
+// Get the default census breakdown:
+//
+// { by: "coarseType",
+// objects: { by: "objectClass" },
+// other: { by: "internalType" },
+// domNode: { by: "descriptiveType" }
+// }
+static CountTypePtr GetDefaultBreakdown(JSContext* cx) {
+ CountTypePtr byDomClass(cx->new_<SimpleCount>());
+ if (!byDomClass) {
+ return nullptr;
+ }
+ CountTypePtr byClass(cx->new_<SimpleCount>());
+ if (!byClass) {
+ return nullptr;
+ }
+
+ CountTypePtr byClassElse(cx->new_<SimpleCount>());
+ if (!byClassElse) {
+ return nullptr;
+ }
+
+ CountTypePtr objects(cx->new_<ByObjectClass>(byClass, byClassElse));
+ if (!objects) {
+ return nullptr;
+ }
+
+ CountTypePtr scripts(cx->new_<SimpleCount>());
+ if (!scripts) {
+ return nullptr;
+ }
+
+ CountTypePtr strings(cx->new_<SimpleCount>());
+ if (!strings) {
+ return nullptr;
+ }
+
+ CountTypePtr byType(cx->new_<SimpleCount>());
+ if (!byType) {
+ return nullptr;
+ }
+
+ CountTypePtr other(cx->new_<ByUbinodeType>(byType));
+ if (!other) {
+ return nullptr;
+ }
+ CountTypePtr domNode(cx->new_<ByDomObjectClass>(byDomClass));
+ if (!domNode) {
+ return nullptr;
+ }
+
+ return CountTypePtr(
+ cx->new_<ByCoarseType>(objects, scripts, strings, other, domNode));
+}
+
+JS_PUBLIC_API bool ParseCensusOptions(JSContext* cx, Census& census,
+ HandleObject options,
+ CountTypePtr& outResult) {
+ RootedValue breakdown(cx, UndefinedValue());
+ if (options &&
+ !GetProperty(cx, options, options, cx->names().breakdown, &breakdown)) {
+ return false;
+ }
+
+ outResult = breakdown.isUndefined() ? GetDefaultBreakdown(cx)
+ : ParseBreakdown(cx, breakdown);
+ return !!outResult;
+}
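+
+// A usage sketch (illustrative): Debugger.Memory's takeCensus passes its
+// options object here, e.g. { breakdown: { by: "count", bytes: true } };
+// with no options object (or no 'breakdown' property) the default
+// breakdown above is used.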
+
+} // namespace ubi
+} // namespace JS
diff --git a/js/src/vm/UbiNodeShortestPaths.cpp b/js/src/vm/UbiNodeShortestPaths.cpp
new file mode 100644
index 0000000000..f11cf19035
--- /dev/null
+++ b/js/src/vm/UbiNodeShortestPaths.cpp
@@ -0,0 +1,105 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/UbiNodeShortestPaths.h"
+
+#include "mozilla/Maybe.h"
+
+#include <stdio.h>
+#include <utility>
+
+#include "util/Text.h"
+
+namespace JS {
+namespace ubi {
+
+JS_PUBLIC_API BackEdge::Ptr BackEdge::clone() const {
+ auto clone = js::MakeUnique<BackEdge>();
+ if (!clone) {
+ return nullptr;
+ }
+
+ clone->predecessor_ = predecessor();
+ if (name()) {
+ clone->name_ = js::DuplicateString(name().get());
+ if (!clone->name_) {
+ return nullptr;
+ }
+ }
+ return clone;
+}
+
+#ifdef DEBUG
+
+static int32_t js_fputs(const char16_t* s, FILE* f) {
+ while (*s != 0) {
+ if (fputwc(wchar_t(*s), f) == static_cast<wint_t>(WEOF)) {
+ return WEOF;
+ }
+ s++;
+ }
+ return 1;
+}
+
+static void dumpNode(const JS::ubi::Node& node) {
+ fprintf(stderr, " %p ", (void*)node.identifier());
+ js_fputs(node.typeName(), stderr);
+ if (node.coarseType() == JS::ubi::CoarseType::Object) {
+ if (const char* clsName = node.jsObjectClassName()) {
+ fprintf(stderr, " [object %s]", clsName);
+ }
+ }
+ fputc('\n', stderr);
+}
+
+JS_PUBLIC_API void dumpPaths(JSContext* cx, Node node,
+ uint32_t maxNumPaths /* = 10 */) {
+ JS::ubi::RootList rootList(cx, true);
+ auto [ok, nogc] = rootList.init();
+ MOZ_ASSERT(ok);
+
+ NodeSet targets;
+ ok = targets.putNew(node);
+ MOZ_ASSERT(ok);
+
+ auto paths = ShortestPaths::Create(cx, nogc, maxNumPaths, &rootList,
+ std::move(targets));
+ MOZ_ASSERT(paths.isSome());
+
+ int i = 0;
+ ok = paths->forEachPath(node, [&](Path& path) {
+ fprintf(stderr, "Path %d:\n", i++);
+ for (auto backEdge : path) {
+ dumpNode(backEdge->predecessor());
+ fprintf(stderr, " |\n");
+ fprintf(stderr, " |\n");
+ fprintf(stderr, " '");
+
+ const char16_t* name = backEdge->name().get();
+ if (!name) {
+ name = u"<no edge name>";
+ }
+ js_fputs(name, stderr);
+ fprintf(stderr, "'\n");
+
+ fprintf(stderr, " |\n");
+ fprintf(stderr, " V\n");
+ }
+
+ dumpNode(node);
+ fputc('\n', stderr);
+ return true;
+ });
+ MOZ_ASSERT(ok);
+
+ if (i == 0) {
+ fprintf(stderr, "No retaining paths found.\n");
+ }
+}
+#endif
+
+} // namespace ubi
+} // namespace JS
diff --git a/js/src/vm/Uint8Clamped.h b/js/src/vm/Uint8Clamped.h
new file mode 100644
index 0000000000..5cc391f4b7
--- /dev/null
+++ b/js/src/vm/Uint8Clamped.h
@@ -0,0 +1,121 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Uint8Clamped_h
+#define vm_Uint8Clamped_h
+
+#include <stdint.h>
+
+namespace js {
+
+extern uint32_t ClampDoubleToUint8(const double x);
+
+struct uint8_clamped {
+ uint8_t val;
+
+ uint8_clamped() = default;
+ uint8_clamped(const uint8_clamped& other) = default;
+
+ // invoke our assignment helpers for constructor conversion
+ explicit uint8_clamped(uint8_t x) { *this = x; }
+ explicit uint8_clamped(uint16_t x) { *this = x; }
+ explicit uint8_clamped(uint32_t x) { *this = x; }
+ explicit uint8_clamped(uint64_t x) { *this = x; }
+ explicit uint8_clamped(int8_t x) { *this = x; }
+ explicit uint8_clamped(int16_t x) { *this = x; }
+ explicit uint8_clamped(int32_t x) { *this = x; }
+ explicit uint8_clamped(int64_t x) { *this = x; }
+ explicit uint8_clamped(double x) { *this = x; }
+
+ uint8_clamped& operator=(const uint8_clamped& x) = default;
+
+ uint8_clamped& operator=(uint8_t x) {
+ val = x;
+ return *this;
+ }
+
+ uint8_clamped& operator=(uint16_t x) {
+ val = (x > 255) ? 255 : uint8_t(x);
+ return *this;
+ }
+
+ uint8_clamped& operator=(uint32_t x) {
+ val = (x > 255) ? 255 : uint8_t(x);
+ return *this;
+ }
+
+ uint8_clamped& operator=(uint64_t x) {
+ val = (x > 255) ? 255 : uint8_t(x);
+ return *this;
+ }
+
+ uint8_clamped& operator=(int8_t x) {
+ val = (x >= 0) ? uint8_t(x) : 0;
+ return *this;
+ }
+
+ uint8_clamped& operator=(int16_t x) {
+ val = (x >= 0) ? ((x < 255) ? uint8_t(x) : 255) : 0;
+ return *this;
+ }
+
+ uint8_clamped& operator=(int32_t x) {
+ val = (x >= 0) ? ((x < 255) ? uint8_t(x) : 255) : 0;
+ return *this;
+ }
+
+ uint8_clamped& operator=(int64_t x) {
+ val = (x >= 0) ? ((x < 255) ? uint8_t(x) : 255) : 0;
+ return *this;
+ }
+
+ uint8_clamped& operator=(const double x) {
+ val = uint8_t(ClampDoubleToUint8(x));
+ return *this;
+ }
+
+ operator uint8_t() const { return val; }
+
+ void staticAsserts() {
+ static_assert(sizeof(uint8_clamped) == 1,
+ "uint8_clamped must be layout-compatible with uint8_t");
+ }
+};
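+
+// Behavior sketch (illustrative; the double case defers to
+// ClampDoubleToUint8, which follows ECMAScript's ToUint8Clamp and rounds
+// ties to even):
+//
+//   uint8_clamped a(uint32_t(300));  // a == 255 (clamped high)
+//   uint8_clamped b(int32_t(-7));    // b == 0   (clamped low)
+//   uint8_clamped c(1.5);            // c == 2   (tie rounds to even)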
+
+/* Note that we can't use std::numeric_limits here due to uint8_clamped. */
+template <typename T>
+inline constexpr bool TypeIsFloatingPoint() {
+ return false;
+}
+template <>
+inline constexpr bool TypeIsFloatingPoint<float>() {
+ return true;
+}
+template <>
+inline constexpr bool TypeIsFloatingPoint<double>() {
+ return true;
+}
+
+template <typename T>
+inline constexpr bool TypeIsUnsigned() {
+ return false;
+}
+template <>
+inline constexpr bool TypeIsUnsigned<uint8_t>() {
+ return true;
+}
+template <>
+inline constexpr bool TypeIsUnsigned<uint16_t>() {
+ return true;
+}
+template <>
+inline constexpr bool TypeIsUnsigned<uint32_t>() {
+ return true;
+}
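+
+// For instance, TypeIsFloatingPoint<double>() is true, while
+// TypeIsUnsigned<uint8_clamped>() falls through to the primary template
+// and is false -- a distinction std::numeric_limits could not make here,
+// since it is not specialized for uint8_clamped.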
+
+} // namespace js
+
+#endif // vm_Uint8Clamped_h
diff --git a/js/src/vm/UsageStatistics.cpp b/js/src/vm/UsageStatistics.cpp
new file mode 100644
index 0000000000..a3574d5154
--- /dev/null
+++ b/js/src/vm/UsageStatistics.cpp
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/friend/UsageStatistics.h"
+
+#include "vm/JSContext.h" // JSContext
+#include "vm/Runtime.h" // JSRuntime
+
+void JS_SetAccumulateTelemetryCallback(
+ JSContext* cx, JSAccumulateTelemetryDataCallback callback) {
+ cx->runtime()->setTelemetryCallback(cx->runtime(), callback);
+}
+
+void JS_SetSetUseCounterCallback(JSContext* cx,
+ JSSetUseCounterCallback callback) {
+ cx->runtime()->setUseCounterCallback(cx->runtime(), callback);
+}
diff --git a/js/src/vm/Value.cpp b/js/src/vm/Value.cpp
new file mode 100644
index 0000000000..94f5f86731
--- /dev/null
+++ b/js/src/vm/Value.cpp
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/Value.h"
+
+#include "mozilla/Assertions.h"
+
+#include <inttypes.h>
+
+static const JS::Value JSVAL_NULL =
+ JS::Value::fromTagAndPayload(JSVAL_TAG_NULL, 0);
+static const JS::Value JSVAL_FALSE =
+ JS::Value::fromTagAndPayload(JSVAL_TAG_BOOLEAN, false);
+static const JS::Value JSVAL_TRUE =
+ JS::Value::fromTagAndPayload(JSVAL_TAG_BOOLEAN, true);
+static const JS::Value JSVAL_VOID =
+ JS::Value::fromTagAndPayload(JSVAL_TAG_UNDEFINED, 0);
+static const mozilla::Maybe<JS::Value> JSVAL_NOTHING;
+
+namespace JS {
+
+const HandleValue NullHandleValue =
+ HandleValue::fromMarkedLocation(&JSVAL_NULL);
+const HandleValue UndefinedHandleValue =
+ HandleValue::fromMarkedLocation(&JSVAL_VOID);
+const HandleValue TrueHandleValue =
+ HandleValue::fromMarkedLocation(&JSVAL_TRUE);
+const HandleValue FalseHandleValue =
+ HandleValue::fromMarkedLocation(&JSVAL_FALSE);
+const Handle<mozilla::Maybe<Value>> NothingHandleValue =
+ Handle<mozilla::Maybe<Value>>::fromMarkedLocation(&JSVAL_NOTHING);
+
+} // namespace JS
+
+void js::ReportBadValueTypeAndCrash(const JS::Value& value) {
+ MOZ_CRASH_UNSAFE_PRINTF("JS::Value has illegal type: 0x%" PRIx64,
+ value.asRawBits());
+}
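+
+// Editorial sketch (not part of the upstream patch): the singleton handles
+// above let callers pass common values without rooting anything, e.g.:
+//
+//   void SetOrClear(JS::MutableHandleValue out, bool have, JS::HandleValue v) {
+//     out.set(have ? v : JS::UndefinedHandleValue);
+//   }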
diff --git a/js/src/vm/Warnings.cpp b/js/src/vm/Warnings.cpp
new file mode 100644
index 0000000000..d73e4f1116
--- /dev/null
+++ b/js/src/vm/Warnings.cpp
@@ -0,0 +1,105 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/Warnings.h"
+#include "vm/Warnings.h"
+
+#include <stdarg.h> // va_{list,start,end}
+
+#include "jstypes.h" // JS_PUBLIC_API
+
+#include "js/Context.h" // js::AssertHeapIsIdle
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage
+#include "vm/ErrorReporting.h" // IsWarning
+#include "vm/JSContext.h" // js::ArgumentsAre{ASCII,Latin1,UTF8}, js::ReportError{Number}VA
+
+using js::ArgumentsAreASCII;
+using js::ArgumentsAreLatin1;
+using js::ArgumentsAreUTF8;
+using js::AssertHeapIsIdle;
+using js::GetErrorMessage;
+using js::IsWarning;
+using js::ReportErrorVA;
+
+JS_PUBLIC_API bool JS::WarnASCII(JSContext* cx, const char* format, ...) {
+ va_list ap;
+ bool ok;
+
+ AssertHeapIsIdle();
+ va_start(ap, format);
+ ok = ReportErrorVA(cx, IsWarning::Yes, format, ArgumentsAreASCII, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API bool JS::WarnLatin1(JSContext* cx, const char* format, ...) {
+ va_list ap;
+ bool ok;
+
+ AssertHeapIsIdle();
+ va_start(ap, format);
+ ok = ReportErrorVA(cx, IsWarning::Yes, format, ArgumentsAreLatin1, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API bool JS::WarnUTF8(JSContext* cx, const char* format, ...) {
+ va_list ap;
+ bool ok;
+
+ AssertHeapIsIdle();
+ va_start(ap, format);
+ ok = ReportErrorVA(cx, IsWarning::Yes, format, ArgumentsAreUTF8, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API JS::WarningReporter JS::GetWarningReporter(JSContext* cx) {
+ return cx->runtime()->warningReporter;
+}
+
+JS_PUBLIC_API JS::WarningReporter JS::SetWarningReporter(
+ JSContext* cx, WarningReporter reporter) {
+ WarningReporter older = cx->runtime()->warningReporter;
+ cx->runtime()->warningReporter = reporter;
+ return older;
+}
+
+bool js::WarnNumberASCII(JSContext* cx, const unsigned errorNumber, ...) {
+ va_list ap;
+ va_start(ap, errorNumber);
+ bool ok = ReportErrorNumberVA(cx, IsWarning::Yes, GetErrorMessage, nullptr,
+ errorNumber, ArgumentsAreASCII, ap);
+ va_end(ap);
+ return ok;
+}
+
+bool js::WarnNumberLatin1(JSContext* cx, const unsigned errorNumber, ...) {
+ va_list ap;
+ va_start(ap, errorNumber);
+ bool ok = ReportErrorNumberVA(cx, IsWarning::Yes, GetErrorMessage, nullptr,
+ errorNumber, ArgumentsAreLatin1, ap);
+ va_end(ap);
+ return ok;
+}
+
+bool js::WarnNumberUTF8(JSContext* cx, const unsigned errorNumber, ...) {
+ va_list ap;
+ va_start(ap, errorNumber);
+ bool ok = ReportErrorNumberVA(cx, IsWarning::Yes, GetErrorMessage, nullptr,
+ errorNumber, ArgumentsAreUTF8, ap);
+ va_end(ap);
+ return ok;
+}
+
+bool js::WarnNumberUC(JSContext* cx, const unsigned errorNumber, ...) {
+ va_list ap;
+ va_start(ap, errorNumber);
+ bool ok = ReportErrorNumberVA(cx, IsWarning::Yes, GetErrorMessage, nullptr,
+ errorNumber, ArgumentsAreUnicode, ap);
+ va_end(ap);
+ return ok;
+}
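+
+// Editorial sketch (not part of the upstream patch): a typical internal call
+// site passes a JSMSG_* constant plus its message arguments
+// (JSMSG_EXAMPLE_WARNING below is a placeholder, not a real message number):
+//
+//   if (!js::WarnNumberASCII(cx, JSMSG_EXAMPLE_WARNING, "someFeature")) {
+//     return false;  // the warning was upgraded to an error, or reporting
+//                    // itself failed
+//   }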
diff --git a/js/src/vm/Warnings.h b/js/src/vm/Warnings.h
new file mode 100644
index 0000000000..16fc0c844e
--- /dev/null
+++ b/js/src/vm/Warnings.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Warnings_h
+#define vm_Warnings_h
+
+struct JSContext;
+
+namespace js {
+
+// Internal API mirroring the public JS_ReportErrorNumber API.
+// We currently reuse the same errorNumbers.
+
+bool WarnNumberASCII(JSContext* cx, const unsigned errorNumber, ...);
+
+bool WarnNumberLatin1(JSContext* cx, const unsigned errorNumber, ...);
+
+bool WarnNumberUTF8(JSContext* cx, const unsigned errorNumber, ...);
+
+bool WarnNumberUC(JSContext* cx, const unsigned errorNumber, ...);
+
+} // namespace js
+
+#endif /* vm_Warnings_h */
diff --git a/js/src/vm/Watchtower.cpp b/js/src/vm/Watchtower.cpp
new file mode 100644
index 0000000000..96b5179499
--- /dev/null
+++ b/js/src/vm/Watchtower.cpp
@@ -0,0 +1,296 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Watchtower.h"
+
+#include "js/CallAndConstruct.h"
+#include "vm/Compartment.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h"
+#include "vm/PlainObject.h"
+#include "vm/Realm.h"
+
+#include "vm/Compartment-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/Realm-inl.h"
+#include "vm/Shape-inl.h"
+
+using namespace js;
+
+static bool AddToWatchtowerLog(JSContext* cx, const char* kind,
+ HandleObject obj, HandleValue extra) {
+ // Add an object storing {kind, object, extra} to the log for testing
+ // purposes.
+
+ MOZ_ASSERT(obj->useWatchtowerTestingLog());
+
+ RootedString kindString(cx, NewStringCopyZ<CanGC>(cx, kind));
+ if (!kindString) {
+ return false;
+ }
+
+ Rooted<PlainObject*> logObj(cx, NewPlainObjectWithProto(cx, nullptr));
+ if (!logObj) {
+ return false;
+ }
+ if (!JS_DefineProperty(cx, logObj, "kind", kindString, JSPROP_ENUMERATE)) {
+ return false;
+ }
+ if (!JS_DefineProperty(cx, logObj, "object", obj, JSPROP_ENUMERATE)) {
+ return false;
+ }
+ if (!JS_DefineProperty(cx, logObj, "extra", extra, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ if (!cx->runtime()->watchtowerTestingLog->append(logObj)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+static bool ReshapeForShadowedProp(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id) {
+ // |obj| has been used as the prototype of another object. Check if we're
+ // shadowing a property on its proto chain. In this case we need to reshape
+ // that object for shape teleporting to work correctly.
+ //
+ // See also the 'Shape Teleporting Optimization' comment in jit/CacheIR.cpp.
+
+ MOZ_ASSERT(obj->isUsedAsPrototype());
+
+ // Lookups on integer ids cannot be cached through prototypes.
+ if (id.isInt()) {
+ return true;
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ while (proto) {
+ // Lookups will not be cached through non-native protos.
+ if (!proto->is<NativeObject>()) {
+ break;
+ }
+
+ if (proto->as<NativeObject>().contains(cx, id)) {
+ return JSObject::setInvalidatedTeleporting(cx, proto);
+ }
+
+ proto = proto->staticPrototype();
+ }
+
+ return true;
+}
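+
+// Editorial sketch (not part of the upstream patch): the shadowing case the
+// function above handles, at the JS level:
+//
+//   const protoA = {x: 1};
+//   const protoB = Object.create(protoA);
+//   const obj = Object.create(protoB);
+//   // ICs may cache obj.x straight off protoA's shape, "teleporting" past
+//   // protoB.
+//   protoB.x = 2;  // the shadowing add must reshape protoA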
+
+static void InvalidateMegamorphicCache(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ // The megamorphic cache only checks the receiver object's shape. We need to
+ // invalidate the cache when a prototype object changes its set of properties,
+ // to account for cached properties that are deleted, turned into an accessor
+ // property, or shadowed by another object on the proto chain.
+
+ MOZ_ASSERT(obj->isUsedAsPrototype());
+
+ cx->caches().megamorphicCache.bumpGeneration();
+ cx->caches().megamorphicSetPropCache->bumpGeneration();
+}
+
+// static
+bool Watchtower::watchPropertyAddSlow(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id) {
+ MOZ_ASSERT(watchesPropertyAdd(obj));
+
+ if (obj->isUsedAsPrototype()) {
+ if (!ReshapeForShadowedProp(cx, obj, id)) {
+ return false;
+ }
+ if (!id.isInt()) {
+ InvalidateMegamorphicCache(cx, obj);
+ }
+ }
+
+ if (MOZ_UNLIKELY(obj->useWatchtowerTestingLog())) {
+ RootedValue val(cx, IdToValue(id));
+ if (!AddToWatchtowerLog(cx, "add-prop", obj, val)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool ReshapeForProtoMutation(JSContext* cx, HandleObject obj) {
+ // To avoid the JIT guarding on each prototype in the proto chain to detect
+ // prototype mutation, we can instead reshape the rest of the proto chain such
+ // that a guard on any of them is sufficient. To avoid excessive reshaping and
+ // invalidation, we apply heuristics to decide when to apply this and when
+ // to require a guard.
+ //
+ // There are two cases:
+ //
+ // (1) The object is not marked IsUsedAsPrototype. This is the common case.
+ // Because shape implies proto, we rely on the caller changing the
+ // object's shape. The JIT guards on this object's shape or prototype so
+ // there's nothing we have to do here for objects on the proto chain.
+ //
+ // (2) The object is marked IsUsedAsPrototype. This implies the object may be
+ // participating in shape teleporting. To invalidate JIT ICs depending on
+ // the proto chain being unchanged, set the InvalidatedTeleporting shape
+ // flag for this object and objects on its proto chain.
+ //
+ // This flag disables future shape teleporting attempts, so next time this
+ // happens the loop below will be a no-op.
+ //
+ // NOTE: We only handle NativeObjects and don't propagate reshapes through
+ // any non-native objects on the chain.
+ //
+ // See Also:
+ // - GeneratePrototypeGuards
+ // - GeneratePrototypeHoleGuards
+
+ MOZ_ASSERT(obj->isUsedAsPrototype());
+
+ RootedObject pobj(cx, obj);
+
+ while (pobj && pobj->is<NativeObject>()) {
+ if (!pobj->hasInvalidatedTeleporting()) {
+ if (!JSObject::setInvalidatedTeleporting(cx, pobj)) {
+ return false;
+ }
+ }
+ pobj = pobj->staticPrototype();
+ }
+
+ return true;
+}
+
+static bool WatchProtoChangeImpl(JSContext* cx, HandleObject obj) {
+ if (!obj->isUsedAsPrototype()) {
+ return true;
+ }
+ if (!ReshapeForProtoMutation(cx, obj)) {
+ return false;
+ }
+ if (obj->is<NativeObject>()) {
+ InvalidateMegamorphicCache(cx, obj.as<NativeObject>());
+ }
+ return true;
+}
+
+// static
+bool Watchtower::watchProtoChangeSlow(JSContext* cx, HandleObject obj) {
+ MOZ_ASSERT(watchesProtoChange(obj));
+
+ if (!WatchProtoChangeImpl(cx, obj)) {
+ return false;
+ }
+
+ if (MOZ_UNLIKELY(obj->useWatchtowerTestingLog())) {
+ if (!AddToWatchtowerLog(cx, "proto-change", obj,
+ JS::UndefinedHandleValue)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// static
+bool Watchtower::watchPropertyRemoveSlow(JSContext* cx,
+ Handle<NativeObject*> obj,
+ HandleId id) {
+ MOZ_ASSERT(watchesPropertyRemove(obj));
+
+ if (obj->isUsedAsPrototype() && !id.isInt()) {
+ InvalidateMegamorphicCache(cx, obj);
+ }
+
+ if (obj->isGenerationCountedGlobal()) {
+ obj->as<GlobalObject>().bumpGenerationCount();
+ }
+
+ if (MOZ_UNLIKELY(obj->useWatchtowerTestingLog())) {
+ RootedValue val(cx, IdToValue(id));
+ if (!AddToWatchtowerLog(cx, "remove-prop", obj, val)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// static
+bool Watchtower::watchPropertyChangeSlow(JSContext* cx,
+ Handle<NativeObject*> obj, HandleId id,
+ PropertyFlags flags) {
+ MOZ_ASSERT(watchesPropertyChange(obj));
+
+ if (obj->isUsedAsPrototype() && !id.isInt()) {
+ InvalidateMegamorphicCache(cx, obj);
+ }
+
+ if (obj->isGenerationCountedGlobal()) {
+ // The global generation counter only cares whether a property
+ // changes from data property to accessor or vice-versa. Changing
+ // the flags on a property doesn't matter.
+ uint32_t propIndex;
+ Rooted<PropMap*> map(cx, obj->shape()->lookup(cx, id, &propIndex));
+ MOZ_ASSERT(map);
+ PropertyInfo prop = map->getPropertyInfo(propIndex);
+ bool wasAccessor = prop.isAccessorProperty();
+ bool isAccessor = flags.isAccessorProperty();
+ if (wasAccessor != isAccessor) {
+ obj->as<GlobalObject>().bumpGenerationCount();
+ }
+ }
+
+ if (MOZ_UNLIKELY(obj->useWatchtowerTestingLog())) {
+ RootedValue val(cx, IdToValue(id));
+ if (!AddToWatchtowerLog(cx, "change-prop", obj, val)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// static
+bool Watchtower::watchFreezeOrSealSlow(JSContext* cx,
+ Handle<NativeObject*> obj) {
+ MOZ_ASSERT(watchesFreezeOrSeal(obj));
+
+ if (MOZ_UNLIKELY(obj->useWatchtowerTestingLog())) {
+ if (!AddToWatchtowerLog(cx, "freeze-or-seal", obj,
+ JS::UndefinedHandleValue)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// static
+bool Watchtower::watchObjectSwapSlow(JSContext* cx, HandleObject a,
+ HandleObject b) {
+ MOZ_ASSERT(watchesObjectSwap(a, b));
+
+ // If we're swapping an object that's used as prototype, we're mutating the
+ // proto chains of other objects. Treat this as a proto change to ensure we
+ // invalidate shape teleporting and megamorphic caches.
+ if (!WatchProtoChangeImpl(cx, a)) {
+ return false;
+ }
+ if (!WatchProtoChangeImpl(cx, b)) {
+ return false;
+ }
+
+ // Note: we don't invoke the testing callback for swap because the objects may
+ // not be safe to expose to JS at this point. See bug 1754699.
+
+ return true;
+}
diff --git a/js/src/vm/Watchtower.h b/js/src/vm/Watchtower.h
new file mode 100644
index 0000000000..b606da000b
--- /dev/null
+++ b/js/src/vm/Watchtower.h
@@ -0,0 +1,120 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Watchtower_h
+#define vm_Watchtower_h
+
+#include "js/TypeDecls.h"
+#include "vm/NativeObject.h"
+
+namespace js {
+
+// [SMDOC] Watchtower
+//
+// Watchtower is a framework to hook into changes to certain objects. This gives
+// us the ability to, for instance, invalidate caches or purge Warp code on
+// object layout changes.
+//
+// Watchtower is only used for objects with certain ObjectFlags set on the
+// Shape. This minimizes performance overhead for most objects.
+//
+// We currently use Watchtower for:
+//
+// - Invalidating the shape teleporting optimization. See the "Shape Teleporting
+// Optimization" SMDOC comment in CacheIR.cpp.
+//
+// - Invalidating the MegamorphicCache, a property lookup cache for megamorphic
+// property accesses. See the SMDOC comment in vm/Caches.h.
+//
+// There's also a testing mechanism that lets us write tests for Watchtower
+// hooks. See setWatchtowerCallback and addWatchtowerTarget defined in
+// TestingFunctions.cpp.
+class Watchtower {
+ static bool watchPropertyAddSlow(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id);
+ static bool watchPropertyRemoveSlow(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id);
+ static bool watchPropertyChangeSlow(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, PropertyFlags flags);
+ static bool watchFreezeOrSealSlow(JSContext* cx, Handle<NativeObject*> obj);
+ static bool watchProtoChangeSlow(JSContext* cx, HandleObject obj);
+ static bool watchObjectSwapSlow(JSContext* cx, HandleObject a,
+ HandleObject b);
+
+ public:
+ static bool watchesPropertyAdd(NativeObject* obj) {
+ return obj->hasAnyFlag(
+ {ObjectFlag::IsUsedAsPrototype, ObjectFlag::UseWatchtowerTestingLog});
+ }
+ static bool watchesPropertyRemove(NativeObject* obj) {
+ return obj->hasAnyFlag({ObjectFlag::IsUsedAsPrototype,
+ ObjectFlag::GenerationCountedGlobal,
+ ObjectFlag::UseWatchtowerTestingLog});
+ }
+ static bool watchesPropertyChange(NativeObject* obj) {
+ return obj->hasAnyFlag({ObjectFlag::IsUsedAsPrototype,
+ ObjectFlag::GenerationCountedGlobal,
+ ObjectFlag::UseWatchtowerTestingLog});
+ }
+ static bool watchesFreezeOrSeal(NativeObject* obj) {
+ return obj->hasAnyFlag({ObjectFlag::UseWatchtowerTestingLog});
+ }
+ static bool watchesProtoChange(JSObject* obj) {
+ return obj->hasAnyFlag(
+ {ObjectFlag::IsUsedAsPrototype, ObjectFlag::UseWatchtowerTestingLog});
+ }
+ static bool watchesObjectSwap(JSObject* a, JSObject* b) {
+ auto watches = [](JSObject* obj) {
+ return obj->hasAnyFlag(
+ {ObjectFlag::IsUsedAsPrototype, ObjectFlag::UseWatchtowerTestingLog});
+ };
+ return watches(a) || watches(b);
+ }
+
+ static bool watchPropertyAdd(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id) {
+ if (MOZ_LIKELY(!watchesPropertyAdd(obj))) {
+ return true;
+ }
+ return watchPropertyAddSlow(cx, obj, id);
+ }
+ static bool watchPropertyRemove(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id) {
+ if (MOZ_LIKELY(!watchesPropertyRemove(obj))) {
+ return true;
+ }
+ return watchPropertyRemoveSlow(cx, obj, id);
+ }
+ static bool watchPropertyChange(JSContext* cx, Handle<NativeObject*> obj,
+ HandleId id, PropertyFlags flags) {
+ if (MOZ_LIKELY(!watchesPropertyChange(obj))) {
+ return true;
+ }
+ return watchPropertyChangeSlow(cx, obj, id, flags);
+ }
+ static bool watchFreezeOrSeal(JSContext* cx, Handle<NativeObject*> obj) {
+ if (MOZ_LIKELY(!watchesFreezeOrSeal(obj))) {
+ return true;
+ }
+ return watchFreezeOrSealSlow(cx, obj);
+ }
+ static bool watchProtoChange(JSContext* cx, HandleObject obj) {
+ if (MOZ_LIKELY(!watchesProtoChange(obj))) {
+ return true;
+ }
+ return watchProtoChangeSlow(cx, obj);
+ }
+ static bool watchObjectSwap(JSContext* cx, HandleObject a, HandleObject b) {
+ if (MOZ_LIKELY(!watchesObjectSwap(a, b))) {
+ return true;
+ }
+ return watchObjectSwapSlow(cx, a, b);
+ }
+};
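+
+// Editorial sketch (not part of the upstream patch): mutation sites are
+// expected to call the inline watch* wrappers above, which keeps the common
+// case to a single flag check. A hypothetical caller:
+//
+//   bool AddPropertyWithHooks(JSContext* cx, Handle<NativeObject*> obj,
+//                             HandleId id) {
+//     if (!Watchtower::watchPropertyAdd(cx, obj, id)) {
+//       return false;  // e.g. OOM while reshaping the proto chain
+//     }
+//     // ... perform the actual property addition ...
+//     return true;
+//   }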
+
+} // namespace js
+
+#endif /* vm_Watchtower_h */
diff --git a/js/src/vm/WellKnownAtom.cpp b/js/src/vm/WellKnownAtom.cpp
new file mode 100644
index 0000000000..1ca810318b
--- /dev/null
+++ b/js/src/vm/WellKnownAtom.cpp
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/WellKnownAtom.h"
+
+#define DECLARE_CONST_CHAR_STR(IDPART, _, TEXT) char js_##IDPART##_str[] = TEXT;
+FOR_EACH_COMMON_PROPERTYNAME(DECLARE_CONST_CHAR_STR)
+#undef DECLARE_CONST_CHAR_STR
+
+#define DECLARE_CONST_CHAR_STR(NAME, _) char js_##NAME##_str[] = #NAME;
+JS_FOR_EACH_PROTOTYPE(DECLARE_CONST_CHAR_STR)
+#undef DECLARE_CONST_CHAR_STR
+
+#define DECLARE_CONST_CHAR_STR(NAME) char js_##NAME##_str[] = #NAME;
+JS_FOR_EACH_WELL_KNOWN_SYMBOL(DECLARE_CONST_CHAR_STR)
+#undef DECLARE_CONST_CHAR_STR
+
+js::WellKnownAtomInfo js::wellKnownAtomInfos[] = {
+#define ENUM_ENTRY_(IDPART, _, _2) \
+ {uint32_t(sizeof(js_##IDPART##_str) - 1), \
+ mozilla::HashStringKnownLength(js_##IDPART##_str, \
+ sizeof(js_##IDPART##_str) - 1), \
+ js_##IDPART##_str},
+ FOR_EACH_COMMON_PROPERTYNAME(ENUM_ENTRY_)
+#undef ENUM_ENTRY_
+
+#define ENUM_ENTRY_(NAME, _) \
+ {uint32_t(sizeof(js_##NAME##_str) - 1), \
+ mozilla::HashStringKnownLength(js_##NAME##_str, \
+ sizeof(js_##NAME##_str) - 1), \
+ js_##NAME##_str},
+ JS_FOR_EACH_PROTOTYPE(ENUM_ENTRY_)
+#undef ENUM_ENTRY_
+
+#define ENUM_ENTRY_(NAME) \
+ {uint32_t(sizeof(js_##NAME##_str) - 1), \
+ mozilla::HashStringKnownLength(js_##NAME##_str, \
+ sizeof(js_##NAME##_str) - 1), \
+ js_##NAME##_str},
+ JS_FOR_EACH_WELL_KNOWN_SYMBOL(ENUM_ENTRY_)
+#undef ENUM_ENTRY_
+};
diff --git a/js/src/vm/WellKnownAtom.h b/js/src/vm/WellKnownAtom.h
new file mode 100644
index 0000000000..c112eb61ed
--- /dev/null
+++ b/js/src/vm/WellKnownAtom.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_WellKnownAtom_h
+#define vm_WellKnownAtom_h
+
+#include "mozilla/HashFunctions.h" // mozilla::HashNumber, mozilla::HashStringKnownLength
+
+#include <stdint.h> // uint32_t
+
+#include "js/ProtoKey.h" // JS_FOR_EACH_PROTOTYPE
+#include "js/Symbol.h" // JS_FOR_EACH_WELL_KNOWN_SYMBOL
+#include "vm/CommonPropertyNames.h" // FOR_EACH_COMMON_PROPERTYNAME
+
+/* Well-known predefined C strings. */
+#define DECLARE_CONST_CHAR_STR(IDPART, _, TEXT) extern char js_##IDPART##_str[];
+FOR_EACH_COMMON_PROPERTYNAME(DECLARE_CONST_CHAR_STR)
+#undef DECLARE_CONST_CHAR_STR
+
+#define DECLARE_CONST_CHAR_STR(NAME, _) extern char js_##NAME##_str[];
+JS_FOR_EACH_PROTOTYPE(DECLARE_CONST_CHAR_STR)
+#undef DECLARE_CONST_CHAR_STR
+
+#define DECLARE_CONST_CHAR_STR(NAME) extern char js_##NAME##_str[];
+JS_FOR_EACH_WELL_KNOWN_SYMBOL(DECLARE_CONST_CHAR_STR)
+#undef DECLARE_CONST_CHAR_STR
+
+namespace js {
+
+// An index for well-known atoms.
+//
+// GetWellKnownAtom in ParserAtom.cpp relies on the fact that
+// JSAtomState fields and this enum variants use the same order.
+enum class WellKnownAtomId : uint32_t {
+#define ENUM_ENTRY_(_, NAME, _2) NAME,
+ FOR_EACH_COMMON_PROPERTYNAME(ENUM_ENTRY_)
+#undef ENUM_ENTRY_
+
+#define ENUM_ENTRY_(NAME, _) NAME,
+ JS_FOR_EACH_PROTOTYPE(ENUM_ENTRY_)
+#undef ENUM_ENTRY_
+
+#define ENUM_ENTRY_(NAME) NAME,
+ JS_FOR_EACH_WELL_KNOWN_SYMBOL(ENUM_ENTRY_)
+#undef ENUM_ENTRY_
+
+ Limit,
+};
+
+struct WellKnownAtomInfo {
+ uint32_t length;
+ mozilla::HashNumber hash;
+ const char* content;
+};
+
+extern WellKnownAtomInfo wellKnownAtomInfos[];
+
+inline const WellKnownAtomInfo& GetWellKnownAtomInfo(WellKnownAtomId atomId) {
+ return wellKnownAtomInfos[uint32_t(atomId)];
+}
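+
+// Editorial sketch (not part of the upstream patch): for a hypothetical
+// property-name entry (apply, apply, "apply"), the machinery above yields:
+//
+//   js_apply_str                                  // the C string "apply"
+//   WellKnownAtomId::apply                        // its index in the enum
+//   GetWellKnownAtomInfo(WellKnownAtomId::apply)  // {5, hash("apply"), ...}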
+
+} /* namespace js */
+
+#endif // vm_WellKnownAtom_h
diff --git a/js/src/vm/WindowProxy.cpp b/js/src/vm/WindowProxy.cpp
new file mode 100644
index 0000000000..60f8160ed4
--- /dev/null
+++ b/js/src/vm/WindowProxy.cpp
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* WindowProxy and Window implementation, for the web browser embedding. */
+
+#include "js/friend/WindowProxy.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include "js/Context.h" // js::AssertHeapIsIdle
+#include "vm/GlobalObject.h" // js::GlobalObject
+#include "vm/JSContext.h" // JSContext, CHECK_THREAD
+#include "vm/JSObject.h" // JSObject
+#include "vm/Runtime.h" // JSRuntime
+
+#include "vm/JSContext-inl.h" // JSContext::check
+#include "vm/JSObject-inl.h" // JSObject::nonCCWGlobal
+
+using JS::Handle;
+
+void js::SetWindowProxyClass(JSContext* cx, const JSClass* clasp) {
+ MOZ_ASSERT(!cx->runtime()->maybeWindowProxyClass());
+ cx->runtime()->setWindowProxyClass(clasp);
+}
+
+void js::SetWindowProxy(JSContext* cx, Handle<JSObject*> global,
+ Handle<JSObject*> windowProxy) {
+ AssertHeapIsIdle();
+ CHECK_THREAD(cx);
+
+ cx->check(global, windowProxy);
+ MOZ_ASSERT(IsWindowProxy(windowProxy));
+
+ GlobalObject& globalObj = global->as<GlobalObject>();
+ if (globalObj.maybeWindowProxy() != windowProxy) {
+ globalObj.setWindowProxy(windowProxy);
+ globalObj.lexicalEnvironment().setWindowProxyThisObject(windowProxy);
+ }
+}
+
+JSObject* js::ToWindowIfWindowProxy(JSObject* obj) {
+ if (IsWindowProxy(obj)) {
+ return &obj->nonCCWGlobal();
+ }
+
+ return obj;
+}
+
+JSObject* js::detail::ToWindowProxyIfWindowSlow(JSObject* obj) {
+ if (JSObject* windowProxy = obj->as<GlobalObject>().maybeWindowProxy()) {
+ return windowProxy;
+ }
+
+ return obj;
+}
+
+bool js::IsWindowProxy(JSObject* obj) {
+ // Note: simply checking `obj == obj->global().windowProxy()` is not
+ // sufficient: we may have transplanted the window proxy with a CCW.
+ // Check the Class to ensure we really have a window proxy.
+ return obj->getClass() ==
+ obj->runtimeFromAnyThread()->maybeWindowProxyClass();
+}
+
+bool js::detail::IsWindowSlow(JSObject* obj) {
+ return obj->as<GlobalObject>().maybeWindowProxy();
+}
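+
+// Editorial sketch (not part of the upstream patch): an embedding wires this
+// up once per runtime and then per Window global (MyWindowProxyClass is a
+// placeholder for the embedding's proxy class):
+//
+//   js::SetWindowProxyClass(cx, &MyWindowProxyClass);  // at startup
+//   js::SetWindowProxy(cx, global, windowProxyObj);    // per Window global
+//   MOZ_ASSERT(js::ToWindowIfWindowProxy(windowProxyObj) == global);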
diff --git a/js/src/vm/WrapperObject.h b/js/src/vm/WrapperObject.h
new file mode 100644
index 0000000000..e2e44962fa
--- /dev/null
+++ b/js/src/vm/WrapperObject.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_WrapperObject_h
+#define vm_WrapperObject_h
+
+#include "js/Wrapper.h"
+#include "vm/JSObject.h"
+#include "vm/ProxyObject.h"
+
+namespace js {
+
+// Proxy family for wrappers.
+// This variable exists solely to provide a unique address for use as an
+// identifier.
+extern const char sWrapperFamily;
+
+class WrapperObject : public ProxyObject {};
+
+class CrossCompartmentWrapperObject : public WrapperObject {
+ public:
+ static const unsigned GrayLinkReservedSlot = 1;
+};
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<js::WrapperObject>() const {
+ return js::IsWrapper(this);
+}
+
+template <>
+inline bool JSObject::is<js::CrossCompartmentWrapperObject>() const {
+ return js::IsCrossCompartmentWrapper(this);
+}
+
+#endif /* vm_WrapperObject_h */
diff --git a/js/src/vm/Xdr.cpp b/js/src/vm/Xdr.cpp
new file mode 100644
index 0000000000..0bf2adcd33
--- /dev/null
+++ b/js/src/vm/Xdr.cpp
@@ -0,0 +1,167 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "vm/Xdr.h"
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF
+#include "mozilla/EndianUtils.h" // mozilla::NativeEndian, MOZ_LITTLE_ENDIAN
+#include "mozilla/Result.h" // mozilla::{Result, Ok, Err}, MOZ_TRY
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+
+#include <algorithm> // std::transform
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t, uintptr_t
+#include <string> // std::char_traits
+#include <type_traits> // std::is_same_v
+#include <utility> // std::move
+
+#include "frontend/FrontendContext.h" // FrontendContext
+#include "js/Transcoding.h" // JS::TranscodeResult, JS::TranscodeBuffer, JS::TranscodeRange
+#include "js/UniquePtr.h" // UniquePtr
+#include "js/Utility.h" // JS::FreePolicy, js_delete
+#include "vm/JSContext.h" // JSContext, ReportAllocationOverflow
+#include "vm/StringType.h" // JSString
+
+using namespace js;
+
+using mozilla::Utf8Unit;
+
+#ifdef DEBUG
+bool XDRCoderBase::validateResultCode(FrontendContext* fc,
+ JS::TranscodeResult code) const {
+ return fc->hadErrors() == bool(code == JS::TranscodeResult::Throw);
+}
+#endif
+
+template <XDRMode mode>
+XDRResult XDRState<mode>::codeChars(char* chars, size_t nchars) {
+ return codeBytes(chars, nchars);
+}
+
+template <XDRMode mode>
+XDRResult XDRState<mode>::codeChars(Latin1Char* chars, size_t nchars) {
+ static_assert(sizeof(Latin1Char) == 1,
+ "Latin1Char must be 1 byte for nchars below to be the "
+ "proper count of bytes");
+ static_assert(std::is_same_v<Latin1Char, unsigned char>,
+ "Latin1Char must be unsigned char to C++-safely reinterpret "
+ "the bytes generically copied below as Latin1Char");
+ return codeBytes(chars, nchars);
+}
+
+template <XDRMode mode>
+XDRResult XDRState<mode>::codeChars(Utf8Unit* units, size_t count) {
+ if (count == 0) {
+ return Ok();
+ }
+
+ if (mode == XDR_ENCODE) {
+ uint8_t* ptr = buf->write(count);
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Throw);
+ }
+
+ std::transform(units, units + count, ptr,
+ [](const Utf8Unit& unit) { return unit.toUint8(); });
+ } else {
+ const uint8_t* ptr = buf->read(count);
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+
+ std::transform(ptr, ptr + count, units,
+ [](const uint8_t& value) { return Utf8Unit(value); });
+ }
+
+ return Ok();
+}
+
+template <XDRMode mode>
+XDRResult XDRState<mode>::codeChars(char16_t* chars, size_t nchars) {
+ if (nchars == 0) {
+ return Ok();
+ }
+
+ size_t nbytes = nchars * sizeof(char16_t);
+ if (mode == XDR_ENCODE) {
+ uint8_t* ptr = buf->write(nbytes);
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Throw);
+ }
+
+ // |mozilla::NativeEndian| correctly handles writing into unaligned |ptr|.
+ mozilla::NativeEndian::copyAndSwapToLittleEndian(ptr, chars, nchars);
+ } else {
+ const uint8_t* ptr = buf->read(nbytes);
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+
+ // |mozilla::NativeEndian| correctly handles reading from unaligned |ptr|.
+ mozilla::NativeEndian::copyAndSwapFromLittleEndian(chars, ptr, nchars);
+ }
+ return Ok();
+}
+
+template <XDRMode mode, typename CharT>
+static XDRResult XDRCodeCharsZ(XDRState<mode>* xdr,
+ XDRTranscodeString<CharT>& buffer) {
+ MOZ_ASSERT_IF(mode == XDR_ENCODE, !buffer.empty());
+ MOZ_ASSERT_IF(mode == XDR_DECODE, buffer.empty());
+
+ using OwnedString = js::UniquePtr<CharT[], JS::FreePolicy>;
+ OwnedString owned;
+
+ static_assert(JSString::MAX_LENGTH <= INT32_MAX,
+ "String length must fit in int32_t");
+
+ uint32_t length = 0;
+ CharT* chars = nullptr;
+
+ if (mode == XDR_ENCODE) {
+ chars = const_cast<CharT*>(buffer.template ref<const CharT*>());
+
+ // Set a reasonable limit on string length.
+ size_t lengthSizeT = std::char_traits<CharT>::length(chars);
+ if (lengthSizeT > JSString::MAX_LENGTH) {
+ ReportAllocationOverflow(xdr->fc());
+ return xdr->fail(JS::TranscodeResult::Throw);
+ }
+ length = static_cast<uint32_t>(lengthSizeT);
+ }
+ MOZ_TRY(xdr->codeUint32(&length));
+
+ if (mode == XDR_DECODE) {
+ owned =
+ xdr->fc()->getAllocator()->template make_pod_array<CharT>(length + 1);
+ if (!owned) {
+ return xdr->fail(JS::TranscodeResult::Throw);
+ }
+ chars = owned.get();
+ }
+
+ MOZ_TRY(xdr->codeChars(chars, length));
+ if (mode == XDR_DECODE) {
+ // Null-terminate and transfer ownership to caller.
+ owned[length] = '\0';
+ buffer.template construct<OwnedString>(std::move(owned));
+ }
+
+ return Ok();
+}
+
+template <XDRMode mode>
+XDRResult XDRState<mode>::codeCharsZ(XDRTranscodeString<char>& buffer) {
+ return XDRCodeCharsZ(this, buffer);
+}
+
+template <XDRMode mode>
+XDRResult XDRState<mode>::codeCharsZ(XDRTranscodeString<char16_t>& buffer) {
+ return XDRCodeCharsZ(this, buffer);
+}
+
+template class js::XDRState<XDR_ENCODE>;
+template class js::XDRState<XDR_DECODE>;
diff --git a/js/src/vm/Xdr.h b/js/src/vm/Xdr.h
new file mode 100644
index 0000000000..c37d94627a
--- /dev/null
+++ b/js/src/vm/Xdr.h
@@ -0,0 +1,457 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Xdr_h
+#define vm_Xdr_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_CRASH
+#include "mozilla/MaybeOneOf.h" // mozilla::MaybeOneOf
+#include "mozilla/Result.h" // mozilla::{Result, Ok, Err}, MOZ_TRY
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint16_t, uint32_t, uint64_t
+#include <string.h> // memcpy
+#include <type_traits> // std::enable_if_t
+
+#include "js/AllocPolicy.h" // ReportOutOfMemory
+#include "js/Transcoding.h" // JS::TranscodeResult, JS::TranscodeBuffer, JS::TranscodeRange, IsTranscodingBytecodeAligned, IsTranscodingBytecodeOffsetAligned
+#include "js/TypeDecls.h" // JS::Latin1Char
+#include "js/UniquePtr.h" // UniquePtr
+#include "js/Utility.h" // JS::FreePolicy
+
+struct JSContext;
+
+namespace js {
+
+class FrontendContext;
+
+enum XDRMode { XDR_ENCODE, XDR_DECODE };
+
+template <typename T>
+using XDRResultT = mozilla::Result<T, JS::TranscodeResult>;
+using XDRResult = XDRResultT<mozilla::Ok>;
+
+class XDRBufferBase {
+ public:
+ explicit XDRBufferBase(FrontendContext* fc, size_t cursor = 0)
+ : fc_(fc), cursor_(cursor) {}
+
+ FrontendContext* fc() const { return fc_; }
+
+ size_t cursor() const { return cursor_; }
+
+ protected:
+ FrontendContext* const fc_;
+ size_t cursor_;
+};
+
+template <XDRMode mode>
+class XDRBuffer;
+
+template <>
+class XDRBuffer<XDR_ENCODE> : public XDRBufferBase {
+ public:
+ XDRBuffer(FrontendContext* fc, JS::TranscodeBuffer& buffer, size_t cursor = 0)
+ : XDRBufferBase(fc, cursor), buffer_(buffer) {}
+
+ uint8_t* write(size_t n) {
+ MOZ_ASSERT(n != 0);
+ if (!buffer_.growByUninitialized(n)) {
+ ReportOutOfMemory(fc());
+ return nullptr;
+ }
+ uint8_t* ptr = &buffer_[cursor_];
+ cursor_ += n;
+ return ptr;
+ }
+
+ bool align32() {
+ size_t extra = cursor_ % 4;
+ if (extra) {
+ size_t padding = 4 - extra;
+ if (!buffer_.appendN(0, padding)) {
+ ReportOutOfMemory(fc());
+ return false;
+ }
+ cursor_ += padding;
+ }
+ return true;
+ }
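+
+  // Editorial note (not part of the upstream patch): e.g. cursor_ == 6 gives
+  // extra == 2, so two zero bytes are appended and cursor_ becomes 8; an
+  // already-aligned cursor appends nothing.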
+
+ bool isAligned32() { return cursor_ % 4 == 0; }
+
+ const uint8_t* read(size_t n) {
+ MOZ_CRASH("Should never read in encode mode");
+ return nullptr;
+ }
+
+ const uint8_t* peek(size_t n) {
+ MOZ_CRASH("Should never read in encode mode");
+ return nullptr;
+ }
+
+ uint8_t* bufferAt(size_t cursor) {
+ MOZ_ASSERT(cursor < buffer_.length());
+ return &buffer_[cursor];
+ }
+
+ private:
+ JS::TranscodeBuffer& buffer_;
+};
+
+template <>
+class XDRBuffer<XDR_DECODE> : public XDRBufferBase {
+ public:
+ XDRBuffer(FrontendContext* fc, const JS::TranscodeRange& range)
+ : XDRBufferBase(fc), buffer_(range) {}
+
+ // This isn't used by XDRStencilDecoder.
+ // Defined just for XDRState, shared with XDRStencilEncoder.
+ XDRBuffer(FrontendContext* fc, JS::TranscodeBuffer& buffer, size_t cursor = 0)
+ : XDRBufferBase(fc, cursor), buffer_(buffer.begin(), buffer.length()) {}
+
+ bool align32() {
+ size_t extra = cursor_ % 4;
+ if (extra) {
+ size_t padding = 4 - extra;
+ cursor_ += padding;
+
+ // Don't let buggy code read past our buffer
+ if (cursor_ > buffer_.length()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool isAligned32() { return cursor_ % 4 == 0; }
+
+ const uint8_t* read(size_t n) {
+ MOZ_ASSERT(cursor_ < buffer_.length());
+ const uint8_t* ptr = &buffer_[cursor_];
+ cursor_ += n;
+
+ // Don't let buggy code read past our buffer
+ if (cursor_ > buffer_.length()) {
+ return nullptr;
+ }
+
+ return ptr;
+ }
+
+ const uint8_t* peek(size_t n) {
+ MOZ_ASSERT(cursor_ < buffer_.length());
+ const uint8_t* ptr = &buffer_[cursor_];
+
+ // Don't let buggy code read past our buffer
+ if (cursor_ + n > buffer_.length()) {
+ return nullptr;
+ }
+
+ return ptr;
+ }
+
+ uint8_t* write(size_t n) {
+ MOZ_CRASH("Should never write in decode mode");
+ return nullptr;
+ }
+
+ private:
+ const JS::TranscodeRange buffer_;
+};
+
+template <typename CharT>
+using XDRTranscodeString =
+ mozilla::MaybeOneOf<const CharT*, js::UniquePtr<CharT[], JS::FreePolicy>>;
+
+class XDRCoderBase {
+ private:
+#ifdef DEBUG
+ JS::TranscodeResult resultCode_;
+#endif
+
+ protected:
+ XDRCoderBase()
+#ifdef DEBUG
+ : resultCode_(JS::TranscodeResult::Ok)
+#endif
+ {
+ }
+
+ public:
+#ifdef DEBUG
+ // Record logical failures of XDR.
+ JS::TranscodeResult resultCode() const { return resultCode_; }
+ void setResultCode(JS::TranscodeResult code) {
+ MOZ_ASSERT(resultCode() == JS::TranscodeResult::Ok);
+ resultCode_ = code;
+ }
+ bool validateResultCode(FrontendContext* fc, JS::TranscodeResult code) const;
+#endif
+};
+
+/*
+ * XDR serialization state. All data is encoded in native endian, except
+ * bytecode.
+ */
+template <XDRMode mode>
+class XDRState : public XDRCoderBase {
+ protected:
+ XDRBuffer<mode> mainBuf;
+ XDRBuffer<mode>* buf;
+
+ public:
+ XDRState(FrontendContext* fc, JS::TranscodeBuffer& buffer, size_t cursor = 0)
+ : mainBuf(fc, buffer, cursor), buf(&mainBuf) {}
+
+ template <typename RangeType>
+ XDRState(FrontendContext* fc, const RangeType& range)
+ : mainBuf(fc, range), buf(&mainBuf) {}
+
+ // No default copy constructor or copying assignment, because |buf|
+ // is an internal pointer.
+ XDRState(const XDRState&) = delete;
+ XDRState& operator=(const XDRState&) = delete;
+
+ ~XDRState() = default;
+
+ FrontendContext* fc() const { return mainBuf.fc(); }
+
+ template <typename T = mozilla::Ok>
+ XDRResultT<T> fail(JS::TranscodeResult code) {
+#ifdef DEBUG
+ MOZ_ASSERT(code != JS::TranscodeResult::Ok);
+ MOZ_ASSERT(validateResultCode(fc(), code));
+ setResultCode(code);
+#endif
+ return mozilla::Err(code);
+ }
+
+ XDRResult align32() {
+ if (!buf->align32()) {
+ return fail(JS::TranscodeResult::Throw);
+ }
+ return mozilla::Ok();
+ }
+
+ bool isAligned32() { return buf->isAligned32(); }
+
+ XDRResult readData(const uint8_t** pptr, size_t length) {
+ const uint8_t* ptr = buf->read(length);
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+ *pptr = ptr;
+ return mozilla::Ok();
+ }
+
+  // Peek the next `sizeof(T)` bytes and return a pointer to them in
+  // `*pptr`.
+ // The caller is responsible for aligning the buffer by calling `align32`.
+ template <typename T>
+ XDRResult peekData(const T** pptr) {
+ static_assert(alignof(T) <= 4);
+ MOZ_ASSERT(isAligned32());
+ const uint8_t* ptr = buf->peek(sizeof(T));
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+ *pptr = reinterpret_cast<const T*>(ptr);
+ return mozilla::Ok();
+ }
+
+ // Peek uint32_t data.
+ XDRResult peekUint32(uint32_t* n) {
+ MOZ_ASSERT(mode == XDR_DECODE);
+ const uint8_t* ptr = buf->peek(sizeof(*n));
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+ *n = *reinterpret_cast<const uint32_t*>(ptr);
+ return mozilla::Ok();
+ }
+
+ XDRResult codeUint8(uint8_t* n) {
+ if (mode == XDR_ENCODE) {
+ uint8_t* ptr = buf->write(sizeof(*n));
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Throw);
+ }
+ *ptr = *n;
+ } else {
+ const uint8_t* ptr = buf->read(sizeof(*n));
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+ *n = *ptr;
+ }
+ return mozilla::Ok();
+ }
+
+ private:
+ template <typename T>
+ XDRResult codeUintImpl(T* n) {
+ if (mode == XDR_ENCODE) {
+ uint8_t* ptr = buf->write(sizeof(T));
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Throw);
+ }
+ memcpy(ptr, n, sizeof(T));
+ } else {
+ const uint8_t* ptr = buf->read(sizeof(T));
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+ memcpy(n, ptr, sizeof(T));
+ }
+ return mozilla::Ok();
+ }
+
+ public:
+ XDRResult codeUint16(uint16_t* n) { return codeUintImpl(n); }
+
+ XDRResult codeUint32(uint32_t* n) { return codeUintImpl(n); }
+
+ XDRResult codeUint64(uint64_t* n) { return codeUintImpl(n); }
+
+ void codeUint32At(uint32_t* n, size_t cursor) {
+ if constexpr (mode == XDR_ENCODE) {
+ uint8_t* ptr = buf->bufferAt(cursor);
+ memcpy(ptr, n, sizeof(uint32_t));
+ } else {
+ MOZ_CRASH("not supported.");
+ }
+ }
+
+ const uint8_t* bufferAt(size_t cursor) const {
+ if constexpr (mode == XDR_ENCODE) {
+ return buf->bufferAt(cursor);
+ }
+
+ MOZ_CRASH("not supported.");
+ }
+
+ XDRResult peekArray(size_t n, const uint8_t** p) {
+ if constexpr (mode == XDR_DECODE) {
+ const uint8_t* ptr = buf->peek(n);
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+
+ *p = ptr;
+
+ return mozilla::Ok();
+ }
+
+ MOZ_CRASH("not supported.");
+ }
+
+ /*
+ * Use SFINAE to refuse any specialization which is not an enum. Uses of
+ * this function do not have to specialize the type of the enumerated field
+   * as C++ will extract the parameterized type from the argument list.
+ */
+ template <typename T>
+ XDRResult codeEnum32(T* val, std::enable_if_t<std::is_enum_v<T>>* = nullptr) {
+ // Mix the enumeration value with a random magic number, such that a
+ // corruption with a low-ranged value (like 0) is less likely to cause a
+    // misinterpretation of the XDR content and instead cause a failure.
+ const uint32_t MAGIC = 0x21AB218C;
+ uint32_t tmp;
+ if (mode == XDR_ENCODE) {
+ tmp = uint32_t(*val) ^ MAGIC;
+ }
+ MOZ_TRY(codeUint32(&tmp));
+ if (mode == XDR_DECODE) {
+ *val = T(tmp ^ MAGIC);
+ }
+ return mozilla::Ok();
+ }
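+
+  // Editorial note (not part of the upstream patch): e.g. encoding the enum
+  // value 3 stores 3 ^ 0x21AB218C == 0x21AB218F, and decoding XORs with the
+  // same constant to recover 3. A zeroed/corrupted buffer decodes to the
+  // implausible enumerator 0x21AB218C rather than a plausible small value.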
+
+ XDRResult codeDouble(double* dp) {
+ union DoublePun {
+ double d;
+ uint64_t u;
+ } pun;
+ if (mode == XDR_ENCODE) {
+ pun.d = *dp;
+ }
+ MOZ_TRY(codeUint64(&pun.u));
+ if (mode == XDR_DECODE) {
+ *dp = pun.d;
+ }
+ return mozilla::Ok();
+ }
+
+ XDRResult codeMarker(uint32_t magic) {
+ uint32_t actual = magic;
+ MOZ_TRY(codeUint32(&actual));
+ if (actual != magic) {
+ // Fail in debug, but only soft-fail in release
+ MOZ_ASSERT(false, "Bad XDR marker");
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+ return mozilla::Ok();
+ }
+
+ XDRResult codeBytes(void* bytes, size_t len) {
+ if (len == 0) {
+ return mozilla::Ok();
+ }
+ if (mode == XDR_ENCODE) {
+ uint8_t* ptr = buf->write(len);
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Throw);
+ }
+ memcpy(ptr, bytes, len);
+ } else {
+ const uint8_t* ptr = buf->read(len);
+ if (!ptr) {
+ return fail(JS::TranscodeResult::Failure_BadDecode);
+ }
+ memcpy(bytes, ptr, len);
+ }
+ return mozilla::Ok();
+ }
+
+  // While encoding, write the given data to the buffer.
+  // While decoding, return a pointer borrowed from the buffer in `*data`.
+ //
+ // The data can have extra bytes after `sizeof(T)`, and the caller should
+ // provide the entire data length as `length`.
+ //
+ // The caller is responsible for aligning the buffer by calling `align32`.
+ template <typename T>
+ XDRResult borrowedData(T** data, uint32_t length) {
+ static_assert(alignof(T) <= 4);
+ MOZ_ASSERT(isAligned32());
+
+ if (mode == XDR_ENCODE) {
+ MOZ_TRY(codeBytes(*data, length));
+ } else {
+ const uint8_t* cursor = nullptr;
+ MOZ_TRY(readData(&cursor, length));
+ *data = reinterpret_cast<T*>(const_cast<uint8_t*>(cursor));
+ }
+ return mozilla::Ok();
+ }
+
+ // Prefer using a variant below that is encoding aware.
+ XDRResult codeChars(char* chars, size_t nchars);
+
+ XDRResult codeChars(JS::Latin1Char* chars, size_t nchars);
+ XDRResult codeChars(mozilla::Utf8Unit* units, size_t nchars);
+ XDRResult codeChars(char16_t* chars, size_t nchars);
+
+ // Transcode null-terminated strings. When decoding, a new buffer is
+ // allocated and ownership is returned to caller.
+ //
+ // NOTE: Throws if string longer than JSString::MAX_LENGTH.
+ XDRResult codeCharsZ(XDRTranscodeString<char>& buffer);
+ XDRResult codeCharsZ(XDRTranscodeString<char16_t>& buffer);
+};
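+
+// Editorial sketch (not part of the upstream patch): a round trip through the
+// integer primitives above, assuming a valid FrontendContext* fc:
+//
+//   JS::TranscodeBuffer buffer;
+//   js::XDRState<js::XDR_ENCODE> encoder(fc, buffer);
+//   uint32_t n = 42;
+//   if (encoder.codeUint32(&n).isErr()) { /* handle OOM */ }
+//
+//   js::XDRState<js::XDR_DECODE> decoder(fc, buffer);
+//   uint32_t out = 0;
+//   if (decoder.codeUint32(&out).isErr()) { /* handle bad decode */ }
+//   MOZ_ASSERT(out == 42);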
+
+} /* namespace js */
+
+#endif /* vm_Xdr_h */
diff --git a/js/src/vm/jsopcode.py b/js/src/vm/jsopcode.py
new file mode 100644
index 0000000000..4f3ff66d8c
--- /dev/null
+++ b/js/src/vm/jsopcode.py
@@ -0,0 +1,382 @@
+#!/usr/bin/env python3 -B
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+quoted_pat = re.compile(r"([^A-Za-z0-9]|^)'([^']+)'")
+js_pat = re.compile(r"([^A-Za-z0-9]|^)(JS[A-Z0-9_\*]+)")
+
+
+def codify(text):
+ text = re.sub(quoted_pat, "\\1<code>\\2</code>", text)
+ text = re.sub(js_pat, "\\1<code>\\2</code>", text)
+
+ return text
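+
+
+# Editorial note (not part of the upstream patch): codify() wraps quoted
+# words and JS* identifiers in <code> tags, e.g.:
+#
+#   codify("the 'length' property of JSString")
+#   # -> "the <code>length</code> property of <code>JSString</code>"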
+
+
+space_star_space_pat = re.compile(r"^\s*\* ?", re.M)
+
+
+def get_comment_body(comment):
+ return re.sub(space_star_space_pat, "", comment).split("\n")
+
+
+quote_pat = re.compile('"([^"]+)"')
+str_pat = re.compile("js_([^_]+)_str")
+
+
+def parse_name(s):
+ m = quote_pat.search(s)
+ if m:
+ return m.group(1)
+ m = str_pat.search(s)
+ if m:
+ return m.group(1)
+ return s
+
+
+csv_pat = re.compile(", *")
+
+
+def parse_csv(s):
+ a = csv_pat.split(s)
+ if len(a) == 1 and a[0] == "":
+ return []
+ return a
+
+
+def get_stack_count(stack):
+ if stack == "":
+ return 0
+ if "..." in stack:
+ return -1
+ return len(stack.split(","))
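+
+
+# Editorial note (not part of the upstream patch): get_stack_count("") == 0,
+# get_stack_count("lval, rval") == 2, and any notation containing "..." yields
+# -1, meaning the count cannot be determined statically.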
+
+
+def parse_index(comment):
+ index = []
+ current_types = None
+ category_name = ""
+    category_pat = re.compile(r"\[([^\]]+)\]")
+ for line in get_comment_body(comment):
+ m = category_pat.search(line)
+ if m:
+ category_name = m.group(1)
+ if category_name == "Index":
+ continue
+ current_types = []
+ index.append((category_name, current_types))
+ else:
+ type_name = line.strip()
+ if type_name and current_types is not None:
+ current_types.append((type_name, []))
+
+ return index
+
+
+# Holds the information stored in the comment with the following format:
+# /*
+# * {desc}
+# * Category: {category_name}
+# * Type: {type_name}
+# * Operands: {operands}
+# * Stack: {stack_uses} => {stack_defs}
+# */
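+#
+# For example (illustrative only, not a real entry from Opcodes.h):
+#
+#   /*
+#    * Pops two values, compares them with '<', and pushes the boolean
+#    * result.
+#    *
+#    *   Category: Expressions
+#    *   Type: Comparison operators
+#    *   Operands:
+#    *   Stack: lval, rval => (lval < rval)
+#    */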
+
+
+class CommentInfo:
+ def __init__(self):
+ self.desc = ""
+ self.category_name = ""
+ self.type_name = ""
+ self.operands = ""
+ self.stack_uses = ""
+ self.stack_defs = ""
+
+
+# Holds the information stored in the macro with the following format:
+# MACRO({op}, {op_snake}, {token}, {length}, {nuses}, {ndefs}, {format})
+# and the information from CommentInfo.
+
+
+class OpcodeInfo:
+ def __init__(self, value, comment_info):
+ self.op = ""
+ self.op_snake = ""
+ self.value = value
+ self.token = ""
+ self.length = ""
+ self.nuses = ""
+ self.ndefs = ""
+ self.format_ = ""
+
+ self.operands_array = []
+ self.stack_uses_array = []
+ self.stack_defs_array = []
+
+ self.desc = comment_info.desc
+ self.category_name = comment_info.category_name
+ self.type_name = comment_info.type_name
+ self.operands = comment_info.operands
+ self.operands_array = comment_info.operands_array
+ self.stack_uses = comment_info.stack_uses
+ self.stack_uses_array = comment_info.stack_uses_array
+ self.stack_defs = comment_info.stack_defs
+ self.stack_defs_array = comment_info.stack_defs_array
+
+ # List of OpcodeInfo that corresponds to macros after this.
+ # /*
+ # * comment
+ # */
+ # MACRO(JSOP_SUB, ...)
+ # MACRO(JSOP_MUL, ...)
+ # MACRO(JSOP_DIV, ...)
+ self.group = []
+
+ self.sort_key = ""
+
+
+def find_by_name(list, name):
+ for (n, body) in list:
+ if n == name:
+ return body
+
+ return None
+
+
+def add_to_index(index, opcode):
+ types = find_by_name(index, opcode.category_name)
+ if types is None:
+ raise Exception(
+ "Category is not listed in index: "
+ "{name}".format(name=opcode.category_name)
+ )
+ opcodes = find_by_name(types, opcode.type_name)
+ if opcodes is None:
+ if opcode.type_name:
+ raise Exception(
+ "Type is not listed in {category}: "
+ "{name}".format(category=opcode.category_name, name=opcode.type_name)
+ )
+ types.append((opcode.type_name, [opcode]))
+ return
+
+ opcodes.append(opcode)
+
+
+tag_pat = re.compile(r"^\s*[A-Za-z]+:\s*|\s*$")
+
+
+def get_tag_value(line):
+ return re.sub(tag_pat, "", line)
+
+
+RUST_OR_CPP_KEYWORDS = {
+ "and",
+ "case",
+ "default",
+ "double",
+ "false",
+ "goto",
+ "in",
+ "new",
+ "not",
+ "or",
+ "return",
+ "throw",
+ "true",
+ "try",
+ "typeof",
+ "void",
+}
+
+
+def get_opcodes(dir):
+ iter_pat = re.compile(
+ r"/\*(.*?)\*/" # either a documentation comment...
+ r"|"
+ r"MACRO\(" # or a MACRO(...) call
+ r"(?P<op>[^,]+),\s*"
+ r"(?P<op_snake>[^,]+),\s*"
+ r"(?P<token>[^,]+,)\s*"
+ r"(?P<length>[0-9\-]+),\s*"
+ r"(?P<nuses>[0-9\-]+),\s*"
+ r"(?P<ndefs>[0-9\-]+),\s*"
+ r"(?P<format>[^\)]+)"
+ r"\)",
+ re.S,
+ )
+ stack_pat = re.compile(r"^(?P<uses>.*?)" r"\s*=>\s*" r"(?P<defs>.*?)$")
+
+ opcodes = dict()
+ index = []
+
+ with open("{dir}/js/src/vm/Opcodes.h".format(dir=dir), "r", encoding="utf-8") as f:
+ data = f.read()
+
+ comment_info = None
+ opcode = None
+
+ # The first opcode after the comment.
+ group_head = None
+ next_opcode_value = 0
+
+ for m in re.finditer(iter_pat, data):
+ comment = m.group(1)
+ op = m.group("op")
+
+ if comment:
+ if "[Index]" in comment:
+ index = parse_index(comment)
+ continue
+
+ if "Operands:" not in comment:
+ continue
+
+ group_head = None
+
+ comment_info = CommentInfo()
+
+ state = "desc"
+ stack = ""
+ desc = ""
+
+ for line in get_comment_body(comment):
+ if line.startswith(" Category:"):
+ state = "category"
+ comment_info.category_name = get_tag_value(line)
+ elif line.startswith(" Type:"):
+ state = "type"
+ comment_info.type_name = get_tag_value(line)
+ elif line.startswith(" Operands:"):
+ state = "operands"
+ comment_info.operands = get_tag_value(line)
+ elif line.startswith(" Stack:"):
+ state = "stack"
+ stack = get_tag_value(line)
+ elif state == "desc":
+ desc += line + "\n"
+ elif line.startswith(" "):
+ if line.isspace():
+ pass
+ elif state == "operands":
+ comment_info.operands += " " + line.strip()
+ elif state == "stack":
+ stack += " " + line.strip()
+ else:
+ raise ValueError(
+ "unrecognized line in comment: {!r}\n\nfull comment was:\n{}".format(
+ line, comment
+ )
+ )
+
+ comment_info.desc = desc
+
+            m2 = stack_pat.search(stack)
+            if m2:
+                comment_info.stack_uses = m2.group("uses")
+                comment_info.stack_defs = m2.group("defs")
+
+            # Split the comma-separated operand/stack lists only after the
+            # "uses => defs" notation has been parsed apart.
+            comment_info.operands_array = parse_csv(comment_info.operands)
+            comment_info.stack_uses_array = parse_csv(comment_info.stack_uses)
+            comment_info.stack_defs_array = parse_csv(comment_info.stack_defs)
+ else:
+ assert op is not None
+ opcode = OpcodeInfo(next_opcode_value, comment_info)
+ next_opcode_value += 1
+
+ opcode.op = op
+ opcode.op_snake = m.group("op_snake")
+ opcode.token = parse_name(m.group("token"))
+ opcode.length = m.group("length")
+ opcode.nuses = m.group("nuses")
+ opcode.ndefs = m.group("ndefs")
+ opcode.format_ = m.group("format").split("|")
+
+ expected_snake = re.sub(r"(?<!^)(?=[A-Z])", "_", opcode.op).lower()
+ if expected_snake in RUST_OR_CPP_KEYWORDS:
+ expected_snake += "_"
+ if opcode.op_snake != expected_snake:
+ raise ValueError(
+ "Unexpected snake-case name for {}: expected {!r}, got {!r}".format(
+                        opcode.op, expected_snake, opcode.op_snake
+ )
+ )
+
+ if not group_head:
+ group_head = opcode
+
+ opcode.sort_key = opcode.op
+ if opcode.category_name == "":
+ raise Exception(
+ "Category is not specified for " "{op}".format(op=opcode.op)
+ )
+ add_to_index(index, opcode)
+ else:
+ if group_head.length != opcode.length:
+ raise Exception(
+ "length should be same for opcodes of the"
+ " same group: "
+ "{value1}({op1}) != "
+ "{value2}({op2})".format(
+ op1=group_head.op,
+ value1=group_head.length,
+ op2=opcode.op,
+ value2=opcode.length,
+ )
+ )
+ if group_head.nuses != opcode.nuses:
+ raise Exception(
+ "nuses should be same for opcodes of the"
+ " same group: "
+ "{value1}({op1}) != "
+ "{value2}({op2})".format(
+ op1=group_head.op,
+ value1=group_head.nuses,
+ op2=opcode.op,
+ value2=opcode.nuses,
+ )
+ )
+ if group_head.ndefs != opcode.ndefs:
+ raise Exception(
+ "ndefs should be same for opcodes of the"
+ " same group: "
+ "{value1}({op1}) != "
+ "{value2}({op2})".format(
+ op1=group_head.op,
+ value1=group_head.ndefs,
+ op2=opcode.op,
+ value2=opcode.ndefs,
+ )
+ )
+
+ group_head.group.append(opcode)
+
+ if opcode.op < group_head.op:
+ group_head.sort_key = opcode.op
+
+ opcodes[op] = opcode
+
+ # Verify stack notation.
+ nuses = int(opcode.nuses)
+ ndefs = int(opcode.ndefs)
+
+ stack_nuses = get_stack_count(opcode.stack_uses)
+ stack_ndefs = get_stack_count(opcode.stack_defs)
+
+ if nuses != -1 and stack_nuses != -1 and nuses != stack_nuses:
+ raise Exception(
+ "nuses should match stack notation: {op}: "
+ "{nuses} != {stack_nuses} "
+ "(stack_nuses)".format(op=op, nuses=nuses, stack_nuses=stack_nuses)
+ )
+ if ndefs != -1 and stack_ndefs != -1 and ndefs != stack_ndefs:
+ raise Exception(
+ "ndefs should match stack notation: {op}: "
+ "{ndefs} != {stack_ndefs} "
+ "(stack_ndefs)".format(op=op, ndefs=ndefs, stack_ndefs=stack_ndefs)
+ )
+
+ return index, opcodes
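+
+
+# Editorial sketch (not part of the upstream patch): see make_opcode_doc.py
+# for a real consumer; the shape of the result is roughly:
+#
+#   index, opcodes = get_opcodes(root_dir)  # root of a mozilla checkout
+#   for category_name, types in index:
+#       for type_name, ops in types:
+#           ...  # ops are OpcodeInfo instances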
diff --git a/js/src/vm/make_opcode_doc.py b/js/src/vm/make_opcode_doc.py
new file mode 100755
index 0000000000..11ce81a3ed
--- /dev/null
+++ b/js/src/vm/make_opcode_doc.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3 -B
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+""" Usage: python make_opcode_doc.py
+
+ This script generates SpiderMonkey bytecode documentation
+ from js/src/vm/Opcodes.h.
+
+ Output is written to stdout and should be pasted into the following
+ MDN page:
+ https://developer.mozilla.org/en-US/docs/SpiderMonkey/Internals/Bytecode
+"""
+
+import os
+import sys
+
+# Allow this script to be run from anywhere.
+this_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, this_dir)
+
+
+from xml.sax.saxutils import escape
+
+import jsopcode
+
+try:
+ import markdown
+except ModuleNotFoundError as exc:
+ if exc.name == "markdown":
+ # Right, most people won't have python-markdown installed. Suggest the
+ # most likely path to getting this running.
+ print("Failed to import markdown: " + exc.msg, file=sys.stderr)
+ if os.path.exists(os.path.join(this_dir, "venv")):
+ print(
+ "It looks like you previously created a virtualenv here. Try this:\n"
+ " . venv/bin/activate",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+ print(
+ "Try this:\n"
+ " pip3 install markdown\n"
+ "Or, if you want to avoid installing things globally:\n"
+ " python3 -m venv venv && . venv/bin/activate && pip3 install markdown",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+ raise exc
+except ImportError as exc:
+ # Oh no! Markdown failed to load. Check for a specific known issue.
+ if exc.msg.startswith("bad magic number in 'opcode'") and os.path.isfile(
+ os.path.join(this_dir, "opcode.pyc")
+ ):
+ print(
+ "Failed to import markdown due to bug 1506380.\n"
+ "This is dumb--it's an old Python cache file in your directory. Try this:\n"
+ " rm " + this_dir + "/opcode.pyc\n"
+ "The file is obsolete since November 2018.",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+ raise exc
+
+
+SOURCE_BASE = "https://searchfox.org/mozilla-central/source"
+
+FORMAT_TO_IGNORE = {
+ "JOF_BYTE",
+ "JOF_UINT8",
+ "JOF_UINT16",
+ "JOF_UINT24",
+ "JOF_UINT32",
+ "JOF_INT8",
+ "JOF_INT32",
+ "JOF_TABLESWITCH",
+ "JOF_REGEXP",
+ "JOF_DOUBLE",
+ "JOF_LOOPHEAD",
+ "JOF_BIGINT",
+}
+
+
+def format_format(format):
+ format = [flag for flag in format if flag not in FORMAT_TO_IGNORE]
+ if len(format) == 0:
+ return ""
+ return "<div>Format: {format}</div>\n".format(format=", ".join(format))
+
+
+def maybe_escape(value, format_str, fallback=""):
+ if value:
+ return format_str.format(escape(value))
+ return fallback
+
+
+OPCODE_FORMAT = """\
+<dt id="{id}">{names}</dt>
+<dd>
+{operands}{stack}{desc}
+{format}</dd>
+"""
+
+
+def print_opcode(opcode):
+ opcodes = [opcode] + opcode.group
+ names = ", ".join(maybe_escape(code.op, "<code>{}</code>") for code in opcodes)
+ operands = maybe_escape(opcode.operands, "<div>Operands: <code>({})</code></div>\n")
+ stack_uses = maybe_escape(opcode.stack_uses, "<code>{}</code> ")
+ stack_defs = maybe_escape(opcode.stack_defs, " <code>{}</code>")
+ if stack_uses or stack_defs:
+ stack = "<div>Stack: {}&rArr;{}</div>\n".format(stack_uses, stack_defs)
+ else:
+ stack = ""
+
+ print(
+ OPCODE_FORMAT.format(
+ id=opcodes[0].op,
+ names=names,
+ operands=operands,
+ stack=stack,
+ desc=markdown.markdown(opcode.desc),
+ format=format_format(opcode.format_),
+ )
+ )
+
+
+id_cache = dict()
+id_count = dict()
+
+
+def make_element_id(category, type=""):
+ key = "{}:{}".format(category, type)
+ if key in id_cache:
+ return id_cache[key]
+
+ if type == "":
+ id = category.replace(" ", "_")
+ else:
+ id = type.replace(" ", "_")
+
+ if id in id_count:
+ id_count[id] += 1
+ id = "{}_{}".format(id, id_count[id])
+ else:
+ id_count[id] = 1
+
+ id_cache[key] = id
+ return id
+
+
+def print_doc(index):
+ print(
+ """<div>{{{{SpiderMonkeySidebar("Internals")}}}}</div>
+
+<h2 id="Bytecode_Listing">Bytecode Listing</h2>
+
+<p>This document is automatically generated from
+<a href="{source_base}/js/src/vm/Opcodes.h">Opcodes.h</a> by
+<a href="{source_base}/js/src/vm/make_opcode_doc.py">make_opcode_doc.py</a>.</p>
+""".format(
+ source_base=SOURCE_BASE
+ )
+ )
+
+ for (category_name, types) in index:
+ print(
+ '<h3 id="{id}">{name}</h3>'.format(
+ name=category_name, id=make_element_id(category_name)
+ )
+ )
+ for (type_name, opcodes) in types:
+ if type_name:
+ print(
+ '<h4 id="{id}">{name}</h4>'.format(
+ name=type_name, id=make_element_id(category_name, type_name)
+ )
+ )
+ print("<dl>")
+ for opcode in opcodes:
+ print_opcode(opcode)
+ print("</dl>")
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 1:
+ print("Usage: mach python make_opcode_doc.py", file=sys.stderr)
+ sys.exit(1)
+ js_src_vm_dir = os.path.dirname(os.path.realpath(__file__))
+ root_dir = os.path.abspath(os.path.join(js_src_vm_dir, "..", "..", ".."))
+
+ index, _ = jsopcode.get_opcodes(root_dir)
+ print_doc(index)